Column          Type     Length (min–max)
hip_filename    string   5–84
hip_content     string   79–9.69M
cuda_filename   string   4–83
cuda_content    string   19–9.69M
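A minimal sketch of how these four columns pair up per row, assuming the data is loadable with the Hugging Face `datasets` library; the dataset identifier "user/hip-cuda-pairs" is a placeholder, not the real path.

```python
# Hypothetical loading example: each row pairs a hipified file with its CUDA source.
from datasets import load_dataset

ds = load_dataset("user/hip-cuda-pairs", split="train")  # placeholder dataset id

for row in ds.select(range(3)):
    # hip_filename/cuda_filename share the same hash stem with .hip/.cu extensions.
    print(row["hip_filename"], "<->", row["cuda_filename"])
    print(len(row["hip_content"]), "chars of HIP,",
          len(row["cuda_content"]), "chars of CUDA")
```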
9dce32965d79dba4632291ccc20200c1f17c61ab.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Correlation_forward.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int nOutputChannels = 1; int outputHeight = YSIZE; int outputWidth = XSIZE; float *rInput1 = NULL; hipMalloc(&rInput1, XSIZE*YSIZE); int nInputChannels = 1; int inputHeight = YSIZE; int inputWidth = XSIZE; float *rInput2 = NULL; hipMalloc(&rInput2, XSIZE*YSIZE); int pad_size = XSIZE*YSIZE; int kernel_size = XSIZE*YSIZE; int max_displacement = 1; int stride1 = 2; int stride2 = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Correlation_forward), dim3(gridBlock),dim3(threadBlock), 0, 0, output,nOutputChannels,outputHeight,outputWidth,rInput1,nInputChannels,inputHeight,inputWidth,rInput2,pad_size,kernel_size,max_displacement,stride1,stride2); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Correlation_forward), dim3(gridBlock),dim3(threadBlock), 0, 0, output,nOutputChannels,outputHeight,outputWidth,rInput1,nInputChannels,inputHeight,inputWidth,rInput2,pad_size,kernel_size,max_displacement,stride1,stride2); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Correlation_forward), dim3(gridBlock),dim3(threadBlock), 0, 0, output,nOutputChannels,outputHeight,outputWidth,rInput1,nInputChannels,inputHeight,inputWidth,rInput2,pad_size,kernel_size,max_displacement,stride1,stride2); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
9dce32965d79dba4632291ccc20200c1f17c61ab.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Correlation_forward.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int nOutputChannels = 1; int outputHeight = YSIZE; int outputWidth = XSIZE; float *rInput1 = NULL; cudaMalloc(&rInput1, XSIZE*YSIZE); int nInputChannels = 1; int inputHeight = YSIZE; int inputWidth = XSIZE; float *rInput2 = NULL; cudaMalloc(&rInput2, XSIZE*YSIZE); int pad_size = XSIZE*YSIZE; int kernel_size = XSIZE*YSIZE; int max_displacement = 1; int stride1 = 2; int stride2 = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Correlation_forward<<<gridBlock,threadBlock>>>(output,nOutputChannels,outputHeight,outputWidth,rInput1,nInputChannels,inputHeight,inputWidth,rInput2,pad_size,kernel_size,max_displacement,stride1,stride2); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Correlation_forward<<<gridBlock,threadBlock>>>(output,nOutputChannels,outputHeight,outputWidth,rInput1,nInputChannels,inputHeight,inputWidth,rInput2,pad_size,kernel_size,max_displacement,stride1,stride2); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Correlation_forward<<<gridBlock,threadBlock>>>(output,nOutputChannels,outputHeight,outputWidth,rInput1,nInputChannels,inputHeight,inputWidth,rInput2,pad_size,kernel_size,max_displacement,stride1,stride2); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5d0717718de018337d3b0cf69c8ebd4bef1ef015.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void gpu_matrixmult(int *gpu_a, int *gpu_b, int *gpu_c, int N) { int k, sum = 0; int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; if(col < N && row < N) { for(k = 0; k < N; k++) sum += gpu_a[row * N + k] * gpu_b[k * N + col]; gpu_c[row * N + col] = sum; } }
5d0717718de018337d3b0cf69c8ebd4bef1ef015.cu
#include "includes.h" __global__ void gpu_matrixmult(int *gpu_a, int *gpu_b, int *gpu_c, int N) { int k, sum = 0; int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; if(col < N && row < N) { for(k = 0; k < N; k++) sum += gpu_a[row * N + k] * gpu_b[k * N + col]; gpu_c[row * N + col] = sum; } }
4513ce713957ab727b690d06194c11aff2365a55.hip
// !!! This is a file automatically generated by hipify!!! /********************************************************************* By : Anfal AlYousufi Course : CIS 6930- Programming Massively Parallel Systems Project : 1 Date : May 26th 2016 Summer 2018 *********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> #include <hip/hip_runtime.h> #define BOX_SIZE 23000 /* size of the data box on one dimension */ /* descriptors for single atom in the tree */ typedef struct atomdesc { double x_pos; double y_pos; double z_pos; } atom; typedef struct hist_entry{ //float min; //float max; unsigned long long d_cnt; /* need a long long type as the count might be huge */ } bucket; bucket * histogram; /* list of all buckets in the histogram */ long long PDH_acnt; /* total number of data points */ int num_buckets; /* total number of buckets in the histogram */ double PDH_res; /* value of w */ atom * atom_list; /* list of all data points */ /* These are for an old way of tracking time */ struct timezone Idunno; struct timeval startTime, endTime; // // Checking for CUDA Error // void checkError(hipError_t e, const char out[]){ if(e != hipSuccess){ printf("There is a CUDA Error: %s, %s \n", out, hipGetErrorString(e)); exit(EXIT_FAILURE); } } // // distance of two points in the atom_list // __device__ double p2p_distance(atom *l, int ind1, int ind2) { double x1 = l[ind1].x_pos; double x2 = l[ind2].x_pos; double y1 = l[ind1].y_pos; double y2 = l[ind2].y_pos; double z1 = l[ind1].z_pos; double z2 = l[ind2].z_pos; return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2)); } // //SDH solution in a single CPU thread // __global__ void PDH_baseline(bucket *histogram_in, atom *list, double width, int size) { int i, j, h_pos; double dist; i = blockIdx.x * blockDim.x + threadIdx.x; j = i + 1; for(int x = j; x < size; x++){ dist = p2p_distance(list,i,x); h_pos = (int) (dist/ width); atomicAdd( &histogram_in[h_pos].d_cnt, 1); } } // // set a checkpoint // and // show running time in seconds // double report_running_time() { long sec_diff, usec_diff; gettimeofday(&endTime, &Idunno); sec_diff = endTime.tv_sec - startTime.tv_sec; usec_diff= endTime.tv_usec-startTime.tv_usec; if(usec_diff < 0) { sec_diff --; usec_diff += 1000000; } printf("Running time for GPU version: %ld.%06lds\n", sec_diff, usec_diff); return (double)(sec_diff*1.0 + usec_diff/1000000.0); } /* brute-force solution in a GPU thread */ __global__ void PDH2D_baseline(bucket *histogram, atom *Atomlist, double w){ int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if(i < j){ double dist = p2p_distance(Atomlist, i, j); int h_pos = (int)(dist / w); histogram[h_pos].d_cnt++; printf("%d, %d : %d, %f \n", i, j, h_pos, dist); } } /* print the counts in all buckets of the histogram */ void output_histogram(bucket *histogram){ int i; long long total_cnt = 0; for(i=0; i< num_buckets; i++) { if(i%5 == 0) /* we print 5 buckets in a row */ printf("\n%02d: ", i); printf("%15lld ", histogram[i].d_cnt); total_cnt += histogram[i].d_cnt; /* we also want to make sure the total distance count is correct */ if(i == num_buckets - 1) printf("\n T:%lld \n", total_cnt); else printf("| "); } } /* MAIN */ int main(int argc, char **argv) { PDH_acnt = atoi(argv[1]); PDH_res = atof(argv[2]); num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1; size_t histogramSize = sizeof(bucket)*num_buckets; size_t atomSize = sizeof(atom)*PDH_acnt; 
histogram = (bucket *)malloc(histogramSize); atom_list = (atom *)malloc(atomSize); srand(1); /* uniform distribution */ for(int i = 0; i < PDH_acnt; i++) { atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE; atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE; atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE; } /* Malloc Space on Device, copy to Device */ bucket *d_histogram = NULL; atom *d_atom_list = NULL; /* Error Checks */ checkError( hipMalloc((void**) &d_histogram, histogramSize), "Malloc Histogram"); checkError( hipMalloc((void**) &d_atom_list, atomSize), "Malloc Atom List"); checkError( hipMemcpy(d_histogram, histogram, histogramSize, hipMemcpyHostToDevice), "Copy Histogram to Device"); checkError( hipMemcpy(d_atom_list, atom_list, atomSize, hipMemcpyHostToDevice), "Copy Atom_List to Device"); /* start counting time */ gettimeofday(&startTime, &Idunno); /* CUDA Kernel Call */ hipLaunchKernelGGL(( PDH_baseline) , dim3(ceil(PDH_acnt/32)), dim3(32) , 0, 0, d_histogram, d_atom_list, PDH_res, PDH_acnt); /* Checks Cuda Error*/ checkError(hipGetLastError(), "Checking Last Error, Kernel Launch"); checkError( hipMemcpy(histogram, d_histogram, histogramSize, hipMemcpyDeviceToHost), "Copy Device Histogram to Host"); /* check the total running time */ report_running_time(); /* print out the histogram */ output_histogram(histogram); /* Error Checks */ checkError(hipFree(d_histogram), "Free Device Histogram"); checkError(hipFree(d_atom_list), "Free Device Atom_List"); /* Free Memory */ free(histogram); free(atom_list); /* Reset */ checkError(hipDeviceReset(), "Device Reset"); return 0; }
4513ce713957ab727b690d06194c11aff2365a55.cu
/********************************************************************* By : Anfal AlYousufi Course : CIS 6930- Programming Massively Parallel Systems Project : 1 Date : May 26th 2016 Summer 2018 *********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> #include <cuda_runtime.h> #define BOX_SIZE 23000 /* size of the data box on one dimension */ /* descriptors for single atom in the tree */ typedef struct atomdesc { double x_pos; double y_pos; double z_pos; } atom; typedef struct hist_entry{ //float min; //float max; unsigned long long d_cnt; /* need a long long type as the count might be huge */ } bucket; bucket * histogram; /* list of all buckets in the histogram */ long long PDH_acnt; /* total number of data points */ int num_buckets; /* total number of buckets in the histogram */ double PDH_res; /* value of w */ atom * atom_list; /* list of all data points */ /* These are for an old way of tracking time */ struct timezone Idunno; struct timeval startTime, endTime; // // Checking for CUDA Error // void checkError(cudaError_t e, const char out[]){ if(e != cudaSuccess){ printf("There is a CUDA Error: %s, %s \n", out, cudaGetErrorString(e)); exit(EXIT_FAILURE); } } // // distance of two points in the atom_list // __device__ double p2p_distance(atom *l, int ind1, int ind2) { double x1 = l[ind1].x_pos; double x2 = l[ind2].x_pos; double y1 = l[ind1].y_pos; double y2 = l[ind2].y_pos; double z1 = l[ind1].z_pos; double z2 = l[ind2].z_pos; return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2)); } // //SDH solution in a single CPU thread // __global__ void PDH_baseline(bucket *histogram_in, atom *list, double width, int size) { int i, j, h_pos; double dist; i = blockIdx.x * blockDim.x + threadIdx.x; j = i + 1; for(int x = j; x < size; x++){ dist = p2p_distance(list,i,x); h_pos = (int) (dist/ width); atomicAdd( &histogram_in[h_pos].d_cnt, 1); } } // // set a checkpoint // and // show running time in seconds // double report_running_time() { long sec_diff, usec_diff; gettimeofday(&endTime, &Idunno); sec_diff = endTime.tv_sec - startTime.tv_sec; usec_diff= endTime.tv_usec-startTime.tv_usec; if(usec_diff < 0) { sec_diff --; usec_diff += 1000000; } printf("Running time for GPU version: %ld.%06lds\n", sec_diff, usec_diff); return (double)(sec_diff*1.0 + usec_diff/1000000.0); } /* brute-force solution in a GPU thread */ __global__ void PDH2D_baseline(bucket *histogram, atom *Atomlist, double w){ int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if(i < j){ double dist = p2p_distance(Atomlist, i, j); int h_pos = (int)(dist / w); histogram[h_pos].d_cnt++; printf("%d, %d : %d, %f \n", i, j, h_pos, dist); } } /* print the counts in all buckets of the histogram */ void output_histogram(bucket *histogram){ int i; long long total_cnt = 0; for(i=0; i< num_buckets; i++) { if(i%5 == 0) /* we print 5 buckets in a row */ printf("\n%02d: ", i); printf("%15lld ", histogram[i].d_cnt); total_cnt += histogram[i].d_cnt; /* we also want to make sure the total distance count is correct */ if(i == num_buckets - 1) printf("\n T:%lld \n", total_cnt); else printf("| "); } } /* MAIN */ int main(int argc, char **argv) { PDH_acnt = atoi(argv[1]); PDH_res = atof(argv[2]); num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1; size_t histogramSize = sizeof(bucket)*num_buckets; size_t atomSize = sizeof(atom)*PDH_acnt; histogram = (bucket *)malloc(histogramSize); atom_list = (atom 
*)malloc(atomSize); srand(1); /* uniform distribution */ for(int i = 0; i < PDH_acnt; i++) { atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE; atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE; atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE; } /* Malloc Space on Device, copy to Device */ bucket *d_histogram = NULL; atom *d_atom_list = NULL; /* Error Checks */ checkError( cudaMalloc((void**) &d_histogram, histogramSize), "Malloc Histogram"); checkError( cudaMalloc((void**) &d_atom_list, atomSize), "Malloc Atom List"); checkError( cudaMemcpy(d_histogram, histogram, histogramSize, cudaMemcpyHostToDevice), "Copy Histogram to Device"); checkError( cudaMemcpy(d_atom_list, atom_list, atomSize, cudaMemcpyHostToDevice), "Copy Atom_List to Device"); /* start counting time */ gettimeofday(&startTime, &Idunno); /* CUDA Kernel Call */ PDH_baseline <<<ceil(PDH_acnt/32), 32 >>> (d_histogram, d_atom_list, PDH_res, PDH_acnt); /* Checks Cuda Error*/ checkError(cudaGetLastError(), "Checking Last Error, Kernel Launch"); checkError( cudaMemcpy(histogram, d_histogram, histogramSize, cudaMemcpyDeviceToHost), "Copy Device Histogram to Host"); /* check the total running time */ report_running_time(); /* print out the histogram */ output_histogram(histogram); /* Error Checks */ checkError(cudaFree(d_histogram), "Free Device Histogram"); checkError(cudaFree(d_atom_list), "Free Device Atom_List"); /* Free Memory */ free(histogram); free(atom_list); /* Reset */ checkError(cudaDeviceReset(), "Device Reset"); return 0; }
017bbe5476caeb067125484a7a789b07ed8fbe4b.hip
// !!! This is a file automatically generated by hipify!!! #define LIMIT -999 #define BLOCK_SIZE 16 #include <stdio.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "needle_gpu.h" // includes, kernels #include "needle_kernel_diagonal.cu" /* Private structure, do not bother */ struct needle_context { unsigned int gpu_num; char *sequence_set1; char *sequence_set2; unsigned int *pos1; unsigned int *pos2; int *score_matrix; unsigned int *pos_matrix; unsigned int max_pair_no; short penalty; // Grunt work... eww char *d_sequence_set1_h1, *d_sequence_set2_h1, *d_sequence_set1_h2, *d_sequence_set2_h2; unsigned int *d_pos1_h1, *d_pos2_h1, *d_pos_matrix_h1, *d_pos1_h2, *d_pos2_h2, *d_pos_matrix_h2; int *d_score_matrix_h1, *d_score_matrix_h2; hipStream_t *stream1, *stream2; unsigned int half_b, other_half_b, max_length_per_seq; double total_kernel_time, total_memtransfer_time, total_memtransfer_initial_time; char * gpu_name; unsigned int optimal_batch_size; }; int check_mappable_host ( hipDeviceProp_t * p ) { int support = p->canMapHostMemory; if(support == 0) { printf("%s does not support mapping host memory.\n", p->name); } else { printf("%s supports mapping host memory.\n",p->name); } return support; } double gettime(){ struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } void memcpy_and_run ( needle_context * ctx, unsigned int begin, unsigned int end, hipStream_t * stream, char *sequence_set1, char *sequence_set2, char *d_sequence_set1, char *d_sequence_set2, unsigned int *pos1, unsigned int *pos2, unsigned int *d_pos1, unsigned int *d_pos2, int *score_matrix, unsigned int *pos_matrix, int *d_score_matrix, unsigned int *d_pos_matrix, short penalty) { unsigned int batch_size = end-begin; // Memcpy to device double start_marker = 0; start_marker = gettime(); #ifdef VERBOSE printf("-- Start calculation from %d to %d --\n", begin, end); #endif #ifdef DUAL_BUFFERING hipMemcpyAsync( d_sequence_set1, sequence_set1 + pos1[begin], sizeof(char)*(pos1[end] - pos1[begin]), hipMemcpyHostToDevice, *stream); hipMemcpyAsync( d_sequence_set2, sequence_set2 + pos2[begin], sizeof(char)*(pos2[end] - pos2[begin]), hipMemcpyHostToDevice, *stream); hipMemcpyAsync( d_pos1, pos1 /*+ begin*/, sizeof(unsigned int)*(batch_size+1), hipMemcpyHostToDevice, *stream ); hipMemcpyAsync( d_pos2, pos2 /*+ begin*/, sizeof(unsigned int)*(batch_size+1), hipMemcpyHostToDevice, *stream ); hipMemcpyAsync( d_pos_matrix, pos_matrix /*+ begin*/, sizeof(unsigned int)*(batch_size+1), hipMemcpyHostToDevice, *stream ); #else //printf("-- Start calculation from %d to %d cp1 --\n", begin, end); hipMemcpy( d_sequence_set1, sequence_set1 + pos1[begin], sizeof(char)*(pos1[end] - pos1[begin]), hipMemcpyHostToDevice ); //printf("-- Start calculation from %d to %d cp2 --\n", begin, end); hipMemcpy( d_sequence_set2, sequence_set2 + pos2[begin], sizeof(char)*(pos2[end] - pos2[begin]), hipMemcpyHostToDevice ); //printf("-- Start calculation from %d to %d cp3 --\n", begin, end); hipMemcpy( d_pos1, pos1 /*+ begin*/, sizeof(unsigned int)*(batch_size+1), hipMemcpyHostToDevice ); //printf("-- Start calculation from %d to %d cp4 --\n", begin, end); hipMemcpy( d_pos2, pos2 /*+ begin*/, sizeof(unsigned int)*(batch_size+1), hipMemcpyHostToDevice ); //printf("-- Start calculation from %d to %d cp5 --\n", begin, end); hipMemcpy( d_pos_matrix, pos_matrix /*+ begin*/, sizeof(unsigned int)*(batch_size+1), hipMemcpyHostToDevice ); #endif ctx->total_memtransfer_initial_time += gettime() - start_marker; #ifdef VERBOSE printf("\t [%d - 
%d] Memcpy CPU-GPU: %f\n", begin, end, gettime() - start_marker); #endif start_marker = gettime(); #ifdef DUAL_BUFFERING hipLaunchKernelGGL(( needleman_cuda_diagonal), dim3(batch_size),dim3(512), 0, *stream, d_sequence_set1, d_sequence_set2, d_pos1, d_pos2, d_score_matrix, d_pos_matrix, batch_size, penalty); #else hipLaunchKernelGGL(( needleman_cuda_diagonal), dim3(batch_size),dim3(512), 0, 0, d_sequence_set1, d_sequence_set2, d_pos1, d_pos2, d_score_matrix, d_pos_matrix, batch_size, penalty); #endif //hipDeviceSynchronize(); ctx->total_kernel_time += gettime() - start_marker; #ifdef VERBOSE printf("\t [%d - %d] Kernel: %f\n", begin, end, gettime() - start_marker); #endif start_marker = gettime(); #ifdef DUAL_BUFFERING hipMemcpyAsync( score_matrix + pos_matrix[begin], d_score_matrix, sizeof(int)*(pos_matrix[end] - pos_matrix[begin]), hipMemcpyDeviceToHost, *stream ); #else hipMemcpy( score_matrix + pos_matrix[begin], d_score_matrix, sizeof(int)*(pos_matrix[end] - pos_matrix[begin]), hipMemcpyDeviceToHost ); #endif ctx->total_memtransfer_time += gettime() - start_marker; #ifdef VERBOSE printf("\t [%d - %d] Memcpy GPU-CPU: %f\n", begin, end, gettime() - start_marker); #endif } void needleman_gpu(char *sequence_set1, char *sequence_set2, unsigned int *pos1, unsigned int *pos2, int *score_matrix, unsigned int *pos_matrix, unsigned int max_pair_no, short penalty, char *d_sequence_set1_h1, char *d_sequence_set2_h1, char *d_sequence_set1_h2, char *d_sequence_set2_h2, unsigned int *d_pos1_h1, unsigned int *d_pos2_h1, unsigned int *d_pos_matrix_h1, unsigned int *d_pos1_h2, unsigned int *d_pos2_h2, unsigned int *d_pos_matrix_h2, int *d_score_matrix_h1, int *d_score_matrix_h2, hipStream_t * stream1, hipStream_t * stream2, needle_context * ctx ) { bool done = false; unsigned int start = 0; unsigned int end = 0; bool turn = true; while (!done) { int tmp_batch_sz = turn ? ctx->half_b : ctx->other_half_b; if (start + tmp_batch_sz >= max_pair_no) { end = max_pair_no; done = true; } else { end = start + tmp_batch_sz; } memcpy_and_run ( ctx, start, end, turn ? stream1 : stream2 , sequence_set1, sequence_set2, turn ? d_sequence_set1_h1 : d_sequence_set1_h2, turn ? d_sequence_set2_h1 : d_sequence_set2_h2, pos1, pos2, turn ? d_pos1_h1 : d_pos1_h2, turn ? d_pos2_h1 : d_pos2_h2, score_matrix, pos_matrix, turn ? d_score_matrix_h1 : d_score_matrix_h2, turn ? d_pos_matrix_h1 : d_pos_matrix_h2, penalty); start = end; #ifdef DUAL_BUFFERING turn = !turn; #endif } hipDeviceSynchronize(); printf("__CSV__,%s,%d,%f,%f,%f\n", ctx->gpu_name, ctx->optimal_batch_size, ctx->total_memtransfer_initial_time, ctx->total_kernel_time, ctx->total_memtransfer_time); } void * needle_prepare( const int gpu_num, unsigned int max_length_per_seq, unsigned int * optimal_batch_size, char * gpu_name ) { printf("NEEDLEMAN MODULE PREPPING CPU...\n", 0); hipSetDevice(gpu_num); hipDeviceProp_t * prop = new hipDeviceProp_t; hipGetDeviceProperties(prop, gpu_num); check_mappable_host (prop); strncpy (gpu_name, prop->name, 256); delete prop; size_t freeMem = 0; size_t totalMem = 0; hipMemGetInfo(&freeMem, &totalMem); printf("GPU Memory avaliable: Free: %lu, Total: %lu\n",freeMem/1024/1024, totalMem/1024/1024); unsigned int eachSeqMem = sizeof(char)*max_length_per_seq*2 + sizeof(int)*(max_length_per_seq+1)*(max_length_per_seq+1) + sizeof(unsigned int)*3; unsigned int batch_size = totalMem * 0.7 / eachSeqMem; // Safety reasons... 
hipStream_t * stream1 = new hipStream_t; hipStream_t * stream2 = new hipStream_t; unsigned int half_b, other_half_b; #ifdef DUAL_BUFFERING half_b = batch_size / 2; other_half_b = batch_size - half_b; hipStreamCreate(stream1); hipStreamCreate(stream2); #else half_b = batch_size; #endif printf("Each batch will be doing this many pairs: %d\n", batch_size); * optimal_batch_size = batch_size; struct needle_context * internal_ctx = new needle_context; internal_ctx->gpu_num = gpu_num; internal_ctx->half_b = half_b; internal_ctx->other_half_b = other_half_b; internal_ctx->gpu_name = (char *) malloc(256); strncpy (internal_ctx->gpu_name, gpu_name, 256); internal_ctx->stream1 = stream1; internal_ctx->stream2 = stream2; internal_ctx->optimal_batch_size = * optimal_batch_size; return (void *) internal_ctx; } void needle_allocate( void *ctx, char *sequence_set1, char *sequence_set2, unsigned int *pos1, unsigned int *pos2, int *score_matrix, unsigned int *pos_matrix ) { double start_marker; // Start time marker struct needle_context *internal_ctx = (needle_context *) ctx; unsigned int half_b, other_half_b; half_b = internal_ctx->half_b; other_half_b = internal_ctx->other_half_b; // First we need to see how to devide the memory... // Query the device capabilities to see how much we can allocate for this problem //////////////////////////////////////////////////////////////////////////// // This implementation comes with the free assumption that // all sequences will be having the same size :'( char *d_sequence_set1_h1, *d_sequence_set2_h1, *d_sequence_set1_h2, *d_sequence_set2_h2; unsigned int *d_pos1_h1, *d_pos2_h1, *d_pos_matrix_h1, *d_pos1_h2, *d_pos2_h2, *d_pos_matrix_h2; int *d_score_matrix_h1, *d_score_matrix_h2; start_marker = gettime(); // Allocating memory for both halves // First half hipMalloc( (void**)&d_sequence_set1_h1, sizeof(char)*(pos1[1]*half_b) ); hipMalloc( (void**)&d_sequence_set2_h1, sizeof(char)*(pos1[1]*half_b)) ; hipMalloc( (void**)&d_score_matrix_h1, sizeof(int)*(pos_matrix[1]*half_b)) ; //hipMalloc( (void**)&d_score_matrix_h1, sizeof(int)*(pos_matrix[1]*half_b)) ; //hipHostGetDevicePointer( (void**)&d_score_matrix_h1, (void *) score_matrix, 0); hipMalloc( (void**)&d_pos1_h1, sizeof(unsigned int)*(half_b+1) ) ; hipMalloc( (void**)&d_pos2_h1, sizeof(unsigned int)*(half_b+1) ) ; hipMalloc( (void**)&d_pos_matrix_h1, sizeof(unsigned int)*(half_b+1) ) ; #ifdef DUAL_BUFFERING // Second half hipMalloc( (void**)&d_sequence_set1_h2, sizeof(char)*(pos1[1]*other_half_b) ); hipMalloc( (void**)&d_sequence_set2_h2, sizeof(char)*(pos2[1]*other_half_b)) ; hipMalloc( (void**)&d_score_matrix_h2, sizeof(int)*(pos_matrix[1]*other_half_b)) ; //hipMalloc( (void**)&d_score_matrix_h2, sizeof(int)*(pos_matrix[1]*other_half_b)) ; //hipHostGetDevicePointer( (void**)&d_score_matrix_h2, (void *) (score_matrix + pos_matrix[half_b]), 0); hipMalloc( (void**)&d_pos1_h2, sizeof(unsigned int)*(other_half_b+1) ); hipMalloc( (void**)&d_pos2_h2, sizeof(unsigned int)*(other_half_b+1) ) ; hipMalloc( (void**)&d_pos_matrix_h2, sizeof(unsigned int)*(other_half_b+1) ) ; #endif fprintf(stdout,"hipMalloc = %f\n", gettime()-start_marker); //////////////////////////////////////////////////////////////////////////// // WARNING BOILERPLATE CODE ! // Jesus, why I'm doing this? - Huan. 
internal_ctx->sequence_set1 = sequence_set1; internal_ctx->sequence_set2 = sequence_set2; internal_ctx->pos1 = pos1; internal_ctx->pos2 = pos2; internal_ctx->score_matrix = score_matrix; internal_ctx->pos_matrix = pos_matrix; internal_ctx->d_sequence_set1_h1 = d_sequence_set1_h1; internal_ctx->d_sequence_set2_h1 = d_sequence_set2_h1; internal_ctx->d_sequence_set1_h2 = d_sequence_set1_h2; internal_ctx->d_sequence_set2_h2 = d_sequence_set2_h2; internal_ctx->d_pos1_h1 = d_pos1_h1; internal_ctx->d_pos2_h1 = d_pos2_h1; internal_ctx->d_pos_matrix_h1 = d_pos_matrix_h1; internal_ctx->d_pos1_h2 = d_pos1_h2; internal_ctx->d_pos2_h2 = d_pos2_h2; internal_ctx->d_pos_matrix_h2 = d_pos_matrix_h2; internal_ctx->d_score_matrix_h1 = d_score_matrix_h1; internal_ctx->d_score_matrix_h2 = d_score_matrix_h2; internal_ctx->penalty = -10; internal_ctx->total_kernel_time = 0; internal_ctx->total_memtransfer_time = 0; internal_ctx->total_memtransfer_initial_time = 0; printf("-- NEEDLEMAN MODULE INITIALIZING DONE --\n", 0); } void needle_align(void * ctx, int num_pairs) { //////////////////////////////////////////////////////////////////////////// // WARNING BOILERPLATE CODE ! struct needle_context *internal_ctx = (needle_context *) ctx; needleman_gpu( internal_ctx->sequence_set1, internal_ctx->sequence_set2, internal_ctx->pos1, internal_ctx->pos2, internal_ctx->score_matrix, internal_ctx->pos_matrix, num_pairs, internal_ctx->penalty, internal_ctx->d_sequence_set1_h1, internal_ctx->d_sequence_set2_h1, internal_ctx->d_sequence_set1_h2, internal_ctx->d_sequence_set2_h2, internal_ctx->d_pos1_h1, internal_ctx->d_pos2_h1, internal_ctx->d_pos_matrix_h1, internal_ctx->d_pos1_h2, internal_ctx->d_pos2_h2, internal_ctx->d_pos_matrix_h2, internal_ctx->d_score_matrix_h1, internal_ctx->d_score_matrix_h2, internal_ctx->stream1, internal_ctx->stream2, internal_ctx ); } void needle_finalize(void * ctx) { struct needle_context *internal_ctx = static_cast<struct needle_context *>(ctx); hipFree(internal_ctx->d_sequence_set1_h1); hipFree(internal_ctx->d_sequence_set2_h1); hipFree(internal_ctx->d_pos1_h1); hipFree(internal_ctx->d_pos2_h2); hipFree(internal_ctx->d_pos_matrix_h1); hipFree(internal_ctx->d_score_matrix_h1); #ifdef DUAL_BUFFERING hipFree(internal_ctx->d_sequence_set1_h2); hipFree(internal_ctx->d_sequence_set2_h2); hipFree(internal_ctx->d_pos1_h2); hipFree(internal_ctx->d_pos2_h2); hipFree(internal_ctx->d_pos_matrix_h2); hipFree(internal_ctx->d_score_matrix_h2); hipStreamDestroy(*(internal_ctx->stream1)); hipStreamDestroy(*(internal_ctx->stream2)); #endif delete internal_ctx->gpu_name; delete internal_ctx->stream1; delete internal_ctx->stream2; delete(internal_ctx); }
017bbe5476caeb067125484a7a789b07ed8fbe4b.cu
#define LIMIT -999 #define BLOCK_SIZE 16 #include <stdio.h> #include <cuda.h> #include <sys/time.h> #include "needle_gpu.h" // includes, kernels #include "needle_kernel_diagonal.cu" /* Private structure, do not bother */ struct needle_context { unsigned int gpu_num; char *sequence_set1; char *sequence_set2; unsigned int *pos1; unsigned int *pos2; int *score_matrix; unsigned int *pos_matrix; unsigned int max_pair_no; short penalty; // Grunt work... eww char *d_sequence_set1_h1, *d_sequence_set2_h1, *d_sequence_set1_h2, *d_sequence_set2_h2; unsigned int *d_pos1_h1, *d_pos2_h1, *d_pos_matrix_h1, *d_pos1_h2, *d_pos2_h2, *d_pos_matrix_h2; int *d_score_matrix_h1, *d_score_matrix_h2; cudaStream_t *stream1, *stream2; unsigned int half_b, other_half_b, max_length_per_seq; double total_kernel_time, total_memtransfer_time, total_memtransfer_initial_time; char * gpu_name; unsigned int optimal_batch_size; }; int check_mappable_host ( cudaDeviceProp * p ) { int support = p->canMapHostMemory; if(support == 0) { printf("%s does not support mapping host memory.\n", p->name); } else { printf("%s supports mapping host memory.\n",p->name); } return support; } double gettime(){ struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } void memcpy_and_run ( needle_context * ctx, unsigned int begin, unsigned int end, cudaStream_t * stream, char *sequence_set1, char *sequence_set2, char *d_sequence_set1, char *d_sequence_set2, unsigned int *pos1, unsigned int *pos2, unsigned int *d_pos1, unsigned int *d_pos2, int *score_matrix, unsigned int *pos_matrix, int *d_score_matrix, unsigned int *d_pos_matrix, short penalty) { unsigned int batch_size = end-begin; // Memcpy to device double start_marker = 0; start_marker = gettime(); #ifdef VERBOSE printf("-- Start calculation from %d to %d --\n", begin, end); #endif #ifdef DUAL_BUFFERING cudaMemcpyAsync( d_sequence_set1, sequence_set1 + pos1[begin], sizeof(char)*(pos1[end] - pos1[begin]), cudaMemcpyHostToDevice, *stream); cudaMemcpyAsync( d_sequence_set2, sequence_set2 + pos2[begin], sizeof(char)*(pos2[end] - pos2[begin]), cudaMemcpyHostToDevice, *stream); cudaMemcpyAsync( d_pos1, pos1 /*+ begin*/, sizeof(unsigned int)*(batch_size+1), cudaMemcpyHostToDevice, *stream ); cudaMemcpyAsync( d_pos2, pos2 /*+ begin*/, sizeof(unsigned int)*(batch_size+1), cudaMemcpyHostToDevice, *stream ); cudaMemcpyAsync( d_pos_matrix, pos_matrix /*+ begin*/, sizeof(unsigned int)*(batch_size+1), cudaMemcpyHostToDevice, *stream ); #else //printf("-- Start calculation from %d to %d cp1 --\n", begin, end); cudaMemcpy( d_sequence_set1, sequence_set1 + pos1[begin], sizeof(char)*(pos1[end] - pos1[begin]), cudaMemcpyHostToDevice ); //printf("-- Start calculation from %d to %d cp2 --\n", begin, end); cudaMemcpy( d_sequence_set2, sequence_set2 + pos2[begin], sizeof(char)*(pos2[end] - pos2[begin]), cudaMemcpyHostToDevice ); //printf("-- Start calculation from %d to %d cp3 --\n", begin, end); cudaMemcpy( d_pos1, pos1 /*+ begin*/, sizeof(unsigned int)*(batch_size+1), cudaMemcpyHostToDevice ); //printf("-- Start calculation from %d to %d cp4 --\n", begin, end); cudaMemcpy( d_pos2, pos2 /*+ begin*/, sizeof(unsigned int)*(batch_size+1), cudaMemcpyHostToDevice ); //printf("-- Start calculation from %d to %d cp5 --\n", begin, end); cudaMemcpy( d_pos_matrix, pos_matrix /*+ begin*/, sizeof(unsigned int)*(batch_size+1), cudaMemcpyHostToDevice ); #endif ctx->total_memtransfer_initial_time += gettime() - start_marker; #ifdef VERBOSE printf("\t [%d - %d] Memcpy CPU-GPU: %f\n", begin, end, gettime() - 
start_marker); #endif start_marker = gettime(); #ifdef DUAL_BUFFERING needleman_cuda_diagonal<<<batch_size,512, 0, *stream>>>(d_sequence_set1, d_sequence_set2, d_pos1, d_pos2, d_score_matrix, d_pos_matrix, batch_size, penalty); #else needleman_cuda_diagonal<<<batch_size,512>>>(d_sequence_set1, d_sequence_set2, d_pos1, d_pos2, d_score_matrix, d_pos_matrix, batch_size, penalty); #endif //cudaDeviceSynchronize(); ctx->total_kernel_time += gettime() - start_marker; #ifdef VERBOSE printf("\t [%d - %d] Kernel: %f\n", begin, end, gettime() - start_marker); #endif start_marker = gettime(); #ifdef DUAL_BUFFERING cudaMemcpyAsync( score_matrix + pos_matrix[begin], d_score_matrix, sizeof(int)*(pos_matrix[end] - pos_matrix[begin]), cudaMemcpyDeviceToHost, *stream ); #else cudaMemcpy( score_matrix + pos_matrix[begin], d_score_matrix, sizeof(int)*(pos_matrix[end] - pos_matrix[begin]), cudaMemcpyDeviceToHost ); #endif ctx->total_memtransfer_time += gettime() - start_marker; #ifdef VERBOSE printf("\t [%d - %d] Memcpy GPU-CPU: %f\n", begin, end, gettime() - start_marker); #endif } void needleman_gpu(char *sequence_set1, char *sequence_set2, unsigned int *pos1, unsigned int *pos2, int *score_matrix, unsigned int *pos_matrix, unsigned int max_pair_no, short penalty, char *d_sequence_set1_h1, char *d_sequence_set2_h1, char *d_sequence_set1_h2, char *d_sequence_set2_h2, unsigned int *d_pos1_h1, unsigned int *d_pos2_h1, unsigned int *d_pos_matrix_h1, unsigned int *d_pos1_h2, unsigned int *d_pos2_h2, unsigned int *d_pos_matrix_h2, int *d_score_matrix_h1, int *d_score_matrix_h2, cudaStream_t * stream1, cudaStream_t * stream2, needle_context * ctx ) { bool done = false; unsigned int start = 0; unsigned int end = 0; bool turn = true; while (!done) { int tmp_batch_sz = turn ? ctx->half_b : ctx->other_half_b; if (start + tmp_batch_sz >= max_pair_no) { end = max_pair_no; done = true; } else { end = start + tmp_batch_sz; } memcpy_and_run ( ctx, start, end, turn ? stream1 : stream2 , sequence_set1, sequence_set2, turn ? d_sequence_set1_h1 : d_sequence_set1_h2, turn ? d_sequence_set2_h1 : d_sequence_set2_h2, pos1, pos2, turn ? d_pos1_h1 : d_pos1_h2, turn ? d_pos2_h1 : d_pos2_h2, score_matrix, pos_matrix, turn ? d_score_matrix_h1 : d_score_matrix_h2, turn ? d_pos_matrix_h1 : d_pos_matrix_h2, penalty); start = end; #ifdef DUAL_BUFFERING turn = !turn; #endif } cudaDeviceSynchronize(); printf("__CSV__,%s,%d,%f,%f,%f\n", ctx->gpu_name, ctx->optimal_batch_size, ctx->total_memtransfer_initial_time, ctx->total_kernel_time, ctx->total_memtransfer_time); } void * needle_prepare( const int gpu_num, unsigned int max_length_per_seq, unsigned int * optimal_batch_size, char * gpu_name ) { printf("NEEDLEMAN MODULE PREPPING CPU...\n", 0); cudaSetDevice(gpu_num); cudaDeviceProp * prop = new cudaDeviceProp; cudaGetDeviceProperties(prop, gpu_num); check_mappable_host (prop); strncpy (gpu_name, prop->name, 256); delete prop; size_t freeMem = 0; size_t totalMem = 0; cudaMemGetInfo(&freeMem, &totalMem); printf("GPU Memory avaliable: Free: %lu, Total: %lu\n",freeMem/1024/1024, totalMem/1024/1024); unsigned int eachSeqMem = sizeof(char)*max_length_per_seq*2 + sizeof(int)*(max_length_per_seq+1)*(max_length_per_seq+1) + sizeof(unsigned int)*3; unsigned int batch_size = totalMem * 0.7 / eachSeqMem; // Safety reasons... 
cudaStream_t * stream1 = new cudaStream_t; cudaStream_t * stream2 = new cudaStream_t; unsigned int half_b, other_half_b; #ifdef DUAL_BUFFERING half_b = batch_size / 2; other_half_b = batch_size - half_b; cudaStreamCreate(stream1); cudaStreamCreate(stream2); #else half_b = batch_size; #endif printf("Each batch will be doing this many pairs: %d\n", batch_size); * optimal_batch_size = batch_size; struct needle_context * internal_ctx = new needle_context; internal_ctx->gpu_num = gpu_num; internal_ctx->half_b = half_b; internal_ctx->other_half_b = other_half_b; internal_ctx->gpu_name = (char *) malloc(256); strncpy (internal_ctx->gpu_name, gpu_name, 256); internal_ctx->stream1 = stream1; internal_ctx->stream2 = stream2; internal_ctx->optimal_batch_size = * optimal_batch_size; return (void *) internal_ctx; } void needle_allocate( void *ctx, char *sequence_set1, char *sequence_set2, unsigned int *pos1, unsigned int *pos2, int *score_matrix, unsigned int *pos_matrix ) { double start_marker; // Start time marker struct needle_context *internal_ctx = (needle_context *) ctx; unsigned int half_b, other_half_b; half_b = internal_ctx->half_b; other_half_b = internal_ctx->other_half_b; // First we need to see how to devide the memory... // Query the device capabilities to see how much we can allocate for this problem //////////////////////////////////////////////////////////////////////////// // This implementation comes with the free assumption that // all sequences will be having the same size :'( char *d_sequence_set1_h1, *d_sequence_set2_h1, *d_sequence_set1_h2, *d_sequence_set2_h2; unsigned int *d_pos1_h1, *d_pos2_h1, *d_pos_matrix_h1, *d_pos1_h2, *d_pos2_h2, *d_pos_matrix_h2; int *d_score_matrix_h1, *d_score_matrix_h2; start_marker = gettime(); // Allocating memory for both halves // First half cudaMalloc( (void**)&d_sequence_set1_h1, sizeof(char)*(pos1[1]*half_b) ); cudaMalloc( (void**)&d_sequence_set2_h1, sizeof(char)*(pos1[1]*half_b)) ; cudaMalloc( (void**)&d_score_matrix_h1, sizeof(int)*(pos_matrix[1]*half_b)) ; //cudaMalloc( (void**)&d_score_matrix_h1, sizeof(int)*(pos_matrix[1]*half_b)) ; //cudaHostGetDevicePointer( (void**)&d_score_matrix_h1, (void *) score_matrix, 0); cudaMalloc( (void**)&d_pos1_h1, sizeof(unsigned int)*(half_b+1) ) ; cudaMalloc( (void**)&d_pos2_h1, sizeof(unsigned int)*(half_b+1) ) ; cudaMalloc( (void**)&d_pos_matrix_h1, sizeof(unsigned int)*(half_b+1) ) ; #ifdef DUAL_BUFFERING // Second half cudaMalloc( (void**)&d_sequence_set1_h2, sizeof(char)*(pos1[1]*other_half_b) ); cudaMalloc( (void**)&d_sequence_set2_h2, sizeof(char)*(pos2[1]*other_half_b)) ; cudaMalloc( (void**)&d_score_matrix_h2, sizeof(int)*(pos_matrix[1]*other_half_b)) ; //cudaMalloc( (void**)&d_score_matrix_h2, sizeof(int)*(pos_matrix[1]*other_half_b)) ; //cudaHostGetDevicePointer( (void**)&d_score_matrix_h2, (void *) (score_matrix + pos_matrix[half_b]), 0); cudaMalloc( (void**)&d_pos1_h2, sizeof(unsigned int)*(other_half_b+1) ); cudaMalloc( (void**)&d_pos2_h2, sizeof(unsigned int)*(other_half_b+1) ) ; cudaMalloc( (void**)&d_pos_matrix_h2, sizeof(unsigned int)*(other_half_b+1) ) ; #endif fprintf(stdout,"cudaMalloc = %f\n", gettime()-start_marker); //////////////////////////////////////////////////////////////////////////// // WARNING BOILERPLATE CODE ! // Jesus, why I'm doing this? - Huan. 
internal_ctx->sequence_set1 = sequence_set1; internal_ctx->sequence_set2 = sequence_set2; internal_ctx->pos1 = pos1; internal_ctx->pos2 = pos2; internal_ctx->score_matrix = score_matrix; internal_ctx->pos_matrix = pos_matrix; internal_ctx->d_sequence_set1_h1 = d_sequence_set1_h1; internal_ctx->d_sequence_set2_h1 = d_sequence_set2_h1; internal_ctx->d_sequence_set1_h2 = d_sequence_set1_h2; internal_ctx->d_sequence_set2_h2 = d_sequence_set2_h2; internal_ctx->d_pos1_h1 = d_pos1_h1; internal_ctx->d_pos2_h1 = d_pos2_h1; internal_ctx->d_pos_matrix_h1 = d_pos_matrix_h1; internal_ctx->d_pos1_h2 = d_pos1_h2; internal_ctx->d_pos2_h2 = d_pos2_h2; internal_ctx->d_pos_matrix_h2 = d_pos_matrix_h2; internal_ctx->d_score_matrix_h1 = d_score_matrix_h1; internal_ctx->d_score_matrix_h2 = d_score_matrix_h2; internal_ctx->penalty = -10; internal_ctx->total_kernel_time = 0; internal_ctx->total_memtransfer_time = 0; internal_ctx->total_memtransfer_initial_time = 0; printf("-- NEEDLEMAN MODULE INITIALIZING DONE --\n", 0); } void needle_align(void * ctx, int num_pairs) { //////////////////////////////////////////////////////////////////////////// // WARNING BOILERPLATE CODE ! struct needle_context *internal_ctx = (needle_context *) ctx; needleman_gpu( internal_ctx->sequence_set1, internal_ctx->sequence_set2, internal_ctx->pos1, internal_ctx->pos2, internal_ctx->score_matrix, internal_ctx->pos_matrix, num_pairs, internal_ctx->penalty, internal_ctx->d_sequence_set1_h1, internal_ctx->d_sequence_set2_h1, internal_ctx->d_sequence_set1_h2, internal_ctx->d_sequence_set2_h2, internal_ctx->d_pos1_h1, internal_ctx->d_pos2_h1, internal_ctx->d_pos_matrix_h1, internal_ctx->d_pos1_h2, internal_ctx->d_pos2_h2, internal_ctx->d_pos_matrix_h2, internal_ctx->d_score_matrix_h1, internal_ctx->d_score_matrix_h2, internal_ctx->stream1, internal_ctx->stream2, internal_ctx ); } void needle_finalize(void * ctx) { struct needle_context *internal_ctx = static_cast<struct needle_context *>(ctx); cudaFree(internal_ctx->d_sequence_set1_h1); cudaFree(internal_ctx->d_sequence_set2_h1); cudaFree(internal_ctx->d_pos1_h1); cudaFree(internal_ctx->d_pos2_h2); cudaFree(internal_ctx->d_pos_matrix_h1); cudaFree(internal_ctx->d_score_matrix_h1); #ifdef DUAL_BUFFERING cudaFree(internal_ctx->d_sequence_set1_h2); cudaFree(internal_ctx->d_sequence_set2_h2); cudaFree(internal_ctx->d_pos1_h2); cudaFree(internal_ctx->d_pos2_h2); cudaFree(internal_ctx->d_pos_matrix_h2); cudaFree(internal_ctx->d_score_matrix_h2); cudaStreamDestroy(*(internal_ctx->stream1)); cudaStreamDestroy(*(internal_ctx->stream2)); #endif delete internal_ctx->gpu_name; delete internal_ctx->stream1; delete internal_ctx->stream2; delete(internal_ctx); }
423d7715585b277f83a8c3b879620f3d01380a5e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cumo_na_diagonal_index_stride_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t *idx = NULL; hipMalloc(&idx, XSIZE*YSIZE); size_t *idx0 = NULL; hipMalloc(&idx0, XSIZE*YSIZE); ssize_t s1 = 1; size_t k0 = 1; size_t k1 = 1; uint64_t n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cumo_na_diagonal_index_stride_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, idx,idx0,s1,k0,k1,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cumo_na_diagonal_index_stride_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, idx,idx0,s1,k0,k1,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cumo_na_diagonal_index_stride_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, idx,idx0,s1,k0,k1,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
423d7715585b277f83a8c3b879620f3d01380a5e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cumo_na_diagonal_index_stride_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t *idx = NULL; cudaMalloc(&idx, XSIZE*YSIZE); size_t *idx0 = NULL; cudaMalloc(&idx0, XSIZE*YSIZE); ssize_t s1 = 1; size_t k0 = 1; size_t k1 = 1; uint64_t n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cumo_na_diagonal_index_stride_kernel<<<gridBlock,threadBlock>>>(idx,idx0,s1,k0,k1,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cumo_na_diagonal_index_stride_kernel<<<gridBlock,threadBlock>>>(idx,idx0,s1,k0,k1,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cumo_na_diagonal_index_stride_kernel<<<gridBlock,threadBlock>>>(idx,idx0,s1,k0,k1,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f99dc73e1461bbcbf2975af7a0e9f77ee8742726.hip
// !!! This is a file automatically generated by hipify!!! extern "C" { #include "../shape/head.h" } #define GOLD 1.618034 #define GLIMIT 100.0 #define TINY 1.0e-20 #define SIGN(a,b) ((b) > 0.0 ? fabs(a) : -fabs(a)) #define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d); __host__ void mnbrak_gpu( double *ax, double *bx, double *cx, double *fa, double *fb, double *fc, double (*func) (double, struct vertices_t**, unsigned char*, unsigned char*, int*, int*, int*, int, int, hipStream_t*, double**), struct vertices_t **verts, unsigned char *htype, unsigned char *dtype, int *nframes, int *nviews, int *lc_n, int nsets, int nf, hipStream_t *bf_stream, double **fit_overflow) { double ulim,u,r,q,fu,dum; *fa = (*func)(*ax, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); *fb = (*func)(*bx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); if (*fb > *fa) { SHFT(dum,*ax,*bx,dum) SHFT(dum,*fb,*fa,dum) } *cx = (*bx)+GOLD*(*bx-*ax); *fc = (*func)(*cx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); while (*fb > *fc) { r = (*bx-*ax) * (*fb-*fc); q = (*bx-*cx) * (*fb-*fa); u = (*bx) - ((*bx-*cx)*q - (*bx-*ax)*r)/ (2.0 * SIGN( MAX( fabs(q-r),TINY), q-r)); ulim = (*bx) + GLIMIT * (*cx-*bx); if ((*bx-u)*(u-*cx) > 0.0) { fu=(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); if (fu < *fc) { *ax=(*bx); *bx=u; *fa=(*fb); *fb=fu; return; } else if (fu > *fb) { *cx = u; *fc = fu; return; } u = (*cx) + GOLD * (*cx-*bx); fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); } else if ((*cx-u) * (u-ulim) > 0.0) { fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); if (fu < *fc) { SHFT(*bx,*cx,u,*cx+GOLD*(*cx-*bx)) SHFT(*fb,*fc,fu,(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow)) } } else if ((u-ulim)*(ulim-*cx) >= 0.0) { u = ulim; fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); } else { u = (*cx)+GOLD*(*cx-*bx); fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); } SHFT(*ax,*bx,*cx,u) SHFT(*fa,*fb,*fc,fu) } } #undef GOLD #undef GLIMIT #undef TINY #undef MAX #undef SIGN #undef SHFT
f99dc73e1461bbcbf2975af7a0e9f77ee8742726.cu
extern "C" { #include "../shape/head.h" } #define GOLD 1.618034 #define GLIMIT 100.0 #define TINY 1.0e-20 #define SIGN(a,b) ((b) > 0.0 ? fabs(a) : -fabs(a)) #define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d); __host__ void mnbrak_gpu( double *ax, double *bx, double *cx, double *fa, double *fb, double *fc, double (*func) (double, struct vertices_t**, unsigned char*, unsigned char*, int*, int*, int*, int, int, cudaStream_t*, double**), struct vertices_t **verts, unsigned char *htype, unsigned char *dtype, int *nframes, int *nviews, int *lc_n, int nsets, int nf, cudaStream_t *bf_stream, double **fit_overflow) { double ulim,u,r,q,fu,dum; *fa = (*func)(*ax, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); *fb = (*func)(*bx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); if (*fb > *fa) { SHFT(dum,*ax,*bx,dum) SHFT(dum,*fb,*fa,dum) } *cx = (*bx)+GOLD*(*bx-*ax); *fc = (*func)(*cx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); while (*fb > *fc) { r = (*bx-*ax) * (*fb-*fc); q = (*bx-*cx) * (*fb-*fa); u = (*bx) - ((*bx-*cx)*q - (*bx-*ax)*r)/ (2.0 * SIGN( MAX( fabs(q-r),TINY), q-r)); ulim = (*bx) + GLIMIT * (*cx-*bx); if ((*bx-u)*(u-*cx) > 0.0) { fu=(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); if (fu < *fc) { *ax=(*bx); *bx=u; *fa=(*fb); *fb=fu; return; } else if (fu > *fb) { *cx = u; *fc = fu; return; } u = (*cx) + GOLD * (*cx-*bx); fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); } else if ((*cx-u) * (u-ulim) > 0.0) { fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); if (fu < *fc) { SHFT(*bx,*cx,u,*cx+GOLD*(*cx-*bx)) SHFT(*fb,*fc,fu,(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow)) } } else if ((u-ulim)*(ulim-*cx) >= 0.0) { u = ulim; fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); } else { u = (*cx)+GOLD*(*cx-*bx); fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); } SHFT(*ax,*bx,*cx,u) SHFT(*fa,*fb,*fc,fu) } } #undef GOLD #undef GLIMIT #undef TINY #undef MAX #undef SIGN #undef SHFT
7a02ed79a584a4e79d7c8d2f8a031ab8e64da48b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* * Walsh transforms belong to a class of generalized Fourier transformations. * They have applications in various fields of electrical engineering * and numeric theory. In this sample we demonstrate efficient implementation * of naturally-ordered Walsh transform * (also known as Walsh-Hadamard or Hadamard transform) in CUDA and its * particular application to dyadic convolution computation. 
* Refer to excellent Jorg Arndt's "Algorithms for Programmers" textbook * http://www.jjj.de/fxt/fxtbook.pdf (Chapter 22) * * Victor Podlozhnyuk ([email protected]) */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cutil_inline.h> //////////////////////////////////////////////////////////////////////////////// // Reference CPU FWT //////////////////////////////////////////////////////////////////////////////// extern"C" void fwtCPU(float *h_Output, float *h_Input, int log2N); extern"C" void slowWTcpu(float *h_Output, float *h_Input, int log2N); extern "C" void dyadicConvolutionCPU( float *h_Result, float *h_Data, float *h_Kernel, int log2dataN, int log2kernelN ); //////////////////////////////////////////////////////////////////////////////// // GPU FWT //////////////////////////////////////////////////////////////////////////////// #include "fastWalshTransform_kernel.cu" //////////////////////////////////////////////////////////////////////////////// // Data configuration //////////////////////////////////////////////////////////////////////////////// const int log2Kernel = 7; #ifndef __DEVICE_EMULATION__ const int log2Data = 23; #else const int log2Data = 15; #endif const int dataN = 1 << log2Data; const int kernelN = 1 << log2Kernel; const int DATA_SIZE = dataN * sizeof(float); const int KERNEL_SIZE = kernelN * sizeof(float); const double NOPS = 3.0 * (double)dataN * (double)log2Data / 2.0; //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]){ float *h_Data, *h_Kernel, *h_ResultCPU, *h_ResultGPU; float *d_Data, *d_Kernel; double delta, ref, sum_delta2, sum_ref2, L2norm, gpuTime; unsigned int hTimer; int i; // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else hipSetDevice( cutGetMaxGflopsDeviceId() ); cutilCheckError( cutCreateTimer(&hTimer) ); printf("Initializing data...\n"); printf("...allocating CPU memory\n"); cutilSafeMalloc( h_Kernel = (float *)malloc(KERNEL_SIZE) ); cutilSafeMalloc( h_Data = (float *)malloc(DATA_SIZE) ); cutilSafeMalloc( h_ResultCPU = (float *)malloc(DATA_SIZE) ); cutilSafeMalloc( h_ResultGPU = (float *)malloc(DATA_SIZE) ); printf("...allocating GPU memory\n"); cutilSafeCall( hipMalloc((void **)&d_Kernel, DATA_SIZE) ); cutilSafeCall( hipMalloc((void **)&d_Data, DATA_SIZE) ); printf("...generating data\n"); printf("Data length: %i; kernel length: %i\n", dataN, kernelN); srand(2007); for (i = 0; i < kernelN; i++) h_Kernel[i] = (float)rand() / (float)RAND_MAX; for (i = 0; i < dataN; i++) h_Data[i] = (float)rand() / (float)RAND_MAX; cutilSafeCall( hipMemset(d_Kernel, 0, DATA_SIZE) ); cutilSafeCall( hipMemcpy(d_Kernel, h_Kernel, KERNEL_SIZE, hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(d_Data, h_Data, DATA_SIZE, hipMemcpyHostToDevice) ); printf("Running GPU dyadic convolution using Fast Walsh Transform...\n"); cutilSafeCall( hipDeviceSynchronize() ); cutilCheckError( cutResetTimer(hTimer) ); cutilCheckError( cutStartTimer(hTimer) ); fwtBatchGPU(d_Data, 1, log2Data); fwtBatchGPU(d_Kernel, 1, log2Data); modulateGPU(d_Data, d_Kernel, dataN); fwtBatchGPU(d_Data, 1, log2Data); cutilSafeCall( hipDeviceSynchronize() ); cutilCheckError( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("GPU time: %f ms; GOP/s: %f\n", gpuTime, NOPS / (gpuTime * 0.001 * 
1E+9)); printf("Reading back GPU results...\n"); cutilSafeCall( hipMemcpy(h_ResultGPU, d_Data, DATA_SIZE, hipMemcpyDeviceToHost) ); printf("Running straightforward CPU dyadic convolution...\n"); dyadicConvolutionCPU(h_ResultCPU, h_Data, h_Kernel, log2Data, log2Kernel); printf("Comparing the results...\n"); sum_delta2 = 0; sum_ref2 = 0; for(i = 0; i < dataN; i++){ delta = h_ResultCPU[i] - h_ResultGPU[i]; ref = h_ResultCPU[i]; sum_delta2 += delta * delta; sum_ref2 += ref * ref; } L2norm = sqrt(sum_delta2 / sum_ref2); printf("L2 norm: %E\n", L2norm); printf((L2norm < 1e-6) ? "TEST PASSED\n" : "TEST FAILED\n"); printf("Shutting down...\n"); cutilCheckError( cutDeleteTimer(hTimer) ); cutilSafeCall( hipFree(d_Data) ); cutilSafeCall( hipFree(d_Kernel) ); free(h_ResultGPU); free(h_ResultCPU); free(h_Data); free(h_Kernel); hipDeviceReset(); cutilExit(argc, argv); }
7a02ed79a584a4e79d7c8d2f8a031ab8e64da48b.cu
/* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* * Walsh transforms belong to a class of generalized Fourier transformations. * They have applications in various fields of electrical engineering * and numeric theory. In this sample we demonstrate efficient implementation * of naturally-ordered Walsh transform * (also known as Walsh-Hadamard or Hadamard transform) in CUDA and its * particular application to dyadic convolution computation. 
* Refer to excellent Jorg Arndt's "Algorithms for Programmers" textbook * http://www.jjj.de/fxt/fxtbook.pdf (Chapter 22) * * Victor Podlozhnyuk ([email protected]) */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cutil_inline.h> //////////////////////////////////////////////////////////////////////////////// // Reference CPU FWT //////////////////////////////////////////////////////////////////////////////// extern"C" void fwtCPU(float *h_Output, float *h_Input, int log2N); extern"C" void slowWTcpu(float *h_Output, float *h_Input, int log2N); extern "C" void dyadicConvolutionCPU( float *h_Result, float *h_Data, float *h_Kernel, int log2dataN, int log2kernelN ); //////////////////////////////////////////////////////////////////////////////// // GPU FWT //////////////////////////////////////////////////////////////////////////////// #include "fastWalshTransform_kernel.cu" //////////////////////////////////////////////////////////////////////////////// // Data configuration //////////////////////////////////////////////////////////////////////////////// const int log2Kernel = 7; #ifndef __DEVICE_EMULATION__ const int log2Data = 23; #else const int log2Data = 15; #endif const int dataN = 1 << log2Data; const int kernelN = 1 << log2Kernel; const int DATA_SIZE = dataN * sizeof(float); const int KERNEL_SIZE = kernelN * sizeof(float); const double NOPS = 3.0 * (double)dataN * (double)log2Data / 2.0; //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]){ float *h_Data, *h_Kernel, *h_ResultCPU, *h_ResultGPU; float *d_Data, *d_Kernel; double delta, ref, sum_delta2, sum_ref2, L2norm, gpuTime; unsigned int hTimer; int i; // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else cudaSetDevice( cutGetMaxGflopsDeviceId() ); cutilCheckError( cutCreateTimer(&hTimer) ); printf("Initializing data...\n"); printf("...allocating CPU memory\n"); cutilSafeMalloc( h_Kernel = (float *)malloc(KERNEL_SIZE) ); cutilSafeMalloc( h_Data = (float *)malloc(DATA_SIZE) ); cutilSafeMalloc( h_ResultCPU = (float *)malloc(DATA_SIZE) ); cutilSafeMalloc( h_ResultGPU = (float *)malloc(DATA_SIZE) ); printf("...allocating GPU memory\n"); cutilSafeCall( cudaMalloc((void **)&d_Kernel, DATA_SIZE) ); cutilSafeCall( cudaMalloc((void **)&d_Data, DATA_SIZE) ); printf("...generating data\n"); printf("Data length: %i; kernel length: %i\n", dataN, kernelN); srand(2007); for (i = 0; i < kernelN; i++) h_Kernel[i] = (float)rand() / (float)RAND_MAX; for (i = 0; i < dataN; i++) h_Data[i] = (float)rand() / (float)RAND_MAX; cutilSafeCall( cudaMemset(d_Kernel, 0, DATA_SIZE) ); cutilSafeCall( cudaMemcpy(d_Kernel, h_Kernel, KERNEL_SIZE, cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(d_Data, h_Data, DATA_SIZE, cudaMemcpyHostToDevice) ); printf("Running GPU dyadic convolution using Fast Walsh Transform...\n"); cutilSafeCall( cudaThreadSynchronize() ); cutilCheckError( cutResetTimer(hTimer) ); cutilCheckError( cutStartTimer(hTimer) ); fwtBatchGPU(d_Data, 1, log2Data); fwtBatchGPU(d_Kernel, 1, log2Data); modulateGPU(d_Data, d_Kernel, dataN); fwtBatchGPU(d_Data, 1, log2Data); cutilSafeCall( cudaThreadSynchronize() ); cutilCheckError( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("GPU time: %f ms; GOP/s: %f\n", gpuTime, NOPS / (gpuTime 
* 0.001 * 1E+9)); printf("Reading back GPU results...\n"); cutilSafeCall( cudaMemcpy(h_ResultGPU, d_Data, DATA_SIZE, cudaMemcpyDeviceToHost) ); printf("Running straightforward CPU dyadic convolution...\n"); dyadicConvolutionCPU(h_ResultCPU, h_Data, h_Kernel, log2Data, log2Kernel); printf("Comparing the results...\n"); sum_delta2 = 0; sum_ref2 = 0; for(i = 0; i < dataN; i++){ delta = h_ResultCPU[i] - h_ResultGPU[i]; ref = h_ResultCPU[i]; sum_delta2 += delta * delta; sum_ref2 += ref * ref; } L2norm = sqrt(sum_delta2 / sum_ref2); printf("L2 norm: %E\n", L2norm); printf((L2norm < 1e-6) ? "TEST PASSED\n" : "TEST FAILED\n"); printf("Shutting down...\n"); cutilCheckError( cutDeleteTimer(hTimer) ); cutilSafeCall( cudaFree(d_Data) ); cutilSafeCall( cudaFree(d_Kernel) ); free(h_ResultGPU); free(h_ResultCPU); free(h_Data); free(h_Kernel); cudaThreadExit(); cutilExit(argc, argv); }
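The record above computes dyadic convolution as a forward FWT of both inputs, a pointwise modulation, and a second FWT. For readers comparing the two variants, the host-side sketch below shows one common radix-2 Walsh-Hadamard butterfly of the kind fwtBatchGPU parallelizes; it assumes a power-of-two length and is written for this comparison only, not the sample's own fwtCPU reference (whose source is not part of this record).

// In-place Walsh-Hadamard transform of length n = 2^log2N. Each stage pairs
// elements whose indices differ in one bit and replaces them with their sum
// and difference. The unnormalized transform satisfies H*H = n*I, so the
// dyadic-convolution pipeline needs a 1/n factor somewhere; in the sample it
// presumably lives in modulateGPU (defined in fastWalshTransform_kernel.cu,
// which is not shown here).
void fwtReference(float *data, int log2N)
{
    const int n = 1 << log2N;
    for (int stride = 1; stride < n; stride <<= 1)
        for (int base = 0; base < n; base += 2 * stride)
            for (int j = 0; j < stride; j++) {
                float a = data[base + j];
                float b = data[base + j + stride];
                data[base + j]          = a + b;
                data[base + j + stride] = a - b;
            }
}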
36f8c12dac8d615e9a1c69f0d8f3904491e052f3.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------------ Python extension for CUDA routines used for voxel-driven scatter modelling (VSM) author: Pawel Markiewicz Copyrights: 2018 ------------------------------------------------------------------------*/ #include <Python.h> #include <stdlib.h> #include <numpy/arrayobject.h> #include "def.h" #include "sct.h" #include "sctaux.h" //=== PYTHON STUFF === //--- Docstrings static char module_docstring[] = "This module provides an interface for single scatter modelling."; static char scatter_docstring[] = "Estimates scatter event sinograms using mu-map and emission image (estimate)."; //--- Available functions static PyObject *mmr_scat(PyObject *self, PyObject *args); /* Module specification */ static PyMethodDef module_methods[] = { { "scatter", mmr_scat, METH_VARARGS, scatter_docstring }, { NULL, NULL, 0, NULL } }; //--- //--- Initialize the module PyMODINIT_FUNC initpetsct(void) //it HAS to be init______ and then the name of the shared lib. { PyObject *m = Py_InitModule3("petsct", module_methods, module_docstring); if (m == NULL) return; /* Load NumPy functionality. */ import_array(); } //--- //======================= //====================================================================================== // E S T I M A T I N G S C A T T E R E V E N T S //-------------------------------------------------------------------------------------- static PyObject *mmr_scat(PyObject *self, PyObject *args) { //Structure of constants Cnst Cnt; //Dictionary of scanner constants PyObject * o_mmrcnst; //Image structures IMflt emIMG; IMflt muIMG; // mu-map image PyObject * o_mumap; // mu-map mask (based on smoothed mu-map to enable further extension of attenuating/scattering voxels) PyObject * o_mumsk; // emiassion image PyObject * o_emimg; //3D scatter LUTs PyObject * o_sctLUT; // axial LUTs PyObject * o_axLUT; // transaxial LUT dictionary (e.g., 2D sino where dead bins are out). PyObject * o_txLUT; //output dictionary for scatter results PyObject * o_sctout; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Parse the input tuple */ if (!PyArg_ParseTuple(args, "OOOOOOOO", &o_sctout, &o_mumap, &o_mumsk, &o_emimg, &o_sctLUT, &o_txLUT, &o_axLUT, &o_mmrcnst)) return NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ //output dictionary for results PyObject* pd_xsxu = PyDict_GetItemString(o_sctout, "xsxu"); PyObject* pd_bind = PyDict_GetItemString(o_sctout, "bin_indx"); PyObject* pd_sval = PyDict_GetItemString(o_sctout, "sct_val"); PyObject* pd_sct3 = PyDict_GetItemString(o_sctout, "sct_3d"); //trasaxial crystal LUTs: PyObject* pd_crs = PyDict_GetItemString(o_txLUT, "crs"); //axial luts: PyObject* pd_sn1_rno = PyDict_GetItemString(o_axLUT, "sn1_rno"); PyObject* pd_sn1_sn11 = PyDict_GetItemString(o_axLUT, "sn1_sn11"); //scatter luts: PyObject* pd_sctaxR = PyDict_GetItemString(o_sctLUT, "sctaxR"); PyObject* pd_sctaxW = PyDict_GetItemString(o_sctLUT, "sctaxW"); PyObject* pd_offseg = PyDict_GetItemString(o_sctLUT, "offseg"); PyObject* pd_isrng = PyDict_GetItemString(o_sctLUT, "isrng"); PyObject* pd_KN = PyDict_GetItemString(o_sctLUT, "KN"); /* Interpret the input objects as numpy arrays. 
*/ PyObject* pd_aw = PyDict_GetItemString(o_mmrcnst, "Naw"); Cnt.aw = (int)PyInt_AsLong(pd_aw); PyObject* pd_A = PyDict_GetItemString(o_mmrcnst, "NSANGLES"); Cnt.A = (int)PyInt_AsLong(pd_A); PyObject* pd_W = PyDict_GetItemString(o_mmrcnst, "NSBINS"); Cnt.W = (int)PyInt_AsLong(pd_W); PyObject* pd_NSN1 = PyDict_GetItemString(o_mmrcnst, "NSN1"); Cnt.NSN1 = (int)PyInt_AsLong(pd_NSN1); PyObject* pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11"); Cnt.NSN11 = (int)PyInt_AsLong(pd_NSN11); PyObject* pd_NSN64 = PyDict_GetItemString(o_mmrcnst, "NSN64"); Cnt.NSN64 = (int)PyInt_AsLong(pd_NSN64); PyObject* pd_MRD = PyDict_GetItemString(o_mmrcnst, "MRD"); Cnt.MRD = (int)PyInt_AsLong(pd_MRD); PyObject* pd_NRNG = PyDict_GetItemString(o_mmrcnst, "NRNG"); Cnt.NRNG = (int)PyInt_AsLong(pd_NRNG); PyObject* pd_NSRNG = PyDict_GetItemString(o_mmrcnst, "NSRNG"); Cnt.NSRNG = (int)PyInt_AsLong(pd_NSRNG); PyObject* pd_NCRS = PyDict_GetItemString(o_mmrcnst, "NCRS"); Cnt.NCRS = (int)PyInt_AsLong(pd_NCRS); PyObject* pd_NSEG0 = PyDict_GetItemString(o_mmrcnst, "NSEG0"); Cnt.NSEG0 = (int)PyInt_AsLong(pd_NSEG0); PyObject* pd_ALPHA = PyDict_GetItemString(o_mmrcnst, "ALPHA"); Cnt.ALPHA = (float)PyFloat_AsDouble(pd_ALPHA); PyObject* pd_AXR = PyDict_GetItemString(o_mmrcnst, "AXR"); Cnt.AXR = (float)PyFloat_AsDouble(pd_AXR); PyObject* pd_RRING = PyDict_GetItemString(o_mmrcnst, "RE"); Cnt.RE = (float)PyFloat_AsDouble(pd_RRING); PyObject* pd_TOFBINN = PyDict_GetItemString(o_mmrcnst, "TOFBINN"); Cnt.TOFBINN = (int)PyInt_AsLong(pd_TOFBINN); PyObject* pd_TOFBINS = PyDict_GetItemString(o_mmrcnst, "TOFBINS"); Cnt.TOFBINS = (float)PyFloat_AsDouble(pd_TOFBINS); PyObject* pd_TOFBIND = PyDict_GetItemString(o_mmrcnst, "TOFBIND"); Cnt.TOFBIND = (float)PyFloat_AsDouble(pd_TOFBIND); PyObject* pd_ITOFBIND = PyDict_GetItemString(o_mmrcnst, "ITOFBIND"); Cnt.ITOFBIND = (float)PyFloat_AsDouble(pd_ITOFBIND); PyObject* pd_ETHRLD = PyDict_GetItemString(o_mmrcnst, "ETHRLD"); Cnt.ETHRLD = (float)PyFloat_AsDouble(pd_ETHRLD); PyObject* pd_COSUPSMX = PyDict_GetItemString(o_mmrcnst, "COSUPSMX"); Cnt.COSUPSMX = (float)PyFloat_AsDouble(pd_COSUPSMX); PyObject* pd_span = PyDict_GetItemString(o_mmrcnst, "SPN"); Cnt.SPN = (int)PyInt_AsLong(pd_span); PyObject* pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT"); Cnt.RNG_STRT = (char)PyInt_AS_LONG(pd_rngstrt); PyObject* pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END"); Cnt.RNG_END = (char)PyInt_AS_LONG(pd_rngend); PyObject* pd_verbose = PyDict_GetItemString(o_mmrcnst, "VERBOSE"); Cnt.VERBOSE = (bool)PyInt_AS_LONG(pd_verbose); PyObject* pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID"); Cnt.DEVID = (char)PyInt_AS_LONG(pd_devid); // PyObject* pd_ICOSSTP = PyDict_GetItemString(o_mmrcnst, "ICOSSTP"); // Cnt.ICOSSTP = (float) PyFloat_AsDouble(pd_ICOSSTP); // PyObject* pd_SS_IMZ = PyDict_GetItemString(o_mmrcnst, "SS_IMZ"); // Cnt.SS_IMZ = (float) PyFloat_AsDouble(pd_SS_IMZ); // PyObject* pd_SS_IMY = PyDict_GetItemString(o_mmrcnst, "SS_IMY"); // Cnt.SS_IMY = (float) PyFloat_AsDouble(pd_SS_IMY); // PyObject* pd_SS_IMX = PyDict_GetItemString(o_mmrcnst, "SS_IMX"); // Cnt.SS_IMX = (float) PyFloat_AsDouble(pd_SS_IMX); // PyObject* pd_SS_VXZ = PyDict_GetItemString(o_mmrcnst, "SS_VXZ"); // Cnt.SS_VXZ = (float) PyFloat_AsDouble(pd_SS_VXZ); // PyObject* pd_SS_VXY = PyDict_GetItemString(o_mmrcnst, "SS_VXY"); // Cnt.SS_VXY = (float) PyFloat_AsDouble(pd_SS_VXY); // PyObject* pd_SSE_IMZ = PyDict_GetItemString(o_mmrcnst, "SSE_IMZ"); // Cnt.SSE_IMZ = (float) PyFloat_AsDouble(pd_SSE_IMZ); // PyObject* pd_SSE_IMY = 
PyDict_GetItemString(o_mmrcnst, "SSE_IMY"); // Cnt.SSE_IMY = (float) PyFloat_AsDouble(pd_SSE_IMY); // PyObject* pd_SSE_IMX = PyDict_GetItemString(o_mmrcnst, "SSE_IMX"); // Cnt.SSE_IMX = (float) PyFloat_AsDouble(pd_SSE_IMX); // PyObject* pd_SSE_VXZ = PyDict_GetItemString(o_mmrcnst, "SSE_VXZ"); // Cnt.SSE_VXZ = (float) PyFloat_AsDouble(pd_SSE_VXZ); // PyObject* pd_SSE_VXY = PyDict_GetItemString(o_mmrcnst, "SSE_VXY"); // Cnt.SSE_VXY = (float) PyFloat_AsDouble(pd_SSE_VXY); //output results PyObject *p_xsxu = PyArray_FROM_OTF(pd_xsxu, NPY_INT8, NPY_IN_ARRAY); PyObject *p_bind = PyArray_FROM_OTF(pd_bind, NPY_INT32, NPY_IN_ARRAY); PyObject *p_sval = PyArray_FROM_OTF(pd_sval, NPY_FLOAT32, NPY_IN_ARRAY); PyObject *p_sct3 = PyArray_FROM_OTF(pd_sct3, NPY_FLOAT32, NPY_IN_ARRAY); //-- trasaxial crystal LUTs: PyObject *p_crs = PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_IN_ARRAY); PyObject *p_mumap = PyArray_FROM_OTF(o_mumap, NPY_FLOAT32, NPY_IN_ARRAY); PyObject *p_mumsk = PyArray_FROM_OTF(o_mumsk, NPY_INT8, NPY_IN_ARRAY); PyObject *p_emimg = PyArray_FROM_OTF(o_emimg, NPY_FLOAT32, NPY_IN_ARRAY); //-- //-- get the arrays form the dictionaries (objects) PyObject *p_sn1_rno = PyArray_FROM_OTF(pd_sn1_rno, NPY_INT16, NPY_IN_ARRAY); PyObject *p_sn1_sn11 = PyArray_FROM_OTF(pd_sn1_sn11, NPY_INT16, NPY_IN_ARRAY); PyObject *p_isrng = PyArray_FROM_OTF(pd_isrng, NPY_INT16, NPY_IN_ARRAY); PyObject *p_offseg = PyArray_FROM_OTF(pd_offseg, NPY_INT16, NPY_IN_ARRAY); PyObject *p_sctaxR = PyArray_FROM_OTF(pd_sctaxR, NPY_INT32, NPY_IN_ARRAY); PyObject *p_sctaxW = PyArray_FROM_OTF(pd_sctaxW, NPY_FLOAT32, NPY_IN_ARRAY); PyObject *p_KN = PyArray_FROM_OTF(pd_KN, NPY_FLOAT32, NPY_IN_ARRAY); //-- /* If that didn't work, throw an exception. */ if (p_mumap == NULL || p_mumsk == NULL || p_emimg == NULL || p_sn1_rno == NULL || p_sn1_sn11 == NULL || p_sctaxR == NULL || p_sctaxW == NULL || p_offseg == NULL || p_isrng == NULL || p_KN == NULL || p_crs == NULL || p_xsxu == NULL || p_bind == NULL || p_sval == NULL || p_sct3 == NULL) { Py_XDECREF(p_mumap); Py_XDECREF(p_mumsk); Py_XDECREF(p_emimg); Py_XDECREF(p_sn1_rno); Py_XDECREF(p_sn1_sn11); Py_XDECREF(p_offseg); Py_XDECREF(p_isrng); Py_XDECREF(p_sctaxR); Py_XDECREF(p_sctaxW); Py_XDECREF(p_KN); Py_XDECREF(p_crs); Py_XDECREF(p_xsxu); Py_XDECREF(p_bind); Py_XDECREF(p_sval); Py_XDECREF(p_sct3); printf("e> problem with getting the images and LUTs in C functions... 
:(\n"); return NULL; } //get the c-type arrays char *mumsk = (char*)PyArray_DATA(p_mumsk); float *mumap = (float*)PyArray_DATA(p_mumap); float *emimg = (float*)PyArray_DATA(p_emimg); short *sn1_rno = (short*)PyArray_DATA(p_sn1_rno); short *sn1_sn11 = (short*)PyArray_DATA(p_sn1_sn11); float *crs = (float*)PyArray_DATA(p_crs); //indecies of rings included in scatter estimation short *isrng = (short*)PyArray_DATA(p_isrng); //offset in each segment used for rings to sino LUT short *offseg = (short*)PyArray_DATA(p_offseg); //scatter sino indeces in axial dimensions through michelogram used for interpolation in 3D int *sctaxR = (int*)PyArray_DATA(p_sctaxR); //weightes for the interpolation in 3D (used together with the above) float *sctaxW = (float*)PyArray_DATA(p_sctaxW); //K-N probabilities in the LUT float *KNlut = (float*)PyArray_DATA(p_KN); //output structure scatOUT sctout; sctout.xsxu = (char*)PyArray_DATA(p_xsxu); sctout.bind = (int*)PyArray_DATA(p_bind); sctout.sval = (float*)PyArray_DATA(p_sval); sctout.s3d = (float*)PyArray_DATA(p_sct3); //Get the image dims muIMG.nvx = (size_t)(PyArray_DIM(p_mumap, 0) * PyArray_DIM(p_mumap, 1) * PyArray_DIM(p_mumap, 2)); emIMG.nvx = (size_t)(PyArray_DIM(p_emimg, 0) * PyArray_DIM(p_emimg, 1) * PyArray_DIM(p_emimg, 2)); if (muIMG.nvx != emIMG.nvx) printf("\nw> mu-map and emission image have different dims: mu.nvx = %d, em.nvx = %d\n", muIMG.nvx, emIMG.nvx); //get the stats in the img structure float mumx = -1e12, emmx = -1e12, mumn = 1e12, emmn = 1e12; for (int i = 0; i<muIMG.nvx; i++) { if (mumap[i]>mumx) mumx = mumap[i]; if (mumap[i]<mumn) mumn = mumap[i]; } for (int i = 0; i<emIMG.nvx; i++) { if (emimg[i]>emmx) emmx = emimg[i]; if (emimg[i]<emmn) emmn = emimg[i]; } muIMG.im = mumap; emIMG.im = emimg; muIMG.max = mumx; emIMG.max = emmx; muIMG.min = mumn; emIMG.min = emmn; muIMG.n10mx = 0; emIMG.n10mx = 0; for (int i = 0; i<muIMG.nvx; i++) if (mumap[i]>0.1*mumx) muIMG.n10mx += 1; for (int i = 0; i<emIMG.nvx; i++) if (emimg[i]>0.1*emmx) emIMG.n10mx += 1; if (Cnt.VERBOSE == 1) printf("i> mumx = %f, mumin = %f, emmx = %f, emmn = %f\n", mumx, mumn, emmx, emmn); // sets the device on which to calculate hipSetDevice(Cnt.DEVID); //<><><><><><><><><> S C A T T E R K E R N E L <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> prob_scatt(sctout, KNlut, mumsk, muIMG, emIMG, sctaxR, sctaxW, offseg, isrng, crs, sn1_rno, sn1_sn11, Cnt); hipDeviceSynchronize(); //<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> //Clean up if (Cnt.VERBOSE == 1) printf("ci> cleaning scatter variables..."); Py_DECREF(p_mumap); Py_DECREF(p_mumsk); Py_DECREF(p_emimg); Py_DECREF(p_sn1_rno); Py_DECREF(p_sn1_sn11); Py_DECREF(p_isrng); Py_DECREF(p_offseg); Py_DECREF(p_sctaxR); Py_DECREF(p_sctaxW); Py_DECREF(p_xsxu); Py_DECREF(p_bind); Py_DECREF(p_sval); Py_DECREF(p_sct3); Py_INCREF(Py_None); if (Cnt.VERBOSE == 1) printf("DONE.\n"); return Py_None; }
36f8c12dac8d615e9a1c69f0d8f3904491e052f3.cu
/*------------------------------------------------------------------------ Python extension for CUDA routines used for voxel-driven scatter modelling (VSM) author: Pawel Markiewicz Copyrights: 2018 ------------------------------------------------------------------------*/ #include <Python.h> #include <stdlib.h> #include <numpy/arrayobject.h> #include "def.h" #include "sct.h" #include "sctaux.h" //=== PYTHON STUFF === //--- Docstrings static char module_docstring[] = "This module provides an interface for single scatter modelling."; static char scatter_docstring[] = "Estimates scatter event sinograms using mu-map and emission image (estimate)."; //--- Available functions static PyObject *mmr_scat(PyObject *self, PyObject *args); /* Module specification */ static PyMethodDef module_methods[] = { { "scatter", mmr_scat, METH_VARARGS, scatter_docstring }, { NULL, NULL, 0, NULL } }; //--- //--- Initialize the module PyMODINIT_FUNC initpetsct(void) //it HAS to be init______ and then the name of the shared lib. { PyObject *m = Py_InitModule3("petsct", module_methods, module_docstring); if (m == NULL) return; /* Load NumPy functionality. */ import_array(); } //--- //======================= //====================================================================================== // E S T I M A T I N G S C A T T E R E V E N T S //-------------------------------------------------------------------------------------- static PyObject *mmr_scat(PyObject *self, PyObject *args) { //Structure of constants Cnst Cnt; //Dictionary of scanner constants PyObject * o_mmrcnst; //Image structures IMflt emIMG; IMflt muIMG; // mu-map image PyObject * o_mumap; // mu-map mask (based on smoothed mu-map to enable further extension of attenuating/scattering voxels) PyObject * o_mumsk; // emiassion image PyObject * o_emimg; //3D scatter LUTs PyObject * o_sctLUT; // axial LUTs PyObject * o_axLUT; // transaxial LUT dictionary (e.g., 2D sino where dead bins are out). PyObject * o_txLUT; //output dictionary for scatter results PyObject * o_sctout; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Parse the input tuple */ if (!PyArg_ParseTuple(args, "OOOOOOOO", &o_sctout, &o_mumap, &o_mumsk, &o_emimg, &o_sctLUT, &o_txLUT, &o_axLUT, &o_mmrcnst)) return NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ //output dictionary for results PyObject* pd_xsxu = PyDict_GetItemString(o_sctout, "xsxu"); PyObject* pd_bind = PyDict_GetItemString(o_sctout, "bin_indx"); PyObject* pd_sval = PyDict_GetItemString(o_sctout, "sct_val"); PyObject* pd_sct3 = PyDict_GetItemString(o_sctout, "sct_3d"); //trasaxial crystal LUTs: PyObject* pd_crs = PyDict_GetItemString(o_txLUT, "crs"); //axial luts: PyObject* pd_sn1_rno = PyDict_GetItemString(o_axLUT, "sn1_rno"); PyObject* pd_sn1_sn11 = PyDict_GetItemString(o_axLUT, "sn1_sn11"); //scatter luts: PyObject* pd_sctaxR = PyDict_GetItemString(o_sctLUT, "sctaxR"); PyObject* pd_sctaxW = PyDict_GetItemString(o_sctLUT, "sctaxW"); PyObject* pd_offseg = PyDict_GetItemString(o_sctLUT, "offseg"); PyObject* pd_isrng = PyDict_GetItemString(o_sctLUT, "isrng"); PyObject* pd_KN = PyDict_GetItemString(o_sctLUT, "KN"); /* Interpret the input objects as numpy arrays. 
*/ PyObject* pd_aw = PyDict_GetItemString(o_mmrcnst, "Naw"); Cnt.aw = (int)PyInt_AsLong(pd_aw); PyObject* pd_A = PyDict_GetItemString(o_mmrcnst, "NSANGLES"); Cnt.A = (int)PyInt_AsLong(pd_A); PyObject* pd_W = PyDict_GetItemString(o_mmrcnst, "NSBINS"); Cnt.W = (int)PyInt_AsLong(pd_W); PyObject* pd_NSN1 = PyDict_GetItemString(o_mmrcnst, "NSN1"); Cnt.NSN1 = (int)PyInt_AsLong(pd_NSN1); PyObject* pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11"); Cnt.NSN11 = (int)PyInt_AsLong(pd_NSN11); PyObject* pd_NSN64 = PyDict_GetItemString(o_mmrcnst, "NSN64"); Cnt.NSN64 = (int)PyInt_AsLong(pd_NSN64); PyObject* pd_MRD = PyDict_GetItemString(o_mmrcnst, "MRD"); Cnt.MRD = (int)PyInt_AsLong(pd_MRD); PyObject* pd_NRNG = PyDict_GetItemString(o_mmrcnst, "NRNG"); Cnt.NRNG = (int)PyInt_AsLong(pd_NRNG); PyObject* pd_NSRNG = PyDict_GetItemString(o_mmrcnst, "NSRNG"); Cnt.NSRNG = (int)PyInt_AsLong(pd_NSRNG); PyObject* pd_NCRS = PyDict_GetItemString(o_mmrcnst, "NCRS"); Cnt.NCRS = (int)PyInt_AsLong(pd_NCRS); PyObject* pd_NSEG0 = PyDict_GetItemString(o_mmrcnst, "NSEG0"); Cnt.NSEG0 = (int)PyInt_AsLong(pd_NSEG0); PyObject* pd_ALPHA = PyDict_GetItemString(o_mmrcnst, "ALPHA"); Cnt.ALPHA = (float)PyFloat_AsDouble(pd_ALPHA); PyObject* pd_AXR = PyDict_GetItemString(o_mmrcnst, "AXR"); Cnt.AXR = (float)PyFloat_AsDouble(pd_AXR); PyObject* pd_RRING = PyDict_GetItemString(o_mmrcnst, "RE"); Cnt.RE = (float)PyFloat_AsDouble(pd_RRING); PyObject* pd_TOFBINN = PyDict_GetItemString(o_mmrcnst, "TOFBINN"); Cnt.TOFBINN = (int)PyInt_AsLong(pd_TOFBINN); PyObject* pd_TOFBINS = PyDict_GetItemString(o_mmrcnst, "TOFBINS"); Cnt.TOFBINS = (float)PyFloat_AsDouble(pd_TOFBINS); PyObject* pd_TOFBIND = PyDict_GetItemString(o_mmrcnst, "TOFBIND"); Cnt.TOFBIND = (float)PyFloat_AsDouble(pd_TOFBIND); PyObject* pd_ITOFBIND = PyDict_GetItemString(o_mmrcnst, "ITOFBIND"); Cnt.ITOFBIND = (float)PyFloat_AsDouble(pd_ITOFBIND); PyObject* pd_ETHRLD = PyDict_GetItemString(o_mmrcnst, "ETHRLD"); Cnt.ETHRLD = (float)PyFloat_AsDouble(pd_ETHRLD); PyObject* pd_COSUPSMX = PyDict_GetItemString(o_mmrcnst, "COSUPSMX"); Cnt.COSUPSMX = (float)PyFloat_AsDouble(pd_COSUPSMX); PyObject* pd_span = PyDict_GetItemString(o_mmrcnst, "SPN"); Cnt.SPN = (int)PyInt_AsLong(pd_span); PyObject* pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT"); Cnt.RNG_STRT = (char)PyInt_AS_LONG(pd_rngstrt); PyObject* pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END"); Cnt.RNG_END = (char)PyInt_AS_LONG(pd_rngend); PyObject* pd_verbose = PyDict_GetItemString(o_mmrcnst, "VERBOSE"); Cnt.VERBOSE = (bool)PyInt_AS_LONG(pd_verbose); PyObject* pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID"); Cnt.DEVID = (char)PyInt_AS_LONG(pd_devid); // PyObject* pd_ICOSSTP = PyDict_GetItemString(o_mmrcnst, "ICOSSTP"); // Cnt.ICOSSTP = (float) PyFloat_AsDouble(pd_ICOSSTP); // PyObject* pd_SS_IMZ = PyDict_GetItemString(o_mmrcnst, "SS_IMZ"); // Cnt.SS_IMZ = (float) PyFloat_AsDouble(pd_SS_IMZ); // PyObject* pd_SS_IMY = PyDict_GetItemString(o_mmrcnst, "SS_IMY"); // Cnt.SS_IMY = (float) PyFloat_AsDouble(pd_SS_IMY); // PyObject* pd_SS_IMX = PyDict_GetItemString(o_mmrcnst, "SS_IMX"); // Cnt.SS_IMX = (float) PyFloat_AsDouble(pd_SS_IMX); // PyObject* pd_SS_VXZ = PyDict_GetItemString(o_mmrcnst, "SS_VXZ"); // Cnt.SS_VXZ = (float) PyFloat_AsDouble(pd_SS_VXZ); // PyObject* pd_SS_VXY = PyDict_GetItemString(o_mmrcnst, "SS_VXY"); // Cnt.SS_VXY = (float) PyFloat_AsDouble(pd_SS_VXY); // PyObject* pd_SSE_IMZ = PyDict_GetItemString(o_mmrcnst, "SSE_IMZ"); // Cnt.SSE_IMZ = (float) PyFloat_AsDouble(pd_SSE_IMZ); // PyObject* pd_SSE_IMY = 
PyDict_GetItemString(o_mmrcnst, "SSE_IMY"); // Cnt.SSE_IMY = (float) PyFloat_AsDouble(pd_SSE_IMY); // PyObject* pd_SSE_IMX = PyDict_GetItemString(o_mmrcnst, "SSE_IMX"); // Cnt.SSE_IMX = (float) PyFloat_AsDouble(pd_SSE_IMX); // PyObject* pd_SSE_VXZ = PyDict_GetItemString(o_mmrcnst, "SSE_VXZ"); // Cnt.SSE_VXZ = (float) PyFloat_AsDouble(pd_SSE_VXZ); // PyObject* pd_SSE_VXY = PyDict_GetItemString(o_mmrcnst, "SSE_VXY"); // Cnt.SSE_VXY = (float) PyFloat_AsDouble(pd_SSE_VXY); //output results PyObject *p_xsxu = PyArray_FROM_OTF(pd_xsxu, NPY_INT8, NPY_IN_ARRAY); PyObject *p_bind = PyArray_FROM_OTF(pd_bind, NPY_INT32, NPY_IN_ARRAY); PyObject *p_sval = PyArray_FROM_OTF(pd_sval, NPY_FLOAT32, NPY_IN_ARRAY); PyObject *p_sct3 = PyArray_FROM_OTF(pd_sct3, NPY_FLOAT32, NPY_IN_ARRAY); //-- trasaxial crystal LUTs: PyObject *p_crs = PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_IN_ARRAY); PyObject *p_mumap = PyArray_FROM_OTF(o_mumap, NPY_FLOAT32, NPY_IN_ARRAY); PyObject *p_mumsk = PyArray_FROM_OTF(o_mumsk, NPY_INT8, NPY_IN_ARRAY); PyObject *p_emimg = PyArray_FROM_OTF(o_emimg, NPY_FLOAT32, NPY_IN_ARRAY); //-- //-- get the arrays form the dictionaries (objects) PyObject *p_sn1_rno = PyArray_FROM_OTF(pd_sn1_rno, NPY_INT16, NPY_IN_ARRAY); PyObject *p_sn1_sn11 = PyArray_FROM_OTF(pd_sn1_sn11, NPY_INT16, NPY_IN_ARRAY); PyObject *p_isrng = PyArray_FROM_OTF(pd_isrng, NPY_INT16, NPY_IN_ARRAY); PyObject *p_offseg = PyArray_FROM_OTF(pd_offseg, NPY_INT16, NPY_IN_ARRAY); PyObject *p_sctaxR = PyArray_FROM_OTF(pd_sctaxR, NPY_INT32, NPY_IN_ARRAY); PyObject *p_sctaxW = PyArray_FROM_OTF(pd_sctaxW, NPY_FLOAT32, NPY_IN_ARRAY); PyObject *p_KN = PyArray_FROM_OTF(pd_KN, NPY_FLOAT32, NPY_IN_ARRAY); //-- /* If that didn't work, throw an exception. */ if (p_mumap == NULL || p_mumsk == NULL || p_emimg == NULL || p_sn1_rno == NULL || p_sn1_sn11 == NULL || p_sctaxR == NULL || p_sctaxW == NULL || p_offseg == NULL || p_isrng == NULL || p_KN == NULL || p_crs == NULL || p_xsxu == NULL || p_bind == NULL || p_sval == NULL || p_sct3 == NULL) { Py_XDECREF(p_mumap); Py_XDECREF(p_mumsk); Py_XDECREF(p_emimg); Py_XDECREF(p_sn1_rno); Py_XDECREF(p_sn1_sn11); Py_XDECREF(p_offseg); Py_XDECREF(p_isrng); Py_XDECREF(p_sctaxR); Py_XDECREF(p_sctaxW); Py_XDECREF(p_KN); Py_XDECREF(p_crs); Py_XDECREF(p_xsxu); Py_XDECREF(p_bind); Py_XDECREF(p_sval); Py_XDECREF(p_sct3); printf("e> problem with getting the images and LUTs in C functions... 
:(\n"); return NULL; } //get the c-type arrays char *mumsk = (char*)PyArray_DATA(p_mumsk); float *mumap = (float*)PyArray_DATA(p_mumap); float *emimg = (float*)PyArray_DATA(p_emimg); short *sn1_rno = (short*)PyArray_DATA(p_sn1_rno); short *sn1_sn11 = (short*)PyArray_DATA(p_sn1_sn11); float *crs = (float*)PyArray_DATA(p_crs); //indecies of rings included in scatter estimation short *isrng = (short*)PyArray_DATA(p_isrng); //offset in each segment used for rings to sino LUT short *offseg = (short*)PyArray_DATA(p_offseg); //scatter sino indeces in axial dimensions through michelogram used for interpolation in 3D int *sctaxR = (int*)PyArray_DATA(p_sctaxR); //weightes for the interpolation in 3D (used together with the above) float *sctaxW = (float*)PyArray_DATA(p_sctaxW); //K-N probabilities in the LUT float *KNlut = (float*)PyArray_DATA(p_KN); //output structure scatOUT sctout; sctout.xsxu = (char*)PyArray_DATA(p_xsxu); sctout.bind = (int*)PyArray_DATA(p_bind); sctout.sval = (float*)PyArray_DATA(p_sval); sctout.s3d = (float*)PyArray_DATA(p_sct3); //Get the image dims muIMG.nvx = (size_t)(PyArray_DIM(p_mumap, 0) * PyArray_DIM(p_mumap, 1) * PyArray_DIM(p_mumap, 2)); emIMG.nvx = (size_t)(PyArray_DIM(p_emimg, 0) * PyArray_DIM(p_emimg, 1) * PyArray_DIM(p_emimg, 2)); if (muIMG.nvx != emIMG.nvx) printf("\nw> mu-map and emission image have different dims: mu.nvx = %d, em.nvx = %d\n", muIMG.nvx, emIMG.nvx); //get the stats in the img structure float mumx = -1e12, emmx = -1e12, mumn = 1e12, emmn = 1e12; for (int i = 0; i<muIMG.nvx; i++) { if (mumap[i]>mumx) mumx = mumap[i]; if (mumap[i]<mumn) mumn = mumap[i]; } for (int i = 0; i<emIMG.nvx; i++) { if (emimg[i]>emmx) emmx = emimg[i]; if (emimg[i]<emmn) emmn = emimg[i]; } muIMG.im = mumap; emIMG.im = emimg; muIMG.max = mumx; emIMG.max = emmx; muIMG.min = mumn; emIMG.min = emmn; muIMG.n10mx = 0; emIMG.n10mx = 0; for (int i = 0; i<muIMG.nvx; i++) if (mumap[i]>0.1*mumx) muIMG.n10mx += 1; for (int i = 0; i<emIMG.nvx; i++) if (emimg[i]>0.1*emmx) emIMG.n10mx += 1; if (Cnt.VERBOSE == 1) printf("i> mumx = %f, mumin = %f, emmx = %f, emmn = %f\n", mumx, mumn, emmx, emmn); // sets the device on which to calculate cudaSetDevice(Cnt.DEVID); //<><><><><><><><><> S C A T T E R K E R N E L <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> prob_scatt(sctout, KNlut, mumsk, muIMG, emIMG, sctaxR, sctaxW, offseg, isrng, crs, sn1_rno, sn1_sn11, Cnt); cudaDeviceSynchronize(); //<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> //Clean up if (Cnt.VERBOSE == 1) printf("ci> cleaning scatter variables..."); Py_DECREF(p_mumap); Py_DECREF(p_mumsk); Py_DECREF(p_emimg); Py_DECREF(p_sn1_rno); Py_DECREF(p_sn1_sn11); Py_DECREF(p_isrng); Py_DECREF(p_offseg); Py_DECREF(p_sctaxR); Py_DECREF(p_sctaxW); Py_DECREF(p_xsxu); Py_DECREF(p_bind); Py_DECREF(p_sval); Py_DECREF(p_sct3); Py_INCREF(Py_None); if (Cnt.VERBOSE == 1) printf("DONE.\n"); return Py_None; }
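Most of mmr_scat above repeats the same two-line pattern per scanner constant: PyDict_GetItemString followed by PyInt_AsLong or PyFloat_AsDouble. A hypothetical helper such as get_int_cnst below (not part of the petsct source) shows how that pattern can be wrapped with a missing-key check; PyDict_GetItemString returns a borrowed reference, so no Py_DECREF is needed.

#include <Python.h>
#include <stdio.h>

/* Hypothetical convenience wrapper around the dict-extraction pattern used in
   mmr_scat; a sketch for illustration, not part of the extension. */
static int get_int_cnst(PyObject *dict, const char *key, int fallback)
{
    PyObject *item = PyDict_GetItemString(dict, key);   /* borrowed reference */
    if (!item) {
        printf("w> constant '%s' not found, using %d\n", key, fallback);
        return fallback;
    }
    return (int)PyInt_AsLong(item);                     /* Python 2 API, as in the file above */
}

/* Usage, equivalent to the explicit pairs in mmr_scat:
   Cnt.NRNG = get_int_cnst(o_mmrcnst, "NRNG", 0);
   Cnt.SPN  = get_int_cnst(o_mmrcnst, "SPN",  0);  */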
64e0c5b8452ba7f782af88577984860fe72a6618.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by grzegorz on 20.01.2020. // #include "include/SHA1cudaDigestGenerator.cuh" #include "include/SHA1_cuda.cuh" #include <cmath> #include <chrono> #include <cstring> #include <iostream> std::string SHA1cudaDigestGenerator::getAlgorithmName() { return "sha1_cuda"; } unsigned int SHA1cudaDigestGenerator::getDigestLength() { return 20; } unsigned int SHA1cudaDigestGenerator::calculateWorkingBufferLength(unsigned int defaultWordLength) { unsigned int toAdd = 64 - (defaultWordLength + 8) % 64; if (toAdd == 0) toAdd = 64; return (defaultWordLength + toAdd + 8)/4; } void SHA1cudaDigestGenerator::generate() { unsigned char *digestGPU; char *wordsGPU; auto startLoad = std::chrono::high_resolution_clock::now(); unsigned long int workingBufferLength = calculateWorkingBufferLength(length_to_gen); if (workingBufferLength > 256/4) { std::cout << "error workingBufferLength > 200 " << std::endl; return; } hipError_t errorCode; if ((errorCode = hipMalloc((void **) &digestGPU, sizeof(unsigned char) * n_to_gen * getDigestLength())) != hipSuccess) { std::cout << "error during alloc memory for digest on GPU error code: " << hipGetErrorName(errorCode) << std::endl; return; }; unsigned int wordBufferLength = length_to_gen+4-length_to_gen%4; if ((errorCode = hipMalloc(&wordsGPU, sizeof(char) * n_to_gen * wordBufferLength)) != hipSuccess) { std::cout << "error during alloc memory for words on GPU error code: " << hipGetErrorName(errorCode) << std::endl; return; }; char *words_tmp = new char[wordBufferLength * n_to_gen]; for (unsigned int i = 0; i < n_to_gen; i++) { memcpy(words_tmp + i * wordBufferLength, words[i], sizeof(unsigned char) * length_to_gen); } hipMemcpy(wordsGPU, words_tmp, sizeof(unsigned char) * wordBufferLength * n_to_gen, hipMemcpyHostToDevice); delete[] words_tmp; auto stopLoad = std::chrono::high_resolution_clock::now(); auto durationLoad = std::chrono::duration_cast<std::chrono::milliseconds>(stopLoad - startLoad); std::cout << "gpu data load in: " << durationLoad.count() << " milliseconds" << std::endl; unsigned int blockSize = 256; unsigned int gridSize = (unsigned int) ceil((float) n_to_gen / blockSize); // std::cout << "number of blocks: " << gridSize << "\t number of threads per block: " << blockSize << std::endl; auto startKernel = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( SHA1_cuda::calculateHashSum) , dim3(gridSize), dim3(blockSize) , 0, 0, digestGPU, wordsGPU, workingBufferLength, length_to_gen, n_to_gen); errorCode = hipDeviceSynchronize(); auto stopKernel = std::chrono::high_resolution_clock::now(); // std::cout << "kernel quit code: " << hipGetErrorName(errorCode) << std::endl; auto durationKernel = std::chrono::duration_cast<std::chrono::milliseconds>(stopKernel - startKernel); std::cout << "kernel end work in in: " << durationKernel.count() << " milliseconds <-----------------" << std::endl; auto startUnload = std::chrono::high_resolution_clock::now(); digest = new unsigned char *[n_to_gen]; unsigned char *digest_tmp = new unsigned char[n_to_gen * getDigestLength()]; hipMemcpy(digest_tmp, digestGPU, sizeof(unsigned char) * getDigestLength() * n_to_gen, hipMemcpyDeviceToHost); digest[0] = digest_tmp; for (unsigned int i = 1; i < n_to_gen; i++) { digest[i] = digest[i-1] + getDigestLength(); // digest[i] = new unsigned char[getDigestLength()]; // memcpy(digest[i], digest_tmp + i * getDigestLength(), getDigestLength()); } hipFree(digestGPU); hipFree(wordsGPU); auto 
stopUnload = std::chrono::high_resolution_clock::now(); auto durationUnload = std::chrono::duration_cast<std::chrono::milliseconds>(stopUnload - startUnload); std::cout << "gpu data unload in: " << durationUnload.count() << " milliseconds" << std::endl; n = n_to_gen; length = length_to_gen; } bool SHA1cudaDigestGenerator::needOneDimArray() { return true; }
64e0c5b8452ba7f782af88577984860fe72a6618.cu
// // Created by grzegorz on 20.01.2020. // #include "include/SHA1cudaDigestGenerator.cuh" #include "include/SHA1_cuda.cuh" #include <cmath> #include <chrono> #include <cstring> #include <iostream> std::string SHA1cudaDigestGenerator::getAlgorithmName() { return "sha1_cuda"; } unsigned int SHA1cudaDigestGenerator::getDigestLength() { return 20; } unsigned int SHA1cudaDigestGenerator::calculateWorkingBufferLength(unsigned int defaultWordLength) { unsigned int toAdd = 64 - (defaultWordLength + 8) % 64; if (toAdd == 0) toAdd = 64; return (defaultWordLength + toAdd + 8)/4; } void SHA1cudaDigestGenerator::generate() { unsigned char *digestGPU; char *wordsGPU; auto startLoad = std::chrono::high_resolution_clock::now(); unsigned long int workingBufferLength = calculateWorkingBufferLength(length_to_gen); if (workingBufferLength > 256/4) { std::cout << "error workingBufferLength > 200 " << std::endl; return; } cudaError_t errorCode; if ((errorCode = cudaMalloc((void **) &digestGPU, sizeof(unsigned char) * n_to_gen * getDigestLength())) != cudaSuccess) { std::cout << "error during alloc memory for digest on GPU error code: " << cudaGetErrorName(errorCode) << std::endl; return; }; unsigned int wordBufferLength = length_to_gen+4-length_to_gen%4; if ((errorCode = cudaMalloc(&wordsGPU, sizeof(char) * n_to_gen * wordBufferLength)) != cudaSuccess) { std::cout << "error during alloc memory for words on GPU error code: " << cudaGetErrorName(errorCode) << std::endl; return; }; char *words_tmp = new char[wordBufferLength * n_to_gen]; for (unsigned int i = 0; i < n_to_gen; i++) { memcpy(words_tmp + i * wordBufferLength, words[i], sizeof(unsigned char) * length_to_gen); } cudaMemcpy(wordsGPU, words_tmp, sizeof(unsigned char) * wordBufferLength * n_to_gen, cudaMemcpyHostToDevice); delete[] words_tmp; auto stopLoad = std::chrono::high_resolution_clock::now(); auto durationLoad = std::chrono::duration_cast<std::chrono::milliseconds>(stopLoad - startLoad); std::cout << "gpu data load in: " << durationLoad.count() << " milliseconds" << std::endl; unsigned int blockSize = 256; unsigned int gridSize = (unsigned int) ceil((float) n_to_gen / blockSize); // std::cout << "number of blocks: " << gridSize << "\t number of threads per block: " << blockSize << std::endl; auto startKernel = std::chrono::high_resolution_clock::now(); SHA1_cuda::calculateHashSum <<< gridSize, blockSize >>> (digestGPU, wordsGPU, workingBufferLength, length_to_gen, n_to_gen); errorCode = cudaDeviceSynchronize(); auto stopKernel = std::chrono::high_resolution_clock::now(); // std::cout << "kernel quit code: " << cudaGetErrorName(errorCode) << std::endl; auto durationKernel = std::chrono::duration_cast<std::chrono::milliseconds>(stopKernel - startKernel); std::cout << "kernel end work in in: " << durationKernel.count() << " milliseconds <-----------------" << std::endl; auto startUnload = std::chrono::high_resolution_clock::now(); digest = new unsigned char *[n_to_gen]; unsigned char *digest_tmp = new unsigned char[n_to_gen * getDigestLength()]; cudaMemcpy(digest_tmp, digestGPU, sizeof(unsigned char) * getDigestLength() * n_to_gen, cudaMemcpyDeviceToHost); digest[0] = digest_tmp; for (unsigned int i = 1; i < n_to_gen; i++) { digest[i] = digest[i-1] + getDigestLength(); // digest[i] = new unsigned char[getDigestLength()]; // memcpy(digest[i], digest_tmp + i * getDigestLength(), getDigestLength()); } cudaFree(digestGPU); cudaFree(wordsGPU); auto stopUnload = std::chrono::high_resolution_clock::now(); auto durationUnload = 
std::chrono::duration_cast<std::chrono::milliseconds>(stopUnload - startUnload); std::cout << "gpu data unload in: " << durationUnload.count() << " milliseconds" << std::endl; n = n_to_gen; length = length_to_gen; } bool SHA1cudaDigestGenerator::needOneDimArray() { return true; }
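The only substantive difference between the two SHA1 files is the kernel launch: the CUDA file uses the triple-chevron form, while the hipified file spells the same launch as hipLaunchKernelGGL(SHA1_cuda::calculateHashSum, dim3(gridSize), dim3(blockSize), 0, 0, ...). The self-contained CUDA sketch below, built around a throwaway fill kernel that is not part of the generator, shows that mapping in isolation.

#include <cuda_runtime.h>
#include <cstdio>

// Trivial kernel used only to illustrate the launch-syntax mapping.
__global__ void fill(unsigned int *out, unsigned int value, unsigned int n)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;    // one element per thread, guarded against overshoot
}

int main()
{
    const unsigned int n = 1024, blockSize = 256;
    const unsigned int gridSize = (n + blockSize - 1) / blockSize;
    unsigned int *d_out = nullptr;
    cudaMalloc(&d_out, n * sizeof(unsigned int));

    // CUDA triple-chevron launch, as in the .cu file above:
    fill<<<gridSize, blockSize>>>(d_out, 0u, n);

    // The hipified file would express the identical launch as:
    //   hipLaunchKernelGGL(fill, dim3(gridSize), dim3(blockSize), 0, 0, d_out, 0u, n);
    // where the two zeros are dynamic shared-memory bytes and the stream handle.

    cudaDeviceSynchronize();
    cudaFree(d_out);
    printf("done\n");
    return 0;
}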
980be9d60461509a1c4ef0b9701f0a91fa07eb8b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include "hip/hip_runtime.h" #include "rocblas.h" #define M 6 //6 rows #define N 5 //5 columns #define IDX2C(i,j,ld) (((j)*(ld))+(i)) /* A macro that converts coordinates (i, j) into the column-major index used by cuBLAS: for an element in row i, column j of a matrix whose columns have length ld, the resulting column-major index is j*ld+i. */ static __inline__ void modify(hipblasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){ hipblasSscal(handle, n - q, &alpha, &m[IDX2C(p, q, ldm)], ldm); hipblasSscal(handle, ldm - p, &beta, &m[IDX2C(p, q, ldm)], 1); } int main() { hipError_t cudaStatu; hipblasStatus_t state; hipblasHandle_t handle; // handle managing a cuBLAS context int i, j; float* devPtrA; float* a; a = (float*)malloc(M * N * sizeof(float)); if (!a) { printf ("host memory allocation failed"); return EXIT_FAILURE; } for (i = 0; i < N; i++) { for (j = 0; j < M; j++) { a[IDX2C(j, i, M)] = (float)(j * M + i + 1); printf ("%7.0f", a[IDX2C(j, i, M)]); } printf ("\n"); } printf ("\n"); cudaStatu = hipMalloc((void**)&devPtrA, M*N*sizeof(float)); if (cudaStatu != hipSuccess) { printf ("device memory allocation failed"); return EXIT_FAILURE; } state = hipblasCreate(&handle); if (state != HIPBLAS_STATUS_SUCCESS) { printf ("CUBLAS initialization failed\n"); return EXIT_FAILURE; } state = hipblasSetMatrix(M, N, sizeof(float), a, M, devPtrA, M); if (state != HIPBLAS_STATUS_SUCCESS) { printf ("data download failed"); hipFree (devPtrA); hipblasDestroy(handle); return EXIT_FAILURE; } modify(handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f); state = hipblasGetMatrix(M, N, sizeof(float), devPtrA, M, a, M); if (state != HIPBLAS_STATUS_SUCCESS) { printf ("data upload failed"); hipFree (devPtrA); hipblasDestroy(handle); return EXIT_FAILURE; } for (j = 0; j < N; j++) { for (i = 0; i < M; i++) { printf ("%7.0f", a[IDX2C(i,j,M)]); } printf ("\n"); } free(a); hipFree(devPtrA); hipblasDestroy(handle); return EXIT_SUCCESS; }
980be9d60461509a1c4ef0b9701f0a91fa07eb8b.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "cuda_runtime.h" #include "cublas_v2.h" #define M 6 //6 rows #define N 5 //5 columns #define IDX2C(i,j,ld) (((j)*(ld))+(i)) /* A macro that converts coordinates (i, j) into the column-major index used by cuBLAS: for an element in row i, column j of a matrix whose columns have length ld, the resulting column-major index is j*ld+i. */ static __inline__ void modify(cublasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){ cublasSscal(handle, n - q, &alpha, &m[IDX2C(p, q, ldm)], ldm); cublasSscal(handle, ldm - p, &beta, &m[IDX2C(p, q, ldm)], 1); } int main() { cudaError_t cudaStatu; cublasStatus_t state; cublasHandle_t handle; // handle managing a cuBLAS context int i, j; float* devPtrA; float* a; a = (float*)malloc(M * N * sizeof(float)); if (!a) { printf ("host memory allocation failed"); return EXIT_FAILURE; } for (i = 0; i < N; i++) { for (j = 0; j < M; j++) { a[IDX2C(j, i, M)] = (float)(j * M + i + 1); printf ("%7.0f", a[IDX2C(j, i, M)]); } printf ("\n"); } printf ("\n"); cudaStatu = cudaMalloc((void**)&devPtrA, M*N*sizeof(float)); if (cudaStatu != cudaSuccess) { printf ("device memory allocation failed"); return EXIT_FAILURE; } state = cublasCreate(&handle); if (state != CUBLAS_STATUS_SUCCESS) { printf ("CUBLAS initialization failed\n"); return EXIT_FAILURE; } state = cublasSetMatrix(M, N, sizeof(float), a, M, devPtrA, M); if (state != CUBLAS_STATUS_SUCCESS) { printf ("data download failed"); cudaFree (devPtrA); cublasDestroy(handle); return EXIT_FAILURE; } modify(handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f); state = cublasGetMatrix(M, N, sizeof(float), devPtrA, M, a, M); if (state != CUBLAS_STATUS_SUCCESS) { printf ("data upload failed"); cudaFree (devPtrA); cublasDestroy(handle); return EXIT_FAILURE; } for (j = 0; j < N; j++) { for (i = 0; i < M; i++) { printf ("%7.0f", a[IDX2C(i,j,M)]); } printf ("\n"); } free(a); cudaFree(devPtrA); cublasDestroy(handle); return EXIT_SUCCESS; }
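modify() above relies on the column-major layout encoded by IDX2C: with leading dimension ldm, a stride of ldm steps along a row and a stride of 1 steps down a column. The host-only sketch below reproduces the two cublasSscal calls with plain loops so the stride arithmetic can be checked; it is written for this comparison and is not part of the sample.

#include <cstdio>

#define IDX2C(i,j,ld) (((j)*(ld))+(i))

// Plain-loop equivalent of modify(): scale the tail of row p by alpha
// (stride ldm in column-major storage) and then the tail of column q by beta
// (stride 1). Element (p, q) is touched by both passes, as with cublasSscal.
static void modifyHost(float *m, int ldm, int n, int p, int q, float alpha, float beta)
{
    for (int j = q; j < n; j++)        // row p, columns q..n-1
        m[IDX2C(p, j, ldm)] *= alpha;
    for (int i = p; i < ldm; i++)      // column q, rows p..ldm-1
        m[IDX2C(i, q, ldm)] *= beta;
}

int main()
{
    const int rows = 6, cols = 5;                    // same shape as the sample's M x N matrix
    float a[rows * cols];
    for (int j = 0; j < cols; j++)
        for (int i = 0; i < rows; i++)
            a[IDX2C(i, j, rows)] = (float)(j * rows + i + 1);
    modifyHost(a, rows, cols, 1, 2, 16.0f, 12.0f);   // same arguments the sample passes to modify()
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++)
            printf("%7.0f", a[IDX2C(i, j, rows)]);
        printf("\n");
    }
    return 0;
}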
0358e6bf2248715801090bb01703efd52cd33b77.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <complex> #include <sstream> #include <iomanip> #include <string> #include <stdio.h> // printf for device functions #include <SFML/Graphics.hpp> #include "fitsio.h" // fits files for fake data #include "bitmap_image.hpp" // colorscheme #include <hipfft.h> #include <hiprand/hiprand.h> #include <hip/hip_runtime.h> // nvcc -o gpu_vis_streamed gpu_vis_streamed.cu -lsfml-graphics -lcfitsio -O2 -lcufft -std=c++11 -Wno-deprecated-gpu-targets #define IDX(t,f,n) (((t)*220*2048) + ((f)*220) + (n)) #define SZ 1024 #define N_ANTENNAS 10 #define N_BASELINES (N_ANTENNAS - 1)*(N_ANTENNAS)/2 #define N_STREAMS 5 #define N_TIMESTEPS 300 #define N_DUMPPOINTS 10000 #include "gpu_vis_streamed.cuh" int main (){ /*************************************************** Constants of operation ***************************************************/ int idx_usable_start = 300; int idx_usable_end = 1800; int n_usable_idx = idx_usable_end - idx_usable_start; // int tot_f_index = 2048; // float f_total_start = 1.28; float f_tot_end = 1.53; // float bw = f_tot_end - f_total_start; // int n_antennas = 10; // float pos[10][3] = {{ 2.32700165e-05, 4.43329978e+02, 1.02765904e-05}, // { 2.21098953e-05, 4.36579978e+02, 9.64180626e-06}, // { 1.14039406e-05, 3.42179978e+02, 4.94604756e-06}, // {-1.14039406e-05, 1.53679978e+02, -4.94604756e-06}, // { 1.83569971e+02, -4.37387873e-05, 1.57832354e-05}, // {-1.03180029e+02, -1.03154892e-05, -2.81970909e-05}, // {-1.76180029e+02, -1.63615256e-06, -3.96178686e-05}, // // { 1.90319971e+02, -4.48715200e-05, 1.72737512e-05}, // {-2.03330030e+02, 1.63615256e-06, -4.39237587e-05}, // {-9.91080054e+02, -2.16759905e+02, -4.00017642e+00}}; // int ant_order[] = {1, 4, 5, 8, 6, 9, 2, 10, 3, 7}; float pos_extent = 1299.54002797; // std::cout << "Hello, position: " << pos[3][2] << std::endl; /*************************************************** Datastructures for moving data ***************************************************/ sf::Uint8 *img = new sf::Uint8[SZ*SZ*N_STREAMS*4]; // temporary image if (!img){ std::cerr << "img not allocated" << std::endl;} sf::Image image[N_STREAMS]; // SFML image object std::ostringstream oss; std::string title; // gpu timers float gpu_time_ms_solve = -1; float avg_gpu_time = 0; float avg_gpu_time_cnt = 0; // FFT plans and workspaces // fftw_plan plan; // fftw_complex *A, *C; // A = (fftw_complex*) fftw_malloc(SZ*SZ*sizeof(fftw_complex)); // C = (fftw_complex*) fftw_malloc(SZ*SZ*sizeof(fftw_complex)); // hipfftComplex *A = (hipfftComplex*) malloc(sizeof(hipfftComplex)*SZ*SZ); // hipfftComplex *C = (hipfftComplex*) malloc(sizeof(hipfftComplex)*SZ*SZ); // hipfftComplex *A = new hipfftComplex[SZ*SZ]; //(sizeof(hipfftComplex)*SZ*SZ); hipfftComplex *C = new hipfftComplex[SZ*SZ*N_STREAMS]; if (!C){std::cerr << "Matricies not allocated" << std::endl;} hipfftComplex *d_A, *d_C, *d_A_cal, *d_B_cal; float *d_data, *d_norm; int n_pol = 2; int n_complex = 2; int n_floats_per_freq = (N_BASELINES + 10)*n_pol*n_complex; // # floats per frequency gpuErrchk(hipMalloc(&d_A, SZ*SZ*N_STREAMS*sizeof(hipfftComplex))); gpuErrchk(hipMalloc(&d_C, SZ*SZ*N_STREAMS*sizeof(hipfftComplex))); gpuErrchk(hipMalloc(&d_norm, SZ*SZ*N_STREAMS*sizeof(float))); gpuErrchk(hipMalloc(&d_A_cal, N_BASELINES*n_usable_idx*sizeof(hipfftComplex) )); gpuErrchk(hipMalloc(&d_B_cal, N_BASELINES*n_usable_idx*sizeof(hipfftComplex) )); gpuErrchk(hipMalloc(&d_data, n_floats_per_freq*n_usable_idx*N_TIMESTEPS*sizeof(float))); // int 
n_baselines = 55; // gpuErrchk(hipMalloc( &d_data, n_floats_per_freq*n_usable_idx*sizeof(float))); //d_data is indexed by [frequency, baseline] /*************************************************** Get visibility corrections and Upload to GPU ***************************************************/ // A_cal is indexed by [baseline, frequency] std::ifstream A_real, A_imag, B_real, B_imag; A_real.open("AA_real.txt"); A_imag.open("AA_imag.txt"); B_real.open("BB_real.txt"); B_imag.open("BB_imag.txt"); hipfftComplex* A_cal = new hipfftComplex[N_BASELINES*n_usable_idx]; hipfftComplex* B_cal = new hipfftComplex[N_BASELINES*n_usable_idx]; if (!A_cal){ std::cerr << "A_cal not allocated" << std::endl;} if (!B_cal){ std::cerr << "B_cal not allocated" << std::endl;} for (int i = 0; i < N_BASELINES; i++){ for (int j = 0; j < n_usable_idx; j++){ A_real >> A_cal[i*n_usable_idx + j].x; A_imag >> A_cal[i*n_usable_idx + j].y; B_real >> B_cal[i*n_usable_idx + j].x; B_imag >> B_cal[i*n_usable_idx + j].y; } } gpuErrchk(hipMemcpy(d_A_cal, A_cal, N_BASELINES*n_usable_idx*sizeof(hipfftComplex), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_B_cal, B_cal, N_BASELINES*n_usable_idx*sizeof(hipfftComplex), hipMemcpyHostToDevice)); delete[] A_cal; delete[] B_cal; /*************************************************** Make Plan for FFT ***************************************************/ std::cout << "Got corrections" << std::endl; hipfftHandle plan[N_STREAMS]; hipStream_t stream[N_STREAMS]; for (int i = 0; i < N_STREAMS; i++){ gpuErrchk(hipStreamCreate(&(stream[i]))); gpuFFTchk(hipfftPlan2d(&(plan[i]), SZ, SZ, HIPFFT_C2C)); } // plan = fftw_plan_dft_2d(SZ, SZ, A, C, FFTW_FORWARD, FFTW_ESTIMATE); std::cout << "Made plan" << std::endl; // float* norm = new float[SZ*SZ]; /*************************************************** Pull fits data into memory ***************************************************/ fitsfile *fptr; int status = 0; int ncols = 0; long nrows = 0; int px_per_res_elem = 3; std::cout << "Opening data file" << std::endl; fits_open_data(&fptr, "test.fits", READONLY, &status); std::cout << "Opened data file" <<std::endl; fits_get_num_rows(fptr, &nrows, &status); fits_get_num_cols(fptr, &ncols, &status); std::cout <<"rows: " << nrows <<" cols: " << ncols << std::endl; std::cout << "Frequencies: " << n_usable_idx << std::endl; int column_number = 1; int null_values; float *data = new float[nrows]; gpuErrchk(hipHostRegister(data, nrows*sizeof(float), hipHostRegisterPortable)); fits_read_col(fptr, TFLOAT, column_number, 1, 1, nrows, NULL, data, &null_values , &status); std::cout << "Done Reading" << std::endl; // for (int i = 0; i < 10; i++){ // std::cout << "data[" << i << "] = "<< data[IDX(231,300,i)] << std::endl; // } // gpuErrchk(hipMemcpy(d_data, &(data[IDX(231, 300, 0)]), 220*1500*sizeof(float), hipMemcpyHostToDevice)); // gpuErrchk(hipMemcpy(&(data[IDX(231, 300, 0)]), d_data, 220*1500*sizeof(float), hipMemcpyDeviceToHost)); // for (int i = 0; i < 10; i ++){ // std::cout << "data[" << i << "] = " << data[IDX(231,300,i)] << std::endl; // } /*************************************************** Calculate Constants ***************************************************/ float c = 299792458.0; // float wavelength; float mv = ((float) SZ)/(2.0*pos_extent)* c/(f_tot_end*1E9)/px_per_res_elem; std::cout << "mv: " << mv <<std::endl; std::cout << "pos_extent: " << pos_extent << std::endl; std::cout << "other: " << c/(f_tot_end*1E9)/px_per_res_elem << std::endl; int idx; hipfftComplex thresh; thresh.x = 90000.0; thresh.y = 0; 
thrust::device_vector<hipfftComplex> d_value(N_DUMPPOINTS); thrust::device_vector<int> d_index(N_DUMPPOINTS); thrust::host_vector<hipfftComplex> h_value(N_DUMPPOINTS); thrust::host_vector<int> h_index(N_DUMPPOINTS); /*************************************************** Start continuous Loop ***************************************************/ int time[N_STREAMS]; for (int i = 0; i < N_STREAMS; i++){ time[i] = 220 + i; // t_data[i] = &(d_C[SZ*SZ]); } int EXIT_FLAG = 0; while(!EXIT_FLAG){ for (int s = 0; s < N_STREAMS; s ++){ gpuErrchk(hipStreamSynchronize(stream[s])); START_TIMER(); int img_strt = s*SZ*SZ; std::cout << "time: " << time[s] << ", stream: " << s << std::endl; // Memcpy minimum data required for gridding. gpuErrchk(hipMemcpyAsync(&(d_data[time[s] % N_TIMESTEPS]), &(data[IDX(time[s], idx_usable_start, 0)]), n_floats_per_freq*n_usable_idx*sizeof(float), hipMemcpyHostToDevice, stream[s])); /* TODO: copy data into correct block of memory Identify which data is needed when */ // memset(A, 0, SZ*SZ*sizeof(hipfftComplex)); // memset(C, 0, SZ*SZ*sizeof(hipfftComplex)); // memset(norm, 0, SZ*SZ*sizeof(float)); // reset workspaces to zero gpuErrchk(hipMemsetAsync(&(d_A[img_strt]), 0, SZ*SZ*sizeof(hipfftComplex), stream[s])); gpuErrchk(hipMemsetAsync(&(d_norm[img_strt]), 0, SZ*SZ*sizeof(float), stream[s])); std::cout << "calling kernel" << std::endl; hipLaunchKernelGGL(( grid), dim3(1),dim3(45),0,stream[s], &(d_A[img_strt]), &(d_data[time[s]]), d_A_cal, d_B_cal, 250, 500); std::cout << "Done Filling Matrix" << std::endl; // gpuErrchk(hipMemcpy(d_A, A, sizeof(hipfftComplex)*SZ*SZ, hipMemcpyHostToDevice)); gpuFFTchk(hipfftExecC2C(plan[s], &(d_A[img_strt]), &(d_C[img_strt]), HIPFFT_FORWARD)); // real<<<20,32, 0, stream[s]>>>(&(d_C[img_strt]), SZ*SZ); thrust::device_ptr <hipfftComplex> t_data(&(d_C[img_strt])); int ans = thrust::copy_if(thrust::hip::par.on(stream[s]), make_zip_iterator(make_tuple(t_data, make_counting_iterator(0u))), make_zip_iterator(make_tuple(t_data, make_counting_iterator(0u))) + SZ*SZ, t_data, make_zip_iterator(make_tuple(d_value.begin(), d_index.begin())), greater_than_val<hipfftComplex>(thresh) ) - make_zip_iterator(make_tuple(d_value.begin(), d_index.begin())); h_value = d_value; h_index = d_index; gpuErrchk(hipMemcpyAsync(&(C[img_strt]), &(d_C[img_strt]), sizeof(hipfftComplex)*SZ*SZ, hipMemcpyDeviceToHost, stream[s])); std::cout << "number of points = " << ans << std::endl; float maxx = 0; int indexx = 0; for (int i = 0; i < ans; i++){ if (h_value[i].x > maxx){ // std::cout << "h: " << h_value[i].x; maxx = h_value[i].x; indexx = h_index[i]; } } std::cout << "max value by copy_if = " << maxx << ", at location = " << indexx << std::endl; /* TODO: Do copy_if Only copy the copyif vector, also overflow signal for dumping all the data send data for comparison Calculate coincidences (std::map) At end, search through list of all arrays, if proximate data points, include in sum. 
*/ /* Calculate Statistics */ float max = 0; float avg = 0; float mag = 0; for (int i = 0; i < SZ*SZ; i++){ mag = C[i+img_strt].x;//std::abs(C[i+img_strt].x);//*C[i].x + C[i].y*C[i].y); if (mag > max){ max = mag; } avg += mag; } avg /= SZ*SZ; float stdr = 0; for (int i = 0; i < SZ*SZ; i++){ mag = C[i+img_strt].x;//std::sqrt(C[i+img_strt].x*C[i+img_strt].x);// + C[i+img_strt].y*C[i+img_strt].y); stdr += (avg - mag)*(avg - mag); } stdr = std::sqrt(stdr/(SZ*SZ-1)); std::cout << "time: " << time[s] << ", max: " << max << ", avg: " << avg << ", std: " << stdr << std::endl; std::cout << "Writing to disk"<< std::endl; float cmax = 2000000.0; float cmin = 0; float abs = 0; // write to image, flip spectra for (int i = 0; i < SZ*SZ; i++){ abs = std::sqrt(C[i+img_strt].x*C[i+img_strt].x);// + C[i+img_strt].y*C[i+img_strt].y); unsigned int temp = static_cast<unsigned int> (999*((abs-cmin)/(cmax-cmin))); if (temp > 999){ temp = 999; } rgb_t px_color = jet_colormap[temp]; //remap based on flipped array if(i < SZ*SZ/2){ //first half of array, adding if((i%SZ) < SZ/2){ //up left quadrent idx = i + SZ*SZ/2 + SZ/2; } else { //up right quadrent idx = i + SZ*SZ/2 - SZ/2; } } else{ if((i%SZ) < SZ/2){ //dwn left quadrent idx = i - (SZ*SZ/2 - SZ/2); } else { //dwm right quadrent idx = i - (SZ*SZ/2 + SZ/2); } } img[4*idx + 4*img_strt] = px_color.red; img[4*idx+1 + 4*img_strt] = px_color.green; img[4*idx+2 + 4*img_strt] = px_color.blue; img[4*idx+3 + 4*img_strt] = 255; // std::cout << "img: "<< temp << std::endl; } image[s].create(SZ, SZ, &(img[SZ*SZ*4*s])); //Create and save image oss.str(""); oss << std::setfill('0') << std::to_string(time[s]) << std::setw(6); title = "cppFrames/" + oss.str() + ".jpg"; std::cout << "title: " << title << std::endl; image[s].saveToFile(title);//"Frames6/" + std::to_string(kp) + img_extension); STOP_RECORD_TIMER(gpu_time_ms_solve);// avg_gpu_time += gpu_time_ms_solve; avg_gpu_time_cnt += 1; std::cout << "Done (Fourier) Transforming in " << gpu_time_ms_solve <<" ms \n" << std::endl; if (time[s] == 240){ EXIT_FLAG = 1; break; } time[s] += N_STREAMS; } } // Garbage collection delete[] C; gpuErrchk(hipFree(d_A)); gpuErrchk(hipFree(d_C)); gpuErrchk(hipFree(d_data)); gpuErrchk(hipFree(d_A_cal)); gpuErrchk(hipFree(d_B_cal)); // gpuFFTchk(hipfftDestroy(plan)); for (int i = 0; i < N_STREAMS; i++){ gpuErrchk(hipStreamDestroy(stream[i])); gpuFFTchk(hipfftDestroy(plan[i])); } // delete[] norm; gpuErrchk(hipHostUnregister(data)); delete[] data; fits_close_file(fptr, &status); return 0; }
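Both gpu_vis files lean on gpu_vis_streamed.cuh for gpuErrchk, gpuFFTchk, the grid kernel, the timer macros and the thrust predicate; that header is not part of this record. The wrappers below are a sketch of the common error-checking pattern such names usually stand for, not the project's actual definitions.

#include <cuda_runtime.h>
#include <cufft.h>
#include <cstdio>
#include <cstdlib>

// Assumed shape of gpuErrchk: abort with a readable message on any CUDA error.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        exit((int)code);
    }
}

// Assumed shape of gpuFFTchk: cuFFT returns cufftResult, which has no public
// string-formatting helper, so only the numeric code is reported.
#define gpuFFTchk(ans) { fftAssert((ans), __FILE__, __LINE__); }
inline void fftAssert(cufftResult code, const char *file, int line)
{
    if (code != CUFFT_SUCCESS) {
        fprintf(stderr, "cuFFT error %d at %s:%d\n", (int)code, file, line);
        exit((int)code);
    }
}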
0358e6bf2248715801090bb01703efd52cd33b77.cu
#include <iostream> #include <complex> #include <sstream> #include <iomanip> #include <string> #include <stdio.h> // printf for device functions #include <SFML/Graphics.hpp> #include "fitsio.h" // fits files for fake data #include "bitmap_image.hpp" // colorscheme #include <cufft.h> #include <curand.h> #include <cuda_runtime.h> // nvcc -o gpu_vis_streamed gpu_vis_streamed.cu -lsfml-graphics -lcfitsio -O2 -lcufft -std=c++11 -Wno-deprecated-gpu-targets #define IDX(t,f,n) (((t)*220*2048) + ((f)*220) + (n)) #define SZ 1024 #define N_ANTENNAS 10 #define N_BASELINES (N_ANTENNAS - 1)*(N_ANTENNAS)/2 #define N_STREAMS 5 #define N_TIMESTEPS 300 #define N_DUMPPOINTS 10000 #include "gpu_vis_streamed.cuh" int main (){ /*************************************************** Constants of operation ***************************************************/ int idx_usable_start = 300; int idx_usable_end = 1800; int n_usable_idx = idx_usable_end - idx_usable_start; // int tot_f_index = 2048; // float f_total_start = 1.28; float f_tot_end = 1.53; // float bw = f_tot_end - f_total_start; // int n_antennas = 10; // float pos[10][3] = {{ 2.32700165e-05, 4.43329978e+02, 1.02765904e-05}, // { 2.21098953e-05, 4.36579978e+02, 9.64180626e-06}, // { 1.14039406e-05, 3.42179978e+02, 4.94604756e-06}, // {-1.14039406e-05, 1.53679978e+02, -4.94604756e-06}, // { 1.83569971e+02, -4.37387873e-05, 1.57832354e-05}, // {-1.03180029e+02, -1.03154892e-05, -2.81970909e-05}, // {-1.76180029e+02, -1.63615256e-06, -3.96178686e-05}, // // { 1.90319971e+02, -4.48715200e-05, 1.72737512e-05}, // {-2.03330030e+02, 1.63615256e-06, -4.39237587e-05}, // {-9.91080054e+02, -2.16759905e+02, -4.00017642e+00}}; // int ant_order[] = {1, 4, 5, 8, 6, 9, 2, 10, 3, 7}; float pos_extent = 1299.54002797; // std::cout << "Hello, position: " << pos[3][2] << std::endl; /*************************************************** Datastructures for moving data ***************************************************/ sf::Uint8 *img = new sf::Uint8[SZ*SZ*N_STREAMS*4]; // temporary image if (!img){ std::cerr << "img not allocated" << std::endl;} sf::Image image[N_STREAMS]; // SFML image object std::ostringstream oss; std::string title; // gpu timers float gpu_time_ms_solve = -1; float avg_gpu_time = 0; float avg_gpu_time_cnt = 0; // FFT plans and workspaces // fftw_plan plan; // fftw_complex *A, *C; // A = (fftw_complex*) fftw_malloc(SZ*SZ*sizeof(fftw_complex)); // C = (fftw_complex*) fftw_malloc(SZ*SZ*sizeof(fftw_complex)); // cufftComplex *A = (cufftComplex*) malloc(sizeof(cufftComplex)*SZ*SZ); // cufftComplex *C = (cufftComplex*) malloc(sizeof(cufftComplex)*SZ*SZ); // cufftComplex *A = new cufftComplex[SZ*SZ]; //(sizeof(cufftComplex)*SZ*SZ); cufftComplex *C = new cufftComplex[SZ*SZ*N_STREAMS]; if (!C){std::cerr << "Matricies not allocated" << std::endl;} cufftComplex *d_A, *d_C, *d_A_cal, *d_B_cal; float *d_data, *d_norm; int n_pol = 2; int n_complex = 2; int n_floats_per_freq = (N_BASELINES + 10)*n_pol*n_complex; // # floats per frequency gpuErrchk(cudaMalloc(&d_A, SZ*SZ*N_STREAMS*sizeof(cufftComplex))); gpuErrchk(cudaMalloc(&d_C, SZ*SZ*N_STREAMS*sizeof(cufftComplex))); gpuErrchk(cudaMalloc(&d_norm, SZ*SZ*N_STREAMS*sizeof(float))); gpuErrchk(cudaMalloc(&d_A_cal, N_BASELINES*n_usable_idx*sizeof(cufftComplex) )); gpuErrchk(cudaMalloc(&d_B_cal, N_BASELINES*n_usable_idx*sizeof(cufftComplex) )); gpuErrchk(cudaMalloc(&d_data, n_floats_per_freq*n_usable_idx*N_TIMESTEPS*sizeof(float))); // int n_baselines = 55; // gpuErrchk(cudaMalloc( &d_data, 
n_floats_per_freq*n_usable_idx*sizeof(float))); //d_data is indexed by [frequency, baseline] /*************************************************** Get visibility corrections and Upload to GPU ***************************************************/ // A_cal is indexed by [baseline, frequency] std::ifstream A_real, A_imag, B_real, B_imag; A_real.open("AA_real.txt"); A_imag.open("AA_imag.txt"); B_real.open("BB_real.txt"); B_imag.open("BB_imag.txt"); cufftComplex* A_cal = new cufftComplex[N_BASELINES*n_usable_idx]; cufftComplex* B_cal = new cufftComplex[N_BASELINES*n_usable_idx]; if (!A_cal){ std::cerr << "A_cal not allocated" << std::endl;} if (!B_cal){ std::cerr << "B_cal not allocated" << std::endl;} for (int i = 0; i < N_BASELINES; i++){ for (int j = 0; j < n_usable_idx; j++){ A_real >> A_cal[i*n_usable_idx + j].x; A_imag >> A_cal[i*n_usable_idx + j].y; B_real >> B_cal[i*n_usable_idx + j].x; B_imag >> B_cal[i*n_usable_idx + j].y; } } gpuErrchk(cudaMemcpy(d_A_cal, A_cal, N_BASELINES*n_usable_idx*sizeof(cufftComplex), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_B_cal, B_cal, N_BASELINES*n_usable_idx*sizeof(cufftComplex), cudaMemcpyHostToDevice)); delete[] A_cal; delete[] B_cal; /*************************************************** Make Plan for FFT ***************************************************/ std::cout << "Got corrections" << std::endl; cufftHandle plan[N_STREAMS]; cudaStream_t stream[N_STREAMS]; for (int i = 0; i < N_STREAMS; i++){ gpuErrchk(cudaStreamCreate(&(stream[i]))); gpuFFTchk(cufftPlan2d(&(plan[i]), SZ, SZ, CUFFT_C2C)); } // plan = fftw_plan_dft_2d(SZ, SZ, A, C, FFTW_FORWARD, FFTW_ESTIMATE); std::cout << "Made plan" << std::endl; // float* norm = new float[SZ*SZ]; /*************************************************** Pull fits data into memory ***************************************************/ fitsfile *fptr; int status = 0; int ncols = 0; long nrows = 0; int px_per_res_elem = 3; std::cout << "Opening data file" << std::endl; fits_open_data(&fptr, "test.fits", READONLY, &status); std::cout << "Opened data file" <<std::endl; fits_get_num_rows(fptr, &nrows, &status); fits_get_num_cols(fptr, &ncols, &status); std::cout <<"rows: " << nrows <<" cols: " << ncols << std::endl; std::cout << "Frequencies: " << n_usable_idx << std::endl; int column_number = 1; int null_values; float *data = new float[nrows]; gpuErrchk(cudaHostRegister(data, nrows*sizeof(float), cudaHostRegisterPortable)); fits_read_col(fptr, TFLOAT, column_number, 1, 1, nrows, NULL, data, &null_values , &status); std::cout << "Done Reading" << std::endl; // for (int i = 0; i < 10; i++){ // std::cout << "data[" << i << "] = "<< data[IDX(231,300,i)] << std::endl; // } // gpuErrchk(cudaMemcpy(d_data, &(data[IDX(231, 300, 0)]), 220*1500*sizeof(float), cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemcpy(&(data[IDX(231, 300, 0)]), d_data, 220*1500*sizeof(float), cudaMemcpyDeviceToHost)); // for (int i = 0; i < 10; i ++){ // std::cout << "data[" << i << "] = " << data[IDX(231,300,i)] << std::endl; // } /*************************************************** Calculate Constants ***************************************************/ float c = 299792458.0; // float wavelength; float mv = ((float) SZ)/(2.0*pos_extent)* c/(f_tot_end*1E9)/px_per_res_elem; std::cout << "mv: " << mv <<std::endl; std::cout << "pos_extent: " << pos_extent << std::endl; std::cout << "other: " << c/(f_tot_end*1E9)/px_per_res_elem << std::endl; int idx; cufftComplex thresh; thresh.x = 90000.0; thresh.y = 0; thrust::device_vector<cufftComplex> 
d_value(N_DUMPPOINTS); thrust::device_vector<int> d_index(N_DUMPPOINTS); thrust::host_vector<cufftComplex> h_value(N_DUMPPOINTS); thrust::host_vector<int> h_index(N_DUMPPOINTS); /*************************************************** Start continuous Loop ***************************************************/ int time[N_STREAMS]; for (int i = 0; i < N_STREAMS; i++){ time[i] = 220 + i; // t_data[i] = &(d_C[SZ*SZ]); } int EXIT_FLAG = 0; while(!EXIT_FLAG){ for (int s = 0; s < N_STREAMS; s ++){ gpuErrchk(cudaStreamSynchronize(stream[s])); START_TIMER(); int img_strt = s*SZ*SZ; std::cout << "time: " << time[s] << ", stream: " << s << std::endl; // Memcpy minimum data required for gridding. gpuErrchk(cudaMemcpyAsync(&(d_data[time[s] % N_TIMESTEPS]), &(data[IDX(time[s], idx_usable_start, 0)]), n_floats_per_freq*n_usable_idx*sizeof(float), cudaMemcpyHostToDevice, stream[s])); /* TODO: copy data into correct block of memory Identify which data is needed when */ // memset(A, 0, SZ*SZ*sizeof(cufftComplex)); // memset(C, 0, SZ*SZ*sizeof(cufftComplex)); // memset(norm, 0, SZ*SZ*sizeof(float)); // reset workspaces to zero gpuErrchk(cudaMemsetAsync(&(d_A[img_strt]), 0, SZ*SZ*sizeof(cufftComplex), stream[s])); gpuErrchk(cudaMemsetAsync(&(d_norm[img_strt]), 0, SZ*SZ*sizeof(float), stream[s])); std::cout << "calling kernel" << std::endl; grid<<<1,45,0,stream[s]>>>(&(d_A[img_strt]), &(d_data[time[s]]), d_A_cal, d_B_cal, 250, 500); std::cout << "Done Filling Matrix" << std::endl; // gpuErrchk(cudaMemcpy(d_A, A, sizeof(cufftComplex)*SZ*SZ, cudaMemcpyHostToDevice)); gpuFFTchk(cufftExecC2C(plan[s], &(d_A[img_strt]), &(d_C[img_strt]), CUFFT_FORWARD)); // real<<<20,32, 0, stream[s]>>>(&(d_C[img_strt]), SZ*SZ); thrust::device_ptr <cufftComplex> t_data(&(d_C[img_strt])); int ans = thrust::copy_if(thrust::cuda::par.on(stream[s]), make_zip_iterator(make_tuple(t_data, make_counting_iterator(0u))), make_zip_iterator(make_tuple(t_data, make_counting_iterator(0u))) + SZ*SZ, t_data, make_zip_iterator(make_tuple(d_value.begin(), d_index.begin())), greater_than_val<cufftComplex>(thresh) ) - make_zip_iterator(make_tuple(d_value.begin(), d_index.begin())); h_value = d_value; h_index = d_index; gpuErrchk(cudaMemcpyAsync(&(C[img_strt]), &(d_C[img_strt]), sizeof(cufftComplex)*SZ*SZ, cudaMemcpyDeviceToHost, stream[s])); std::cout << "number of points = " << ans << std::endl; float maxx = 0; int indexx = 0; for (int i = 0; i < ans; i++){ if (h_value[i].x > maxx){ // std::cout << "h: " << h_value[i].x; maxx = h_value[i].x; indexx = h_index[i]; } } std::cout << "max value by copy_if = " << maxx << ", at location = " << indexx << std::endl; /* TODO: Do copy_if Only copy the copyif vector, also overflow signal for dumping all the data send data for comparison Calculate coincidences (std::map) At end, search through list of all arrays, if proximate data points, include in sum. 
*/ /* Calculate Statistics */ float max = 0; float avg = 0; float mag = 0; for (int i = 0; i < SZ*SZ; i++){ mag = C[i+img_strt].x;//std::abs(C[i+img_strt].x);//*C[i].x + C[i].y*C[i].y); if (mag > max){ max = mag; } avg += mag; } avg /= SZ*SZ; float stdr = 0; for (int i = 0; i < SZ*SZ; i++){ mag = C[i+img_strt].x;//std::sqrt(C[i+img_strt].x*C[i+img_strt].x);// + C[i+img_strt].y*C[i+img_strt].y); stdr += (avg - mag)*(avg - mag); } stdr = std::sqrt(stdr/(SZ*SZ-1)); std::cout << "time: " << time[s] << ", max: " << max << ", avg: " << avg << ", std: " << stdr << std::endl; std::cout << "Writing to disk"<< std::endl; float cmax = 2000000.0; float cmin = 0; float abs = 0; // write to image, flip spectra for (int i = 0; i < SZ*SZ; i++){ abs = std::sqrt(C[i+img_strt].x*C[i+img_strt].x);// + C[i+img_strt].y*C[i+img_strt].y); unsigned int temp = static_cast<unsigned int> (999*((abs-cmin)/(cmax-cmin))); if (temp > 999){ temp = 999; } rgb_t px_color = jet_colormap[temp]; //remap based on flipped array if(i < SZ*SZ/2){ //first half of array, adding if((i%SZ) < SZ/2){ //up left quadrent idx = i + SZ*SZ/2 + SZ/2; } else { //up right quadrent idx = i + SZ*SZ/2 - SZ/2; } } else{ if((i%SZ) < SZ/2){ //dwn left quadrent idx = i - (SZ*SZ/2 - SZ/2); } else { //dwm right quadrent idx = i - (SZ*SZ/2 + SZ/2); } } img[4*idx + 4*img_strt] = px_color.red; img[4*idx+1 + 4*img_strt] = px_color.green; img[4*idx+2 + 4*img_strt] = px_color.blue; img[4*idx+3 + 4*img_strt] = 255; // std::cout << "img: "<< temp << std::endl; } image[s].create(SZ, SZ, &(img[SZ*SZ*4*s])); //Create and save image oss.str(""); oss << std::setfill('0') << std::to_string(time[s]) << std::setw(6); title = "cppFrames/" + oss.str() + ".jpg"; std::cout << "title: " << title << std::endl; image[s].saveToFile(title);//"Frames6/" + std::to_string(kp) + img_extension); STOP_RECORD_TIMER(gpu_time_ms_solve);// avg_gpu_time += gpu_time_ms_solve; avg_gpu_time_cnt += 1; std::cout << "Done (Fourier) Transforming in " << gpu_time_ms_solve <<" ms \n" << std::endl; if (time[s] == 240){ EXIT_FLAG = 1; break; } time[s] += N_STREAMS; } } // Garbage collection delete[] C; gpuErrchk(cudaFree(d_A)); gpuErrchk(cudaFree(d_C)); gpuErrchk(cudaFree(d_data)); gpuErrchk(cudaFree(d_A_cal)); gpuErrchk(cudaFree(d_B_cal)); // gpuFFTchk(cufftDestroy(plan)); for (int i = 0; i < N_STREAMS; i++){ gpuErrchk(cudaStreamDestroy(stream[i])); gpuFFTchk(cufftDestroy(plan[i])); } // delete[] norm; gpuErrchk(cudaHostUnregister(data)); delete[] data; fits_close_file(fptr, &status); return 0; }
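In the file above, one cufftPlan2d is created per stream, but — at least in the code shown — the plans are never bound to their streams, so every cufftExecC2C call lands on the default stream. If per-stream FFT execution is the intent (an assumption, not something the file states), cufftSetStream is the call that does the binding. A minimal sketch follows; make_stream_plans is a hypothetical helper name, not part of the original program.

// A minimal sketch (assumption: per-stream FFT execution is wanted; make_stream_plans is a
// hypothetical helper, not code from the file above). cufftSetStream binds each plan to its
// stream so later cufftExecC2C calls can overlap instead of serializing on stream 0.
#include <cufft.h>
#include <cuda_runtime.h>

#define SKETCH_N_STREAMS 5
#define SKETCH_SZ 1024

int make_stream_plans(cufftHandle plan[SKETCH_N_STREAMS], cudaStream_t stream[SKETCH_N_STREAMS])
{
    for (int i = 0; i < SKETCH_N_STREAMS; i++) {
        if (cudaStreamCreate(&stream[i]) != cudaSuccess) return -1;
        if (cufftPlan2d(&plan[i], SKETCH_SZ, SKETCH_SZ, CUFFT_C2C) != CUFFT_SUCCESS) return -1;
        if (cufftSetStream(plan[i], stream[i]) != CUFFT_SUCCESS) return -1;  // bind plan to its stream
    }
    return 0;
}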
b012a765f82d40b4d67e3827c68a4edd1580cbfc.hip
// !!! This is a file automatically generated by hipify!!! #include "aes.h" #include "chacha.h" #include <stdio.h> #include <inttypes.h> #include "perftime.h" #include <algorithm> void print_bytes(const char* name, const uint8_t* input, size_t len) { printf("%s:\n", name); for (size_t i = 0; i < ::min(len, (size_t)64); i++) { //if ((i % 1024) == 0) { printf("%x ", input[i]); //} } printf("\n"); } uint32_t verbose_memcmp(void* a, void* b, size_t size) { uint8_t* a8 = (uint8_t*)a; uint8_t* b8 = (uint8_t*)b; uint32_t num_errors = 0; for (size_t j = 0; j < size; j++) { if (a8[j] != b8[j]) { if (num_errors < 1) { printf("mismatch @(j=%zu) ref: %d actual: %d\n", j, a8[j], b8[j]); } num_errors++; } } return num_errors; } typedef struct { size_t len; uint32_t num_keys; uint8_t* input; uint8_t* output; uint8_t* output_ref; uint8_t ivec_orig[AES_BLOCK_SIZE]; uint8_t ivec[AES_BLOCK_SIZE]; uint8_t ivec_ref[AES_BLOCK_SIZE]; uint8_t chacha_ivec[CHACHA_BLOCK_SIZE]; uint8_t chacha_ivec_orig[CHACHA_BLOCK_SIZE]; uint8_t chacha_ivec_ref[CHACHA_BLOCK_SIZE]; } ctx_t; void free_ctx(ctx_t* ctx) { free(ctx->input); free(ctx->output); free(ctx->output_ref); } void clear_ctx(ctx_t* ctx) { memset(ctx->input, 0, ctx->len); memset(ctx->output, 0, ctx->len); memset(ctx->output_ref, 0, ctx->len); } int test_chacha_cbc_sample(ctx_t* gctx) { printf("Starting gpu cbc chacha..\n"); uint8_t key[CHACHA_KEY_SIZE] = {0}; for (int i = 0; i < CHACHA_KEY_SIZE; i++) { key[i] = i; } cuda_chacha20_cbc_encrypt(gctx->input, gctx->output_ref, gctx->len, key, gctx->chacha_ivec); memcpy(gctx->chacha_ivec_ref, gctx->chacha_ivec, sizeof(gctx->chacha_ivec)); printf("\n\n"); print_bytes("output_ref", gctx->output_ref, gctx->len); int iterations = 1; perftime_t start, end; get_time(&start); for (int i = 0; i < iterations; i++) { cuda_chacha20_cbc_encrypt(gctx->input, gctx->output, gctx->len, key, gctx->chacha_ivec); } get_time(&end); print_bytes("output", gctx->output, gctx->len); float time_us = get_diff(&start, &end); float ns_per_byte = 1000.f * time_us / ((float)iterations * (float)gctx->len); printf("time: %f ns/byte time: %f us\n", ns_per_byte, time_us); uint8_t* outputs = (uint8_t*)calloc(gctx->len, gctx->num_keys); uint8_t* ivecs = (uint8_t*)calloc(CHACHA_BLOCK_SIZE, gctx->num_keys); uint8_t* keys = (uint8_t*)calloc(CHACHA_KEY_SIZE, gctx->num_keys); for (uint32_t i = 0; i < gctx->num_keys; i++) { memcpy(&keys[i * CHACHA_KEY_SIZE], key, CHACHA_KEY_SIZE); memcpy(&ivecs[i * CHACHA_BLOCK_SIZE], gctx->chacha_ivec_orig, CHACHA_BLOCK_SIZE); } uint64_t samples[1] = {0}; chacha_cbc_encrypt_many_sample((uint8_t*)gctx->input, outputs, gctx->len, keys, ivecs, gctx->num_keys, samples, 1, 0, &time_us); ns_per_byte = 1000.f * time_us / ((float)gctx->len * (float)gctx->num_keys); printf("gpu time: %f ns/byte time: %f us\n", ns_per_byte, time_us); int output_errors = 0, ivec_errors = 0; for (uint32_t i = 0; i < gctx->num_keys; i++) { if (0 != verbose_memcmp(gctx->output_ref, &outputs[i * gctx->len], gctx->len)) { if (output_errors < 10) { printf("%d gpu output not matching! %x\n", i, outputs[0]); } output_errors++; break; } if (0 != verbose_memcmp(gctx->chacha_ivec_ref, &ivecs[i * CHACHA_BLOCK_SIZE], CHACHA_BLOCK_SIZE)) { if (ivec_errors < 1) { printf("%d ivecs output not matching! 
%x\n", i, ivecs[0]); } ivec_errors++; } } printf("total keys: %d output_errors: %d ivec_errors: %d\n", gctx->num_keys, output_errors, ivec_errors); print_bytes("gpu output", outputs, gctx->len); print_bytes("gpu ivec", ivecs, CHACHA_BLOCK_SIZE); free(outputs); free(ivecs); free(keys); return 0; } int test_chacha_cbc(ctx_t* gctx) { printf("Starting gpu cbc chacha..\n"); uint8_t key[CHACHA_KEY_SIZE] = {0}; for (int i = 0; i < CHACHA_KEY_SIZE; i++) { key[i] = i; } cuda_chacha20_cbc_encrypt(gctx->input, gctx->output_ref, gctx->len, key, gctx->chacha_ivec); memcpy(gctx->chacha_ivec_ref, gctx->chacha_ivec, sizeof(gctx->chacha_ivec)); printf("\n\n"); print_bytes("output_ref", gctx->output_ref, gctx->len); int iterations = 1; perftime_t start, end; get_time(&start); for (int i = 0; i < iterations; i++) { cuda_chacha20_cbc_encrypt(gctx->input, gctx->output, gctx->len, key, gctx->chacha_ivec); } get_time(&end); print_bytes("output", gctx->output, gctx->len); float time_us = get_diff(&start, &end); float ns_per_byte = 1000.f * time_us / ((float)iterations * (float)gctx->len); printf("time: %f ns/byte time: %f us\n", ns_per_byte, time_us); uint8_t* outputs = (uint8_t*)calloc(gctx->len, gctx->num_keys); uint8_t* ivecs = (uint8_t*)calloc(CHACHA_BLOCK_SIZE, gctx->num_keys); uint8_t* keys = (uint8_t*)calloc(CHACHA_KEY_SIZE, gctx->num_keys); for (uint32_t i = 0; i < gctx->num_keys; i++) { memcpy(&keys[i * CHACHA_KEY_SIZE], key, CHACHA_KEY_SIZE); memcpy(&ivecs[i * CHACHA_BLOCK_SIZE], gctx->chacha_ivec_orig, CHACHA_BLOCK_SIZE); } chacha_cbc_encrypt_many((uint8_t*)gctx->input, outputs, gctx->len, keys, ivecs, gctx->num_keys, &time_us); ns_per_byte = 1000.f * time_us / ((float)gctx->len * (float)gctx->num_keys); printf("gpu time: %f ns/byte time: %f us\n", ns_per_byte, time_us); int output_errors = 0, ivec_errors = 0; for (uint32_t i = 0; i < gctx->num_keys; i++) { if (0 != verbose_memcmp(gctx->output_ref, &outputs[i * gctx->len], gctx->len)) { if (output_errors < 10) { printf("%d gpu output not matching! %x\n", i, outputs[0]); } output_errors++; break; } if (0 != verbose_memcmp(gctx->chacha_ivec_ref, &ivecs[i * CHACHA_BLOCK_SIZE], CHACHA_BLOCK_SIZE)) { if (ivec_errors < 1) { printf("%d ivecs output not matching! 
%x\n", i, ivecs[0]); } ivec_errors++; } } printf("total keys: %d output_errors: %d ivec_errors: %d\n", gctx->num_keys, output_errors, ivec_errors); print_bytes("gpu output", outputs, gctx->len); print_bytes("gpu ivec", ivecs, CHACHA_BLOCK_SIZE); free(outputs); free(ivecs); free(keys); return 0; } int test_chacha_ctr(ctx_t* gctx) { printf("Starting gpu ctr chacha..\n"); uint8_t key[CHACHA_KEY_SIZE] = {0}; uint8_t nonce[CHACHA_NONCE_SIZE] = {0}; for (int i = 0; i < CHACHA_KEY_SIZE; i++) { key[i] = i; } for (int i = 0; i < CHACHA_NONCE_SIZE; i++) { nonce[i] = i; } chacha20_ctr_encrypt(gctx->input, gctx->output_ref, gctx->len, key, nonce, 0); printf("\n\n"); print_bytes("output_ref", gctx->output_ref, gctx->len); int iterations = 1; perftime_t start, end; get_time(&start); for (int i = 0; i < iterations; i++) { chacha20_ctr_encrypt(gctx->input, gctx->output, gctx->len, key, nonce, i); } get_time(&end); print_bytes("output", gctx->output, gctx->len); float time_us = get_diff(&start, &end); float ns_per_byte = 1000.f * time_us / ((float)iterations * (float)gctx->len); printf("time: %f ns/byte time: %f us\n", ns_per_byte, time_us); uint8_t* outputs = (uint8_t*)calloc(gctx->len, gctx->num_keys); uint8_t* nonces = (uint8_t*)calloc(CHACHA_NONCE_SIZE, gctx->num_keys); uint8_t* keys = (uint8_t*)calloc(CHACHA_KEY_SIZE, gctx->num_keys); for (uint32_t i = 0; i < gctx->num_keys; i++) { memcpy(&keys[i * CHACHA_KEY_SIZE], key, CHACHA_KEY_SIZE); memcpy(&nonces[i * CHACHA_NONCE_SIZE], nonce, CHACHA_NONCE_SIZE); } chacha_ctr_encrypt_many((uint8_t*)gctx->input, outputs, gctx->len, keys, nonces, gctx->num_keys, &time_us); ns_per_byte = 1000.f * time_us / ((float)gctx->len * (float)gctx->num_keys); printf("gpu time: %f ns/byte time: %f us\n", ns_per_byte, time_us); int output_errors = 0, ivec_errors = 0; for (uint32_t i = 0; i < gctx->num_keys; i++) { if (0 != verbose_memcmp(gctx->output_ref, &outputs[i * gctx->len], gctx->len)) { if (output_errors < 10) { printf("%d gpu output not matching! %x\n", i, outputs[0]); } output_errors++; break; } } printf("total keys: %d output_errors: %d ivec_errors: %d\n", gctx->num_keys, output_errors, ivec_errors); print_bytes("gpu output", outputs, gctx->len); free(outputs); free(nonces); free(keys); return 0; } int test_aes(ctx_t* gctx) { printf("Starting gpu aes..\n"); AES_KEY key; unsigned char key_str[16] = "foobar"; int res = AES_set_encrypt_key(key_str, 128, &key); if (res == 0) { printf("we have a key.. 
rounds=%d\n", key.rounds); } else { printf("error!: %d\n", res); return 1; } AES_cbc_encrypt(gctx->input, gctx->output_ref, gctx->len, &key, gctx->ivec, AES_ENCRYPT); memcpy(gctx->ivec_ref, gctx->ivec, sizeof(gctx->ivec)); printf("\n\n"); print_bytes("output_ref", gctx->output_ref, gctx->len); int iterations = 1; perftime_t start, end; get_time(&start); for (int i = 0; i < iterations; i++) { AES_cbc_encrypt(gctx->input, gctx->output, gctx->len, &key, gctx->ivec, AES_ENCRYPT); } get_time(&end); print_bytes("output", gctx->output, gctx->len); float time_us = get_diff(&start, &end); float ns_per_byte = 1000.f * time_us / ((float)iterations * (float)gctx->len); printf("time: %f ns/byte time: %f us\n", ns_per_byte, time_us); uint8_t* outputs = (uint8_t*)calloc(gctx->len, gctx->num_keys); uint8_t* ivecs = (uint8_t*)calloc(16, gctx->num_keys); AES_KEY* keys = (AES_KEY*)calloc(sizeof(AES_KEY), gctx->num_keys); for (uint32_t i = 0; i < gctx->num_keys; i++) { int res = AES_set_encrypt_key(key_str, 128, &keys[i]); if (res != 0) { printf("Something wrong res: %d\n", res); return 1; } memcpy(&ivecs[i * AES_BLOCK_SIZE], gctx->ivec_orig, AES_BLOCK_SIZE); } AES_cbc_encrypt_many(gctx->input, outputs, gctx->len, keys, ivecs, gctx->num_keys, &time_us); ns_per_byte = 1000.f * time_us / ((float)gctx->len * (float)gctx->num_keys); printf("gpu time: %f ns/byte time: %f us\n", ns_per_byte, time_us); int output_errors = 0, ivec_errors = 0; for (uint32_t i = 0; i < gctx->num_keys; i++) { if (0 != verbose_memcmp(gctx->output_ref, &outputs[i * gctx->len], gctx->len)) { if (output_errors < 10) { printf("%d gpu output not matching! %x\n", i, outputs[0]); } output_errors++; break; } if (0 != verbose_memcmp(gctx->ivec_ref, &ivecs[i * 16], 16)) { if (ivec_errors < 1) { printf("%d ivecs output not matching! %x\n", i, ivecs[0]); } ivec_errors++; } } printf("total keys: %d output_errors: %d ivec_errors: %d\n", gctx->num_keys, output_errors, ivec_errors); print_bytes("gpu output", outputs, gctx->len); print_bytes("gpu ivec", ivecs, 16); free(outputs); free(ivecs); free(keys); return 0; } int main(int argc, const char* argv[]) { printf("Starting gpu crypto..\n"); ctx_t ctx = {0}; ctx.len = 64; ctx.num_keys = 4; int arg = 1; if (arg < argc) { ctx.num_keys = strtol(argv[arg++], NULL, 10); } if (arg < argc) { ctx.len = strtol(argv[arg++], NULL, 10); } if (ctx.num_keys == 0) { printf("ERROR: num_keys == 0!\n"); return 1; } printf("num_keys: %d len: %zu\n", ctx.num_keys, ctx.len); ctx.input = (uint8_t*)calloc(ctx.len, 1); ctx.output = (uint8_t*)calloc(ctx.len, 1); ctx.output_ref = (uint8_t*)calloc(ctx.len, 1); uint8_t ivec_orig[16] = {0xde, 0xad, 0xbe, 0xef}; memcpy(ctx.ivec_orig, ivec_orig, sizeof(ctx.ivec_orig)); memcpy(ctx.ivec, ivec_orig, sizeof(ctx.ivec)); uint8_t chacha_ivec_orig[CHACHA_BLOCK_SIZE] = {0xde, 0xad, 0xbe, 0xef}; memcpy(ctx.chacha_ivec_orig, chacha_ivec_orig, sizeof(ctx.chacha_ivec_orig)); memcpy(ctx.chacha_ivec, chacha_ivec_orig, sizeof(ctx.chacha_ivec)); //test_aes(&ctx); clear_ctx(&ctx); //test_chacha_ctr(&ctx); //clear_ctx(&ctx); //test_chacha_cbc(&ctx); test_chacha_cbc_sample(&ctx); free_ctx(&ctx); return 0; }
b012a765f82d40b4d67e3827c68a4edd1580cbfc.cu
#include "aes.h" #include "chacha.h" #include <stdio.h> #include <inttypes.h> #include "perftime.h" #include <algorithm> void print_bytes(const char* name, const uint8_t* input, size_t len) { printf("%s:\n", name); for (size_t i = 0; i < std::min(len, (size_t)64); i++) { //if ((i % 1024) == 0) { printf("%x ", input[i]); //} } printf("\n"); } uint32_t verbose_memcmp(void* a, void* b, size_t size) { uint8_t* a8 = (uint8_t*)a; uint8_t* b8 = (uint8_t*)b; uint32_t num_errors = 0; for (size_t j = 0; j < size; j++) { if (a8[j] != b8[j]) { if (num_errors < 1) { printf("mismatch @(j=%zu) ref: %d actual: %d\n", j, a8[j], b8[j]); } num_errors++; } } return num_errors; } typedef struct { size_t len; uint32_t num_keys; uint8_t* input; uint8_t* output; uint8_t* output_ref; uint8_t ivec_orig[AES_BLOCK_SIZE]; uint8_t ivec[AES_BLOCK_SIZE]; uint8_t ivec_ref[AES_BLOCK_SIZE]; uint8_t chacha_ivec[CHACHA_BLOCK_SIZE]; uint8_t chacha_ivec_orig[CHACHA_BLOCK_SIZE]; uint8_t chacha_ivec_ref[CHACHA_BLOCK_SIZE]; } ctx_t; void free_ctx(ctx_t* ctx) { free(ctx->input); free(ctx->output); free(ctx->output_ref); } void clear_ctx(ctx_t* ctx) { memset(ctx->input, 0, ctx->len); memset(ctx->output, 0, ctx->len); memset(ctx->output_ref, 0, ctx->len); } int test_chacha_cbc_sample(ctx_t* gctx) { printf("Starting gpu cbc chacha..\n"); uint8_t key[CHACHA_KEY_SIZE] = {0}; for (int i = 0; i < CHACHA_KEY_SIZE; i++) { key[i] = i; } cuda_chacha20_cbc_encrypt(gctx->input, gctx->output_ref, gctx->len, key, gctx->chacha_ivec); memcpy(gctx->chacha_ivec_ref, gctx->chacha_ivec, sizeof(gctx->chacha_ivec)); printf("\n\n"); print_bytes("output_ref", gctx->output_ref, gctx->len); int iterations = 1; perftime_t start, end; get_time(&start); for (int i = 0; i < iterations; i++) { cuda_chacha20_cbc_encrypt(gctx->input, gctx->output, gctx->len, key, gctx->chacha_ivec); } get_time(&end); print_bytes("output", gctx->output, gctx->len); float time_us = get_diff(&start, &end); float ns_per_byte = 1000.f * time_us / ((float)iterations * (float)gctx->len); printf("time: %f ns/byte time: %f us\n", ns_per_byte, time_us); uint8_t* outputs = (uint8_t*)calloc(gctx->len, gctx->num_keys); uint8_t* ivecs = (uint8_t*)calloc(CHACHA_BLOCK_SIZE, gctx->num_keys); uint8_t* keys = (uint8_t*)calloc(CHACHA_KEY_SIZE, gctx->num_keys); for (uint32_t i = 0; i < gctx->num_keys; i++) { memcpy(&keys[i * CHACHA_KEY_SIZE], key, CHACHA_KEY_SIZE); memcpy(&ivecs[i * CHACHA_BLOCK_SIZE], gctx->chacha_ivec_orig, CHACHA_BLOCK_SIZE); } uint64_t samples[1] = {0}; chacha_cbc_encrypt_many_sample((uint8_t*)gctx->input, outputs, gctx->len, keys, ivecs, gctx->num_keys, samples, 1, 0, &time_us); ns_per_byte = 1000.f * time_us / ((float)gctx->len * (float)gctx->num_keys); printf("gpu time: %f ns/byte time: %f us\n", ns_per_byte, time_us); int output_errors = 0, ivec_errors = 0; for (uint32_t i = 0; i < gctx->num_keys; i++) { if (0 != verbose_memcmp(gctx->output_ref, &outputs[i * gctx->len], gctx->len)) { if (output_errors < 10) { printf("%d gpu output not matching! %x\n", i, outputs[0]); } output_errors++; break; } if (0 != verbose_memcmp(gctx->chacha_ivec_ref, &ivecs[i * CHACHA_BLOCK_SIZE], CHACHA_BLOCK_SIZE)) { if (ivec_errors < 1) { printf("%d ivecs output not matching! 
%x\n", i, ivecs[0]); } ivec_errors++; } } printf("total keys: %d output_errors: %d ivec_errors: %d\n", gctx->num_keys, output_errors, ivec_errors); print_bytes("gpu output", outputs, gctx->len); print_bytes("gpu ivec", ivecs, CHACHA_BLOCK_SIZE); free(outputs); free(ivecs); free(keys); return 0; } int test_chacha_cbc(ctx_t* gctx) { printf("Starting gpu cbc chacha..\n"); uint8_t key[CHACHA_KEY_SIZE] = {0}; for (int i = 0; i < CHACHA_KEY_SIZE; i++) { key[i] = i; } cuda_chacha20_cbc_encrypt(gctx->input, gctx->output_ref, gctx->len, key, gctx->chacha_ivec); memcpy(gctx->chacha_ivec_ref, gctx->chacha_ivec, sizeof(gctx->chacha_ivec)); printf("\n\n"); print_bytes("output_ref", gctx->output_ref, gctx->len); int iterations = 1; perftime_t start, end; get_time(&start); for (int i = 0; i < iterations; i++) { cuda_chacha20_cbc_encrypt(gctx->input, gctx->output, gctx->len, key, gctx->chacha_ivec); } get_time(&end); print_bytes("output", gctx->output, gctx->len); float time_us = get_diff(&start, &end); float ns_per_byte = 1000.f * time_us / ((float)iterations * (float)gctx->len); printf("time: %f ns/byte time: %f us\n", ns_per_byte, time_us); uint8_t* outputs = (uint8_t*)calloc(gctx->len, gctx->num_keys); uint8_t* ivecs = (uint8_t*)calloc(CHACHA_BLOCK_SIZE, gctx->num_keys); uint8_t* keys = (uint8_t*)calloc(CHACHA_KEY_SIZE, gctx->num_keys); for (uint32_t i = 0; i < gctx->num_keys; i++) { memcpy(&keys[i * CHACHA_KEY_SIZE], key, CHACHA_KEY_SIZE); memcpy(&ivecs[i * CHACHA_BLOCK_SIZE], gctx->chacha_ivec_orig, CHACHA_BLOCK_SIZE); } chacha_cbc_encrypt_many((uint8_t*)gctx->input, outputs, gctx->len, keys, ivecs, gctx->num_keys, &time_us); ns_per_byte = 1000.f * time_us / ((float)gctx->len * (float)gctx->num_keys); printf("gpu time: %f ns/byte time: %f us\n", ns_per_byte, time_us); int output_errors = 0, ivec_errors = 0; for (uint32_t i = 0; i < gctx->num_keys; i++) { if (0 != verbose_memcmp(gctx->output_ref, &outputs[i * gctx->len], gctx->len)) { if (output_errors < 10) { printf("%d gpu output not matching! %x\n", i, outputs[0]); } output_errors++; break; } if (0 != verbose_memcmp(gctx->chacha_ivec_ref, &ivecs[i * CHACHA_BLOCK_SIZE], CHACHA_BLOCK_SIZE)) { if (ivec_errors < 1) { printf("%d ivecs output not matching! 
%x\n", i, ivecs[0]); } ivec_errors++; } } printf("total keys: %d output_errors: %d ivec_errors: %d\n", gctx->num_keys, output_errors, ivec_errors); print_bytes("gpu output", outputs, gctx->len); print_bytes("gpu ivec", ivecs, CHACHA_BLOCK_SIZE); free(outputs); free(ivecs); free(keys); return 0; } int test_chacha_ctr(ctx_t* gctx) { printf("Starting gpu ctr chacha..\n"); uint8_t key[CHACHA_KEY_SIZE] = {0}; uint8_t nonce[CHACHA_NONCE_SIZE] = {0}; for (int i = 0; i < CHACHA_KEY_SIZE; i++) { key[i] = i; } for (int i = 0; i < CHACHA_NONCE_SIZE; i++) { nonce[i] = i; } chacha20_ctr_encrypt(gctx->input, gctx->output_ref, gctx->len, key, nonce, 0); printf("\n\n"); print_bytes("output_ref", gctx->output_ref, gctx->len); int iterations = 1; perftime_t start, end; get_time(&start); for (int i = 0; i < iterations; i++) { chacha20_ctr_encrypt(gctx->input, gctx->output, gctx->len, key, nonce, i); } get_time(&end); print_bytes("output", gctx->output, gctx->len); float time_us = get_diff(&start, &end); float ns_per_byte = 1000.f * time_us / ((float)iterations * (float)gctx->len); printf("time: %f ns/byte time: %f us\n", ns_per_byte, time_us); uint8_t* outputs = (uint8_t*)calloc(gctx->len, gctx->num_keys); uint8_t* nonces = (uint8_t*)calloc(CHACHA_NONCE_SIZE, gctx->num_keys); uint8_t* keys = (uint8_t*)calloc(CHACHA_KEY_SIZE, gctx->num_keys); for (uint32_t i = 0; i < gctx->num_keys; i++) { memcpy(&keys[i * CHACHA_KEY_SIZE], key, CHACHA_KEY_SIZE); memcpy(&nonces[i * CHACHA_NONCE_SIZE], nonce, CHACHA_NONCE_SIZE); } chacha_ctr_encrypt_many((uint8_t*)gctx->input, outputs, gctx->len, keys, nonces, gctx->num_keys, &time_us); ns_per_byte = 1000.f * time_us / ((float)gctx->len * (float)gctx->num_keys); printf("gpu time: %f ns/byte time: %f us\n", ns_per_byte, time_us); int output_errors = 0, ivec_errors = 0; for (uint32_t i = 0; i < gctx->num_keys; i++) { if (0 != verbose_memcmp(gctx->output_ref, &outputs[i * gctx->len], gctx->len)) { if (output_errors < 10) { printf("%d gpu output not matching! %x\n", i, outputs[0]); } output_errors++; break; } } printf("total keys: %d output_errors: %d ivec_errors: %d\n", gctx->num_keys, output_errors, ivec_errors); print_bytes("gpu output", outputs, gctx->len); free(outputs); free(nonces); free(keys); return 0; } int test_aes(ctx_t* gctx) { printf("Starting gpu aes..\n"); AES_KEY key; unsigned char key_str[16] = "foobar"; int res = AES_set_encrypt_key(key_str, 128, &key); if (res == 0) { printf("we have a key.. 
rounds=%d\n", key.rounds); } else { printf("error!: %d\n", res); return 1; } AES_cbc_encrypt(gctx->input, gctx->output_ref, gctx->len, &key, gctx->ivec, AES_ENCRYPT); memcpy(gctx->ivec_ref, gctx->ivec, sizeof(gctx->ivec)); printf("\n\n"); print_bytes("output_ref", gctx->output_ref, gctx->len); int iterations = 1; perftime_t start, end; get_time(&start); for (int i = 0; i < iterations; i++) { AES_cbc_encrypt(gctx->input, gctx->output, gctx->len, &key, gctx->ivec, AES_ENCRYPT); } get_time(&end); print_bytes("output", gctx->output, gctx->len); float time_us = get_diff(&start, &end); float ns_per_byte = 1000.f * time_us / ((float)iterations * (float)gctx->len); printf("time: %f ns/byte time: %f us\n", ns_per_byte, time_us); uint8_t* outputs = (uint8_t*)calloc(gctx->len, gctx->num_keys); uint8_t* ivecs = (uint8_t*)calloc(16, gctx->num_keys); AES_KEY* keys = (AES_KEY*)calloc(sizeof(AES_KEY), gctx->num_keys); for (uint32_t i = 0; i < gctx->num_keys; i++) { int res = AES_set_encrypt_key(key_str, 128, &keys[i]); if (res != 0) { printf("Something wrong res: %d\n", res); return 1; } memcpy(&ivecs[i * AES_BLOCK_SIZE], gctx->ivec_orig, AES_BLOCK_SIZE); } AES_cbc_encrypt_many(gctx->input, outputs, gctx->len, keys, ivecs, gctx->num_keys, &time_us); ns_per_byte = 1000.f * time_us / ((float)gctx->len * (float)gctx->num_keys); printf("gpu time: %f ns/byte time: %f us\n", ns_per_byte, time_us); int output_errors = 0, ivec_errors = 0; for (uint32_t i = 0; i < gctx->num_keys; i++) { if (0 != verbose_memcmp(gctx->output_ref, &outputs[i * gctx->len], gctx->len)) { if (output_errors < 10) { printf("%d gpu output not matching! %x\n", i, outputs[0]); } output_errors++; break; } if (0 != verbose_memcmp(gctx->ivec_ref, &ivecs[i * 16], 16)) { if (ivec_errors < 1) { printf("%d ivecs output not matching! %x\n", i, ivecs[0]); } ivec_errors++; } } printf("total keys: %d output_errors: %d ivec_errors: %d\n", gctx->num_keys, output_errors, ivec_errors); print_bytes("gpu output", outputs, gctx->len); print_bytes("gpu ivec", ivecs, 16); free(outputs); free(ivecs); free(keys); return 0; } int main(int argc, const char* argv[]) { printf("Starting gpu crypto..\n"); ctx_t ctx = {0}; ctx.len = 64; ctx.num_keys = 4; int arg = 1; if (arg < argc) { ctx.num_keys = strtol(argv[arg++], NULL, 10); } if (arg < argc) { ctx.len = strtol(argv[arg++], NULL, 10); } if (ctx.num_keys == 0) { printf("ERROR: num_keys == 0!\n"); return 1; } printf("num_keys: %d len: %zu\n", ctx.num_keys, ctx.len); ctx.input = (uint8_t*)calloc(ctx.len, 1); ctx.output = (uint8_t*)calloc(ctx.len, 1); ctx.output_ref = (uint8_t*)calloc(ctx.len, 1); uint8_t ivec_orig[16] = {0xde, 0xad, 0xbe, 0xef}; memcpy(ctx.ivec_orig, ivec_orig, sizeof(ctx.ivec_orig)); memcpy(ctx.ivec, ivec_orig, sizeof(ctx.ivec)); uint8_t chacha_ivec_orig[CHACHA_BLOCK_SIZE] = {0xde, 0xad, 0xbe, 0xef}; memcpy(ctx.chacha_ivec_orig, chacha_ivec_orig, sizeof(ctx.chacha_ivec_orig)); memcpy(ctx.chacha_ivec, chacha_ivec_orig, sizeof(ctx.chacha_ivec)); //test_aes(&ctx); clear_ctx(&ctx); //test_chacha_ctr(&ctx); //clear_ctx(&ctx); //test_chacha_cbc(&ctx); test_chacha_cbc_sample(&ctx); free_ctx(&ctx); return 0; }
da1b12720631ab2e02a70f142d10985dad043e04.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

main()
{
    while (error >= criteria){
        error = CG();
        itr ++;
    }
}

CG()
{
    // GPU start (A.p)
    double *d_u, *d_p, *d_r, *d_Ap;

    // allocate device memory
    hipMalloc( &d_u, (N*N)*sizeof(double) );

    // transfer data from CPU to GPU
    hipMemcpy( d_u, u, (N*N)*sizeof(double), hipMemcpyHostToDevice );

    // execute the GPU kernel
    hipLaunchKernelGGL(( operator_GPU) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_Ap, d_p, dx, dy, N, N_ln);

    // transfer data from GPU to CPU
    hipMemcpy( Ap, d_Ap, (N_ln*N_ln)*sizeof(double), hipMemcpyDeviceToHost );

    hipFree(d_u);
}

////////////////////////////////////////////////

main()
{
    // allocate device memory
    hipMalloc( &d_u, (N*N)*sizeof(double) );

    // transfer data from CPU to GPU
    hipMemcpy( d_u, u, (N*N)*sizeof(double), hipMemcpyHostToDevice );

    while (error >= criteria){
        error = CG();
        itr ++;
    }

    // transfer data from GPU to CPU
    hipMemcpy( u, d_u, (N*N)*sizeof(double), hipMemcpyDeviceToHost );

    hipFree(d_u);
}

CG()
{
    // execute the GPU kernel
    hipLaunchKernelGGL(( operator_GPU) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_Ap, d_p, dx, dy, N, N_ln);

    // transfer data from GPU to CPU
    hipMemcpy( Ap, d_Ap, (N_ln*N_ln)*sizeof(double), hipMemcpyDeviceToHost );

    // CPU calculation
    rr0 = inner_product(r,r,0,N,N_ln);
    pAp = inner_product(p,Ap,1,N,N_ln); // pAp
    alpha = rr0/pAp;
}
da1b12720631ab2e02a70f142d10985dad043e04.cu
main()
{
    while (error >= criteria){
        error = CG();
        itr ++;
    }
}

CG()
{
    // GPU start (A.p)
    double *d_u, *d_p, *d_r, *d_Ap;

    // allocate device memory
    cudaMalloc( &d_u, (N*N)*sizeof(double) );

    // transfer data from CPU to GPU
    cudaMemcpy( d_u, u, (N*N)*sizeof(double), cudaMemcpyHostToDevice );

    // execute the GPU kernel
    operator_GPU <<< dimGrid, dimBlock >>> (d_Ap, d_p, dx, dy, N, N_ln);

    // transfer data from GPU to CPU
    cudaMemcpy( Ap, d_Ap, (N_ln*N_ln)*sizeof(double), cudaMemcpyDeviceToHost );

    cudaFree(d_u);
}

////////////////////////////////////////////////

main()
{
    // allocate device memory
    cudaMalloc( &d_u, (N*N)*sizeof(double) );

    // transfer data from CPU to GPU
    cudaMemcpy( d_u, u, (N*N)*sizeof(double), cudaMemcpyHostToDevice );

    while (error >= criteria){
        error = CG();
        itr ++;
    }

    // transfer data from GPU to CPU
    cudaMemcpy( u, d_u, (N*N)*sizeof(double), cudaMemcpyDeviceToHost );

    cudaFree(d_u);
}

CG()
{
    // execute the GPU kernel
    operator_GPU <<< dimGrid, dimBlock >>> (d_Ap, d_p, dx, dy, N, N_ln);

    // transfer data from GPU to CPU
    cudaMemcpy( Ap, d_Ap, (N_ln*N_ln)*sizeof(double), cudaMemcpyDeviceToHost );

    // CPU calculation
    rr0 = inner_product(r,r,0,N,N_ln);
    pAp = inner_product(p,Ap,1,N,N_ln); // pAp
    alpha = rr0/pAp;
}
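The two variants in this pair are sketch notes contrasting where the allocations and host-device copies live: inside CG(), where they repeat on every solver iteration, versus hoisted into main() so only the kernel launch and the small Ap copy remain in the loop. The program below is a self-contained illustration of the second pattern; the saxpy kernel, sizes, and iteration count are throwaway placeholders standing in for operator_GPU and the CG loop, not the original solver.

// Self-contained sketch of the "allocate and copy once, iterate many times" pattern described
// by the second variant above. saxpy and the fixed 5 iterations are illustrative placeholders.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void saxpy(double a, const double *x, double *y, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] += a * x[i];
}

int main() {
    const int n = 1 << 20;
    double *x = new double[n], *y = new double[n];
    for (int i = 0; i < n; i++) { x[i] = 1.0; y[i] = 0.0; }

    double *d_x, *d_y;
    cudaMalloc(&d_x, n * sizeof(double));            // device memory allocated once
    cudaMalloc(&d_y, n * sizeof(double));
    cudaMemcpy(d_x, x, n * sizeof(double), cudaMemcpyHostToDevice);   // H2D copies once, before the loop
    cudaMemcpy(d_y, y, n * sizeof(double), cudaMemcpyHostToDevice);

    for (int itr = 0; itr < 5; itr++) {              // only kernel launches inside the loop
        saxpy<<<(n + 255) / 256, 256>>>(0.5, d_x, d_y, n);
    }

    cudaMemcpy(y, d_y, n * sizeof(double), cudaMemcpyDeviceToHost);   // D2H copy once, after the loop
    printf("y[0] = %f\n", y[0]);
    cudaFree(d_x); cudaFree(d_y);
    delete[] x; delete[] y;
    return 0;
}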
1ac2c84b7e46647f25a7c749f70752e97e60d2a6.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <math.h> #include <helper_cuda.h> #include <thrust/complex.h> #include <algorithm> //#include <thrust> using namespace thrust; #include "ragridder_plan.h" #include "conv_interp_invoker.h" #include "cuft.h" #include "deconv.h" #include "cugridder.h" #include "precomp.h" #include "utils.h" int main(int argc, char *argv[]) { /* Input: M, N1, N2, epsilon method method - conv method M - number of randomly distributed points N1, N2 - output size epsilon - tolerance */ // issue related to accuary - how to set sigma, epsilon, number of plane, beta and kw. the number of w plane may need to increase. int ier = 0; int N = 100; PCS sigma = 2; // upsampling factor int M = 100; PCS epsilon = 1e-6; int kerevalmeth = 0; int method=0; //gpu_method == 0, nupts driven //int ier; PCS *u; CPX *c; u = (PCS *)malloc(M * sizeof(PCS)); //Allocates page-locked memory on the host. c = (CPX *)malloc(M * sizeof(CPX)); PCS *d_u; CUCPX *d_c, *d_fk; CUCPX *d_fw; checkCudaErrors(hipMalloc(&d_u, M * sizeof(PCS))); checkCudaErrors(hipMalloc(&d_c, M * sizeof(CUCPX))); /// pixel size // generating data for (int i = 0; i < M; i++) { u[i] = 2.0 + i*PI/(double)M; //xxxxx c[i].real(randm11()*1000); c[i].imag(i); // wgt[i] = 1; } PCS *k = (PCS*) malloc(sizeof(PCS)*N*10); // PCS pixelsize = 0.01; for (int i = 0; i < N; i++) { /* code */ // k[i] = (int)i-N/2; k[i] = -(double)i/(double)N; // k[i] = i/(double)N; // k[i] = i-N/2 + randm11(); printf("%.10lf ",k[i]); } printf("\n"); //data transfer checkCudaErrors(hipMemcpy(d_u, u, M * sizeof(PCS), hipMemcpyHostToDevice)); //u checkCudaErrors(hipMemcpy(d_c, c, M * sizeof(CUCPX), hipMemcpyHostToDevice)); /* ----------Step2: plan setting------------*/ curafft_plan *plan; plan = new curafft_plan(); memset(plan, 0, sizeof(curafft_plan)); PCS *d_k; checkCudaErrors(hipMalloc((void**)&d_k,sizeof(PCS)*N)); checkCudaErrors(hipMemcpy(d_k,k,sizeof(PCS)*N,hipMemcpyHostToDevice)); plan->d_x = d_k; int direction = 1; //inverse cunufft_setting(N,1,1,M,kerevalmeth,method,direction,epsilon,sigma,3,1,d_u,NULL,NULL,d_c,plan); int nf1 = plan->nf1; printf("conv info printing, sigma %lf, kw %d, beta %lf, nf1 %d\n",plan->copts.upsampfac,plan->copts.kw,plan->copts.ES_beta, nf1); // // fourier_series_appro_invoker(d_fwkerhalf,plan->copts,nf1/2+1); PCS *fwkerhalf = (PCS *)malloc(sizeof(PCS)*(N)); checkCudaErrors(hipMemcpy(fwkerhalf, plan->fwkerhalf1, sizeof(PCS)*(N), hipMemcpyDeviceToHost)); //fourier_series(fwkerhalf,k,plan->copts,N,nf1/2+1); #ifdef DEBUG printf("correction factor printing method1...\n"); for (int i = 0; i < N; i++) { /* code */ printf("%lf ",fwkerhalf[i]); } printf("\n"); #endif // fw (conv res set) checkCudaErrors(hipMalloc((void**)&d_fw,sizeof(CUCPX)*plan->nf1)); checkCudaErrors(hipMemset(d_fw, 0, sizeof(CUCPX)*plan->nf1)); plan->fw = d_fw; // fk malloc and set checkCudaErrors(hipMalloc((void**)&d_fk,sizeof(CUCPX)*N)); plan->fk = d_fk; // calulating result curafft_conv(plan); CPX *fw = (CPX *)malloc(sizeof(CPX)*plan->nf1); hipMemcpy(fw,plan->fw,sizeof(CUCPX)*plan->nf1,hipMemcpyDeviceToHost); #ifdef DEBUG printf("conv result printing...\n"); for (int i = 0; i < nf1; i++) { printf("%lf ",fw[i].real()); } printf("\n"); #endif PCS *kp = (PCS *) malloc(sizeof(PCS)*N); checkCudaErrors(hipMemcpy(kp,plan->d_x,sizeof(PCS)*N,hipMemcpyDeviceToHost)); CPX *fk = (CPX *)malloc(sizeof(CPX)*N); memset(fk,0,sizeof(CPX)*N); // dft for (int i = 0; i < N; i++) { /* code */ for (int j = 0; j < plan->nf1; 
j++) { if(j<nf1/2)fk[i] += fw[j+nf1/2] * exp((PCS)j * kp[i] * IMA); else fk[i] += fw[j-nf1/2] * exp(((PCS)j-(PCS)nf1) * kp[i] * IMA); // does j need to change? // decompose those calculation in order to reach better precision // double temp1; // int idx = j + plan->nf1/2; // temp1 = (double)j/(double)nf1; // if(j>=nf1/2){ // temp1 = temp1 - 1.00000000; // idx -= nf1; // } // temp1 *=PI * 2.0000000000; // temp1 *= k[i]; // fk[i] = fk[i] + fw[idx]*exp((double)temp1*IMA); // // fk[i].real( temp2 ); // fk[i].imag( temp3 ); // if(j<nf1/2){ // fk[i] += fw[j+nf1/2]*exp(k[i]*(((PCS)j)/((PCS)nf1)*2.0*PI*IMA)); // } // else{ // fk[i] += fw[j-nf1/2]*exp(k[i]*((j-nf1)/((PCS)nf1) )*2.0*PI*IMA); // decompose // } } } #ifdef DEBUG printf("dft result printing...\n"); for (int i = 0; i < N; i++) { /* code */ printf("%lf ",fk[i].real()); } printf("\n"); #endif // printf("correction factor printing...\n"); // for(int i=0; i<N1/2; i++){ // printf("%.3g ",fwkerhalf1[i]); // } // printf("\n"); // for(int i=0; i<N2/2; i++){ // printf("%.3g ",fwkerhalf2[i]); // } // printf("\n"); // deconv //PCS *fwkerhalf = (PCS *)malloc(sizeof(PCS)*(N)); //hipMemcpy(fwkerhalf, d_fwkerhalf, sizeof(PCS)*(N), hipMemcpyDeviceToHost); printf("i center %lf, o center %lf\n",plan->ta.i_center[0],plan->ta.o_center[0]); for(int i=0; i<N; i++){ fk[i] = fk[i] / fwkerhalf[i] * exp((k[i]-plan->ta.o_center[0])*plan->ta.i_center[0]*IMA); } // result printing printf("final result printing...\n"); for(int i=0; i<10; i++){ printf("%.10lf ",fk[i].real()); } printf("\n"); CPX *truth = (CPX *) malloc(sizeof(CPX)*N); printf("ground truth printing...\n"); for (int i = 0; i < N; i++) { truth[i] = 0; for (int j = 0; j < M; j++) { truth[i] += c[j] * exp(k[i] * u[j] * IMA); } } for (int i = 0; i < 10; i++) { printf("%.10lf ", truth[i].real()); } printf("\n"); // double fk_max = 0; // for(int i=0; i<M; i++){ // if(abs(fk[i].real())>fk_max)fk_max = abs(fk[i].real()); // } // printf("fk max %lf\n",fk_max); CPX diff; double err=0; double nrm=0; for(int i=0; i<N; i++){ diff = truth[i] - fk[i]; err += real(conj(diff)*diff); nrm += real(conj(fk[i])*fk[i]); } printf("l2 error %.6g\n",sqrt(err/nrm)); //free curafft_free(plan); free(fk); free(u); free(c); return ier; }
1ac2c84b7e46647f25a7c749f70752e97e60d2a6.cu
#include <iostream> #include <iomanip> #include <math.h> #include <helper_cuda.h> #include <thrust/complex.h> #include <algorithm> //#include <thrust> using namespace thrust; #include "ragridder_plan.h" #include "conv_interp_invoker.h" #include "cuft.h" #include "deconv.h" #include "cugridder.h" #include "precomp.h" #include "utils.h" int main(int argc, char *argv[]) { /* Input: M, N1, N2, epsilon method method - conv method M - number of randomly distributed points N1, N2 - output size epsilon - tolerance */ // issue related to accuary - how to set sigma, epsilon, number of plane, beta and kw. the number of w plane may need to increase. int ier = 0; int N = 100; PCS sigma = 2; // upsampling factor int M = 100; PCS epsilon = 1e-6; int kerevalmeth = 0; int method=0; //gpu_method == 0, nupts driven //int ier; PCS *u; CPX *c; u = (PCS *)malloc(M * sizeof(PCS)); //Allocates page-locked memory on the host. c = (CPX *)malloc(M * sizeof(CPX)); PCS *d_u; CUCPX *d_c, *d_fk; CUCPX *d_fw; checkCudaErrors(cudaMalloc(&d_u, M * sizeof(PCS))); checkCudaErrors(cudaMalloc(&d_c, M * sizeof(CUCPX))); /// pixel size // generating data for (int i = 0; i < M; i++) { u[i] = 2.0 + i*PI/(double)M; //xxxxx c[i].real(randm11()*1000); c[i].imag(i); // wgt[i] = 1; } PCS *k = (PCS*) malloc(sizeof(PCS)*N*10); // PCS pixelsize = 0.01; for (int i = 0; i < N; i++) { /* code */ // k[i] = (int)i-N/2; k[i] = -(double)i/(double)N; // k[i] = i/(double)N; // k[i] = i-N/2 + randm11(); printf("%.10lf ",k[i]); } printf("\n"); //data transfer checkCudaErrors(cudaMemcpy(d_u, u, M * sizeof(PCS), cudaMemcpyHostToDevice)); //u checkCudaErrors(cudaMemcpy(d_c, c, M * sizeof(CUCPX), cudaMemcpyHostToDevice)); /* ----------Step2: plan setting------------*/ curafft_plan *plan; plan = new curafft_plan(); memset(plan, 0, sizeof(curafft_plan)); PCS *d_k; checkCudaErrors(cudaMalloc((void**)&d_k,sizeof(PCS)*N)); checkCudaErrors(cudaMemcpy(d_k,k,sizeof(PCS)*N,cudaMemcpyHostToDevice)); plan->d_x = d_k; int direction = 1; //inverse cunufft_setting(N,1,1,M,kerevalmeth,method,direction,epsilon,sigma,3,1,d_u,NULL,NULL,d_c,plan); int nf1 = plan->nf1; printf("conv info printing, sigma %lf, kw %d, beta %lf, nf1 %d\n",plan->copts.upsampfac,plan->copts.kw,plan->copts.ES_beta, nf1); // // fourier_series_appro_invoker(d_fwkerhalf,plan->copts,nf1/2+1); PCS *fwkerhalf = (PCS *)malloc(sizeof(PCS)*(N)); checkCudaErrors(cudaMemcpy(fwkerhalf, plan->fwkerhalf1, sizeof(PCS)*(N), cudaMemcpyDeviceToHost)); //fourier_series(fwkerhalf,k,plan->copts,N,nf1/2+1); #ifdef DEBUG printf("correction factor printing method1...\n"); for (int i = 0; i < N; i++) { /* code */ printf("%lf ",fwkerhalf[i]); } printf("\n"); #endif // fw (conv res set) checkCudaErrors(cudaMalloc((void**)&d_fw,sizeof(CUCPX)*plan->nf1)); checkCudaErrors(cudaMemset(d_fw, 0, sizeof(CUCPX)*plan->nf1)); plan->fw = d_fw; // fk malloc and set checkCudaErrors(cudaMalloc((void**)&d_fk,sizeof(CUCPX)*N)); plan->fk = d_fk; // calulating result curafft_conv(plan); CPX *fw = (CPX *)malloc(sizeof(CPX)*plan->nf1); cudaMemcpy(fw,plan->fw,sizeof(CUCPX)*plan->nf1,cudaMemcpyDeviceToHost); #ifdef DEBUG printf("conv result printing...\n"); for (int i = 0; i < nf1; i++) { printf("%lf ",fw[i].real()); } printf("\n"); #endif PCS *kp = (PCS *) malloc(sizeof(PCS)*N); checkCudaErrors(cudaMemcpy(kp,plan->d_x,sizeof(PCS)*N,cudaMemcpyDeviceToHost)); CPX *fk = (CPX *)malloc(sizeof(CPX)*N); memset(fk,0,sizeof(CPX)*N); // dft for (int i = 0; i < N; i++) { /* code */ for (int j = 0; j < plan->nf1; j++) { if(j<nf1/2)fk[i] += fw[j+nf1/2] * 
exp((PCS)j * kp[i] * IMA); else fk[i] += fw[j-nf1/2] * exp(((PCS)j-(PCS)nf1) * kp[i] * IMA); // does j need to change? // decompose those calculation in order to reach better precision // double temp1; // int idx = j + plan->nf1/2; // temp1 = (double)j/(double)nf1; // if(j>=nf1/2){ // temp1 = temp1 - 1.00000000; // idx -= nf1; // } // temp1 *=PI * 2.0000000000; // temp1 *= k[i]; // fk[i] = fk[i] + fw[idx]*exp((double)temp1*IMA); // // fk[i].real( temp2 ); // fk[i].imag( temp3 ); // if(j<nf1/2){ // fk[i] += fw[j+nf1/2]*exp(k[i]*(((PCS)j)/((PCS)nf1)*2.0*PI*IMA)); // } // else{ // fk[i] += fw[j-nf1/2]*exp(k[i]*((j-nf1)/((PCS)nf1) )*2.0*PI*IMA); // decompose // } } } #ifdef DEBUG printf("dft result printing...\n"); for (int i = 0; i < N; i++) { /* code */ printf("%lf ",fk[i].real()); } printf("\n"); #endif // printf("correction factor printing...\n"); // for(int i=0; i<N1/2; i++){ // printf("%.3g ",fwkerhalf1[i]); // } // printf("\n"); // for(int i=0; i<N2/2; i++){ // printf("%.3g ",fwkerhalf2[i]); // } // printf("\n"); // deconv //PCS *fwkerhalf = (PCS *)malloc(sizeof(PCS)*(N)); //cudaMemcpy(fwkerhalf, d_fwkerhalf, sizeof(PCS)*(N), cudaMemcpyDeviceToHost); printf("i center %lf, o center %lf\n",plan->ta.i_center[0],plan->ta.o_center[0]); for(int i=0; i<N; i++){ fk[i] = fk[i] / fwkerhalf[i] * exp((k[i]-plan->ta.o_center[0])*plan->ta.i_center[0]*IMA); } // result printing printf("final result printing...\n"); for(int i=0; i<10; i++){ printf("%.10lf ",fk[i].real()); } printf("\n"); CPX *truth = (CPX *) malloc(sizeof(CPX)*N); printf("ground truth printing...\n"); for (int i = 0; i < N; i++) { truth[i] = 0; for (int j = 0; j < M; j++) { truth[i] += c[j] * exp(k[i] * u[j] * IMA); } } for (int i = 0; i < 10; i++) { printf("%.10lf ", truth[i].real()); } printf("\n"); // double fk_max = 0; // for(int i=0; i<M; i++){ // if(abs(fk[i].real())>fk_max)fk_max = abs(fk[i].real()); // } // printf("fk max %lf\n",fk_max); CPX diff; double err=0; double nrm=0; for(int i=0; i<N; i++){ diff = truth[i] - fk[i]; err += real(conj(diff)*diff); nrm += real(conj(fk[i])*fk[i]); } printf("l2 error %.6g\n",sqrt(err/nrm)); //free curafft_free(plan); free(fk); free(u); free(c); return ier; }
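The verification at the end of this pair is a brute-force non-uniform DFT, truth[i] = sum over j of c[j] * exp(IMA * k[i] * u[j]), followed by a relative l2 error against the NUFFT result. Pulled out as standalone host code it looks like the sketch below; nudft_direct and rel_l2_error are hypothetical names, and CPX/IMA are spelled out with std::complex<double> so the snippet stands on its own (the file's own PCS/CPX typedefs may differ in precision).

// Standalone sketch of the reference check used above: direct evaluation of the non-uniform
// DFT and the relative l2 error of an approximation against it.
#include <complex>
#include <vector>
#include <cmath>

using CPX = std::complex<double>;
static const CPX IMA(0.0, 1.0);

std::vector<CPX> nudft_direct(const std::vector<double>& k,
                              const std::vector<double>& u,
                              const std::vector<CPX>& c) {
    std::vector<CPX> f(k.size(), CPX(0.0, 0.0));
    for (size_t i = 0; i < k.size(); i++)
        for (size_t j = 0; j < u.size(); j++)
            f[i] += c[j] * std::exp(IMA * k[i] * u[j]);
    return f;
}

double rel_l2_error(const std::vector<CPX>& truth, const std::vector<CPX>& fk) {
    double err = 0.0, nrm = 0.0;
    for (size_t i = 0; i < fk.size(); i++) {
        CPX d = truth[i] - fk[i];
        err += std::norm(d);      // |truth - fk|^2
        nrm += std::norm(fk[i]);  // |fk|^2
    }
    return std::sqrt(err / nrm);
}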
4c690509860d61ea4255be08fecc93f958deb1dd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[32,32,1] --blockDim=[16,16,1]

typedef unsigned char uchar;

texture<uchar, 3, hipReadModeNormalizedFloat> tex;  // 3D texture

typedef unsigned int uint;

#define __umul24(x,y) (x*y)

__global__ void d_render(uint *d_output, uint imageW, uint imageH, float w)
{
    __requires(imageW == 32*16 /*gridDim.x*blockDim.x*/);
    uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
    uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;

    float u = x / (float) imageW;
    float v = y / (float) imageH;

    // read from 3D texture
    float voxel = tex3D(tex, u, v, w);

    if ((x < imageW) && (y < imageH)) {
        // write output color
        uint i = __umul24(y, imageW) + x;
        d_output[i] = voxel*255;
    }
}
4c690509860d61ea4255be08fecc93f958deb1dd.cu
//pass
//--gridDim=[32,32,1] --blockDim=[16,16,1]

typedef unsigned char uchar;

texture<uchar, 3, cudaReadModeNormalizedFloat> tex;  // 3D texture

typedef unsigned int uint;

#define __umul24(x,y) (x*y)

__global__ void d_render(uint *d_output, uint imageW, uint imageH, float w)
{
    __requires(imageW == 32*16 /*gridDim.x*blockDim.x*/);
    uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
    uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;

    float u = x / (float) imageW;
    float v = y / (float) imageH;

    // read from 3D texture
    float voxel = tex3D(tex, u, v, w);

    if ((x < imageW) && (y < imageH)) {
        // write output color
        uint i = __umul24(y, imageW) + x;
        d_output[i] = voxel*255;
    }
}
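This pair uses the legacy texture-reference API (texture<uchar, 3, cudaReadModeNormalizedFloat>), which recent CUDA toolkits have removed. The sketch below expresses the same read path with a texture object; d_render_obj and make_volume_texture are hypothetical names, and the filter and address modes are assumptions, since the host-side texture setup is not part of the file above.

// Hedged sketch (assumptions noted above): the same normalized-float 3D read with the
// texture-object API. The uchar texel is returned as a float in [0,1], hence voxel*255.
#include <cuda_runtime.h>

typedef unsigned char uchar;
typedef unsigned int uint;

__global__ void d_render_obj(uint *d_output, uint imageW, uint imageH, float w,
                             cudaTextureObject_t tex)
{
    uint x = blockIdx.x * blockDim.x + threadIdx.x;
    uint y = blockIdx.y * blockDim.y + threadIdx.y;
    float u = x / (float) imageW;
    float v = y / (float) imageH;
    float voxel = tex3D<float>(tex, u, v, w);   // uchar texel read as normalized float
    if ((x < imageW) && (y < imageH))
        d_output[y * imageW + x] = voxel * 255;
}

cudaTextureObject_t make_volume_texture(cudaArray_t volumeArray)
{
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = volumeArray;

    cudaTextureDesc texDesc = {};
    texDesc.readMode = cudaReadModeNormalizedFloat;  // matches the original texture reference
    texDesc.normalizedCoords = 1;
    texDesc.filterMode = cudaFilterModeLinear;       // assumption; the file does not set this
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.addressMode[2] = cudaAddressModeClamp;

    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
    return texObj;
}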
0f869039cda813e8d8518cc2a388b5d10e22d72f.hip
// !!! This is a file automatically generated by hipify!!! #include "training/communicator.h" #include "functional/functional.h" #include "tensors/tensor_operators.h" #ifdef USE_NCCL #include "hip/hip_runtime.h" #include "nccl.h" #endif namespace marian { #ifdef USE_NCCL class NCCLCommunicator : public Communicator { private: std::vector<ncclComm_t> comms_; std::vector<hipStream_t> streams_; std::vector<int> devices_; void synchronizeAll() { for(int i = 0; i < graphs_.size(); ++i) { hipSetDevice(devices_[i]); hipStreamSynchronize(streams_[i]); } } public: NCCLCommunicator(const std::vector<Ptr<ExpressionGraph>>& graphs) : Communicator(graphs), comms_(graphs.size()), streams_(graphs.size()), devices_(graphs.size()) { LOG(info, "[comm] Using NCCL library for GPU communication"); for(int i = 0; i < graphs_.size(); ++i) { auto device = graphs_[i]->getBackend()->getDevice(); ABORT_IF(device.type != DeviceType::gpu, "NCCL communicator can only be used with GPUs"); devices_[i] = device.no; hipSetDevice(devices_[i]); hipStreamCreate(&streams_[i]); } ncclCommInitAll(comms_.data(), devices_.size(), devices_.data()); } ~NCCLCommunicator() override { for(int i = 0; i < devices_.size(); ++i) { hipSetDevice(devices_[i]); hipStreamDestroy(streams_[i]); ncclCommDestroy(comms_[i]); } } void scatterReduce() override { int totalSize = graphs_[0]->params()->vals()->size(); int shardSize = ceil(totalSize / (float)graphs_.size()); int pos = 0; ncclGroupStart(); for(int i = 0; i < graphs_.size(); ++i) { int size = ::min(shardSize, totalSize); const void* sendbuff = (const void*)graphs_[i]->params()->grads()->data(); auto subgrad = graphs_[i]->params()->grads()->subtensor(pos, size); void* recvbuff = subgrad->data(); ncclReduceScatter(sendbuff, recvbuff, shardSize, ncclFloat, ncclSum, comms_[i], streams_[i]); pos += size; totalSize -= size; } ncclGroupEnd(); synchronizeAll(); } void allGather() override { int totalSize = graphs_[0]->params()->vals()->size(); int shardSize = ceil(totalSize / (float)graphs_.size()); int pos = 0; ncclGroupStart(); for(int i = 0; i < graphs_.size(); ++i) { int size = ::min(shardSize, totalSize); auto subparam = graphs_[i]->params()->vals()->subtensor(pos, size); const void* sendbuff = (const void*)subparam->data(); void* recvbuff = (void*)graphs_[i]->params()->vals()->data(); ncclAllGather(sendbuff, recvbuff, shardSize, ncclFloat, comms_[i], streams_[i]); pos += size; totalSize -= size; } ncclGroupEnd(); synchronizeAll(); } void swapParams(const std::vector<Tensor>& params) override { // Update all graphs with parameter shard ABORT_IF(graphs_.size() < 2, "Swap requires at least two graphs"); auto gather = [this, params](size_t idx, int pos) { // copy parameter shard to each graph, apart from last graph for(int i = 0; i < graphs_.size() - 1; ++i) { auto subParam = graphs_[i]->params()->vals()->subtensor(pos, params[idx]->size()); subParam->copyFrom(params[idx]); } // back-up shard from last graph auto subParamLast = graphs_.back()->params()->vals()->subtensor(pos, params[idx]->size()); params[idx]->copyFrom(subParamLast); auto subParamFirst = graphs_[0]->params()->vals()->subtensor(pos, params[idx]->size()); subParamLast->copyFrom(subParamFirst); }; // execute for each shard this->foreach(gather); } void pushParams(std::vector<Tensor>& params) override { // Copy paramter shard from i-th graph to shard params[i]. // Graphs and shards with the same index live on the same device. 
auto copy = [this, params](size_t idx, int pos) { // copy parameter shard to each graph auto subParam = graphs_[idx]->params()->vals()->subtensor(pos, params[idx]->size()); params[idx]->copyFrom(subParam); }; this->foreach(copy); } void pullParams(const std::vector<Tensor>& params) override { // Update all graphs with parameter shard auto gather = [this, params](size_t idx, int pos) { // copy parameter shard to each graph for(auto graph : graphs_) { auto subParam = graph->params()->vals()->subtensor(pos, params[idx]->size()); subParam->copyFrom(params[idx]); } }; this->foreach(gather); } // Doesn't work yet with NCCL // void pushParams(std::vector<Tensor>& params) { // // Copy paramter shard from i-th graph to shard params[i]. // // Graphs and shards with the same index live on the same device. // int pos = 0; // for(int i = 0; i < graphs_.size(); ++i) { // auto subParam = graphs_[i]->params()->vals()->subtensor(pos, params[i]->size()); // ncclGroupStart(); // ncclBroadcast((const void*)subParam->data(), // (void*)params[i]->data(), // params[i]->size(), // ncclFloat, // 0, // comms_[i], // streams_[i]); // ncclGroupEnd(); // pos += params[i]->size(); // } // synchronizeAll(); // } // void pullParams(const std::vector<Tensor>& params) { // // Update all graphs with parameter shard // int totalSize = graphs_[0]->params()->vals()->size(); // int shardSize = ceil(totalSize / (float)graphs_.size()); // ncclGroupStart(); // for(int i = 0; i < graphs_.size(); ++i) { // const void* sendbuff = (const void*)params[i]->data(); // void* recvbuff = (void*)graphs_[i]->params()->vals()->data(); // ncclAllGather(sendbuff, // recvbuff, // shardSize, // ncclFloat, // comms_[i], // streams_[i]); // } // ncclGroupEnd(); // synchronizeAll(); // } }; #endif Ptr<Communicator> createCommunicator(const std::vector<Ptr<ExpressionGraph>>& graphs, bool noNccl) { #ifdef USE_NCCL if(noNccl) { LOG(warn, "[comm] NCCL communicator overridden"); return New<DefaultCommunicator>(graphs); } // if at least one of the devices is not a gpu, fall-back to default for(auto& graph : graphs) { if(graph->getBackend()->getDevice().type == DeviceType::cpu) { return New<DefaultCommunicator>(graphs); } } size_t d = graphs.size(); if((d & (d - 1)) != 0) { LOG(warn, "[comm] Number of devices {} is not a power of 2 and communication might be slow with NCCL", d); LOG(warn, "[comm] You can switch off NCCL with --no-nccl option", d); } return New<NCCLCommunicator>(graphs); #else return New<DefaultCommunicator>(graphs); #endif } }
0f869039cda813e8d8518cc2a388b5d10e22d72f.cu
#include "training/communicator.h" #include "functional/functional.h" #include "tensors/tensor_operators.h" #ifdef USE_NCCL #include "cuda_runtime.h" #include "nccl.h" #endif namespace marian { #ifdef USE_NCCL class NCCLCommunicator : public Communicator { private: std::vector<ncclComm_t> comms_; std::vector<cudaStream_t> streams_; std::vector<int> devices_; void synchronizeAll() { for(int i = 0; i < graphs_.size(); ++i) { cudaSetDevice(devices_[i]); cudaStreamSynchronize(streams_[i]); } } public: NCCLCommunicator(const std::vector<Ptr<ExpressionGraph>>& graphs) : Communicator(graphs), comms_(graphs.size()), streams_(graphs.size()), devices_(graphs.size()) { LOG(info, "[comm] Using NCCL library for GPU communication"); for(int i = 0; i < graphs_.size(); ++i) { auto device = graphs_[i]->getBackend()->getDevice(); ABORT_IF(device.type != DeviceType::gpu, "NCCL communicator can only be used with GPUs"); devices_[i] = device.no; cudaSetDevice(devices_[i]); cudaStreamCreate(&streams_[i]); } ncclCommInitAll(comms_.data(), devices_.size(), devices_.data()); } ~NCCLCommunicator() override { for(int i = 0; i < devices_.size(); ++i) { cudaSetDevice(devices_[i]); cudaStreamDestroy(streams_[i]); ncclCommDestroy(comms_[i]); } } void scatterReduce() override { int totalSize = graphs_[0]->params()->vals()->size(); int shardSize = ceil(totalSize / (float)graphs_.size()); int pos = 0; ncclGroupStart(); for(int i = 0; i < graphs_.size(); ++i) { int size = std::min(shardSize, totalSize); const void* sendbuff = (const void*)graphs_[i]->params()->grads()->data(); auto subgrad = graphs_[i]->params()->grads()->subtensor(pos, size); void* recvbuff = subgrad->data(); ncclReduceScatter(sendbuff, recvbuff, shardSize, ncclFloat, ncclSum, comms_[i], streams_[i]); pos += size; totalSize -= size; } ncclGroupEnd(); synchronizeAll(); } void allGather() override { int totalSize = graphs_[0]->params()->vals()->size(); int shardSize = ceil(totalSize / (float)graphs_.size()); int pos = 0; ncclGroupStart(); for(int i = 0; i < graphs_.size(); ++i) { int size = std::min(shardSize, totalSize); auto subparam = graphs_[i]->params()->vals()->subtensor(pos, size); const void* sendbuff = (const void*)subparam->data(); void* recvbuff = (void*)graphs_[i]->params()->vals()->data(); ncclAllGather(sendbuff, recvbuff, shardSize, ncclFloat, comms_[i], streams_[i]); pos += size; totalSize -= size; } ncclGroupEnd(); synchronizeAll(); } void swapParams(const std::vector<Tensor>& params) override { // Update all graphs with parameter shard ABORT_IF(graphs_.size() < 2, "Swap requires at least two graphs"); auto gather = [this, params](size_t idx, int pos) { // copy parameter shard to each graph, apart from last graph for(int i = 0; i < graphs_.size() - 1; ++i) { auto subParam = graphs_[i]->params()->vals()->subtensor(pos, params[idx]->size()); subParam->copyFrom(params[idx]); } // back-up shard from last graph auto subParamLast = graphs_.back()->params()->vals()->subtensor(pos, params[idx]->size()); params[idx]->copyFrom(subParamLast); auto subParamFirst = graphs_[0]->params()->vals()->subtensor(pos, params[idx]->size()); subParamLast->copyFrom(subParamFirst); }; // execute for each shard this->foreach(gather); } void pushParams(std::vector<Tensor>& params) override { // Copy paramter shard from i-th graph to shard params[i]. // Graphs and shards with the same index live on the same device. 
auto copy = [this, params](size_t idx, int pos) { // copy parameter shard to each graph auto subParam = graphs_[idx]->params()->vals()->subtensor(pos, params[idx]->size()); params[idx]->copyFrom(subParam); }; this->foreach(copy); } void pullParams(const std::vector<Tensor>& params) override { // Update all graphs with parameter shard auto gather = [this, params](size_t idx, int pos) { // copy parameter shard to each graph for(auto graph : graphs_) { auto subParam = graph->params()->vals()->subtensor(pos, params[idx]->size()); subParam->copyFrom(params[idx]); } }; this->foreach(gather); } // Doesn't work yet with NCCL // void pushParams(std::vector<Tensor>& params) { // // Copy paramter shard from i-th graph to shard params[i]. // // Graphs and shards with the same index live on the same device. // int pos = 0; // for(int i = 0; i < graphs_.size(); ++i) { // auto subParam = graphs_[i]->params()->vals()->subtensor(pos, params[i]->size()); // ncclGroupStart(); // ncclBroadcast((const void*)subParam->data(), // (void*)params[i]->data(), // params[i]->size(), // ncclFloat, // 0, // comms_[i], // streams_[i]); // ncclGroupEnd(); // pos += params[i]->size(); // } // synchronizeAll(); // } // void pullParams(const std::vector<Tensor>& params) { // // Update all graphs with parameter shard // int totalSize = graphs_[0]->params()->vals()->size(); // int shardSize = ceil(totalSize / (float)graphs_.size()); // ncclGroupStart(); // for(int i = 0; i < graphs_.size(); ++i) { // const void* sendbuff = (const void*)params[i]->data(); // void* recvbuff = (void*)graphs_[i]->params()->vals()->data(); // ncclAllGather(sendbuff, // recvbuff, // shardSize, // ncclFloat, // comms_[i], // streams_[i]); // } // ncclGroupEnd(); // synchronizeAll(); // } }; #endif Ptr<Communicator> createCommunicator(const std::vector<Ptr<ExpressionGraph>>& graphs, bool noNccl) { #ifdef USE_NCCL if(noNccl) { LOG(warn, "[comm] NCCL communicator overridden"); return New<DefaultCommunicator>(graphs); } // if at least one of the devices is not a gpu, fall-back to default for(auto& graph : graphs) { if(graph->getBackend()->getDevice().type == DeviceType::cpu) { return New<DefaultCommunicator>(graphs); } } size_t d = graphs.size(); if((d & (d - 1)) != 0) { LOG(warn, "[comm] Number of devices {} is not a power of 2 and communication might be slow with NCCL", d); LOG(warn, "[comm] You can switch off NCCL with --no-nccl option", d); } return New<NCCLCommunicator>(graphs); #else return New<DefaultCommunicator>(graphs); #endif } }
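In the communicator above, scatterReduce followed by allGather is the sharded form of an all-reduce over the flat gradient/parameter tensors. For comparison, when nothing is sharded the same grouped-call structure collapses to one in-place ncclAllReduce per device, as in the sketch below; allReduceGrads is a hypothetical free function, not part of the Marian communicator.

// A minimal sketch (allReduceGrads is a hypothetical helper, not Marian code): one in-place
// ncclAllReduce per device inside a ncclGroupStart/ncclGroupEnd pair, then stream syncs,
// mirroring the grouped-call pattern of scatterReduce()/allGather() above.
#include <vector>
#include <nccl.h>
#include <cuda_runtime.h>

void allReduceGrads(const std::vector<ncclComm_t>& comms,
                    const std::vector<cudaStream_t>& streams,
                    const std::vector<float*>& grads,  // one device gradient buffer per GPU
                    size_t count) {
  ncclGroupStart();
  for(size_t i = 0; i < comms.size(); ++i) {
    // NCCL allows sendbuff == recvbuff for an in-place reduction
    ncclAllReduce((const void*)grads[i], (void*)grads[i], count,
                  ncclFloat, ncclSum, comms[i], streams[i]);
  }
  ncclGroupEnd();
  for(size_t i = 0; i < streams.size(); ++i)
    cudaStreamSynchronize(streams[i]);
}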
b7efab7cef87448ed1cb391afcd976363d6b5bf1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // original code: // https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html // Compile & run with `nvcc scanBlelloch.cu && ./a.out <x>` // where `x` defines the lower and upper integral's limits // i.e. [-x, x] (see the header of `main`) #include <cmath> #include <cstdio> #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 #define CONFLICT_FREE_OFFSET(n) \ ((n) >> LOG_NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) __global__ void blocksPrescan(float * g_odata, float * g_idata, float * SUMS) { extern __shared__ float tmp[]; uint N = 2 * blockDim.x; uint ai = threadIdx.x; uint bi = threadIdx.x + N / 2; g_idata += blockIdx.x * N; g_odata += blockIdx.x * N; // load input into shared memory uint bankOffsetA = CONFLICT_FREE_OFFSET(ai); uint bankOffsetB = CONFLICT_FREE_OFFSET(bi); tmp[ai + bankOffsetA] = g_idata[ai]; tmp[bi + bankOffsetB] = g_idata[bi]; uint offset(1); // build sum in place up the tree (up-sweep) for (uint d=N>>1; d>0; d>>=1) { __syncthreads(); if (threadIdx.x < d) { uint ai = offset * (2 * threadIdx.x + 1) - 1; uint bi = offset * (2 * threadIdx.x + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); tmp[bi] += tmp[ai]; } offset <<= 1; } // write the sum of the array chunk to 'SUMS' // and clear the last element float t; if (!threadIdx.x) { uint IDX = N - 1; IDX += CONFLICT_FREE_OFFSET(N - 1); if (SUMS) { t = tmp[IDX]; SUMS[blockIdx.x] = t; } tmp[IDX] = 0; } // traverse down tree & build scan (down-sweep) for (uint d=1; d<N; d *= 2) { offset >>= 1; __syncthreads(); if (threadIdx.x < d) { uint ai = offset * (2 * threadIdx.x + 1) - 1; uint bi = offset * (2 * threadIdx.x + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); t = tmp[ai]; tmp[ai] = tmp[bi]; tmp[bi] += t; } } __syncthreads(); // write results to device memory g_odata[ai] = tmp[ai + bankOffsetA]; g_odata[bi] = tmp[bi + bankOffsetB]; } __global__ void blocksShifter(float * g_odata, float * SUMS) { g_odata += 2 * blockIdx.x * blockDim.x; g_odata[threadIdx.x] += SUMS[blockIdx.x]; g_odata[threadIdx.x + blockDim.x] += SUMS[blockIdx.x]; } size_t smemSize(int n_el) { int extra_space = n_el / NUM_BANKS; extra_space += extra_space / NUM_BANKS; return sizeof(float) * (n_el + extra_space); } //////////////////////// MAIN ////////////////////////////// // Calculation of the integral \int_{-x}^x \exp(- t^2) dt // //////////////////////////////////////////////////////////// int main(int argc, char ** argv) { // set the integral's limits float x = atof(argv[1]); // discretization settings size_t n_blocks = 512; size_t block_size = 2048; size_t n_el = n_blocks * block_size; printf("Number of discretization points: %i\n", n_el); float * idata, * odata, * sums; hipMallocManaged(&idata, n_el * sizeof(float)); hipMallocManaged(&odata, n_el * sizeof(float)); hipMallocManaged(&sums, n_blocks * sizeof(float)); // calculate integrand's values float t, dt; dt = 2 * x / n_el; for (uint i=0; i<n_el; ++i) { t = - x + i * dt; idata[i] = exp(- t * t) * dt; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // measure the execution time of the framed block hipEventRecord(start); //------------------------------ hipLaunchKernelGGL(( blocksPrescan), dim3(n_blocks), dim3(block_size / 2), smemSize(block_size), 0, odata, idata, sums); hipLaunchKernelGGL(( blocksPrescan), dim3(1), dim3(n_blocks / 2), smemSize(n_blocks), 0, sums, sums, NULL); hipLaunchKernelGGL(( blocksShifter), dim3(n_blocks), dim3(block_size / 
2), 0, 0, odata, sums); //------------------------------ hipEventRecord(stop); hipEventSynchronize(stop); float elapsed_time; hipEventElapsedTime(&elapsed_time, start, stop); printf("Elapsed time: %f ms\n", elapsed_time); printf("Results are written to 'output.bin'\n"); FILE * cfout = fopen("output.bin", "wb"); fwrite(odata, sizeof(float), n_el, cfout); fclose(cfout); hipFree(idata); hipFree(odata); hipFree(sums); }
b7efab7cef87448ed1cb391afcd976363d6b5bf1.cu
// original code: // https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html // Compile & run with `nvcc scanBlelloch.cu && ./a.out <x>` // where `x` defines the lower and upper integral's limits // i.e. [-x, x] (see the header of `main`) #include <cmath> #include <cstdio> #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 #define CONFLICT_FREE_OFFSET(n) \ ((n) >> LOG_NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) __global__ void blocksPrescan(float * g_odata, float * g_idata, float * SUMS) { extern __shared__ float tmp[]; uint N = 2 * blockDim.x; uint ai = threadIdx.x; uint bi = threadIdx.x + N / 2; g_idata += blockIdx.x * N; g_odata += blockIdx.x * N; // load input into shared memory uint bankOffsetA = CONFLICT_FREE_OFFSET(ai); uint bankOffsetB = CONFLICT_FREE_OFFSET(bi); tmp[ai + bankOffsetA] = g_idata[ai]; tmp[bi + bankOffsetB] = g_idata[bi]; uint offset(1); // build sum in place up the tree (up-sweep) for (uint d=N>>1; d>0; d>>=1) { __syncthreads(); if (threadIdx.x < d) { uint ai = offset * (2 * threadIdx.x + 1) - 1; uint bi = offset * (2 * threadIdx.x + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); tmp[bi] += tmp[ai]; } offset <<= 1; } // write the sum of the array chunk to 'SUMS' // and clear the last element float t; if (!threadIdx.x) { uint IDX = N - 1; IDX += CONFLICT_FREE_OFFSET(N - 1); if (SUMS) { t = tmp[IDX]; SUMS[blockIdx.x] = t; } tmp[IDX] = 0; } // traverse down tree & build scan (down-sweep) for (uint d=1; d<N; d *= 2) { offset >>= 1; __syncthreads(); if (threadIdx.x < d) { uint ai = offset * (2 * threadIdx.x + 1) - 1; uint bi = offset * (2 * threadIdx.x + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); t = tmp[ai]; tmp[ai] = tmp[bi]; tmp[bi] += t; } } __syncthreads(); // write results to device memory g_odata[ai] = tmp[ai + bankOffsetA]; g_odata[bi] = tmp[bi + bankOffsetB]; } __global__ void blocksShifter(float * g_odata, float * SUMS) { g_odata += 2 * blockIdx.x * blockDim.x; g_odata[threadIdx.x] += SUMS[blockIdx.x]; g_odata[threadIdx.x + blockDim.x] += SUMS[blockIdx.x]; } size_t smemSize(int n_el) { int extra_space = n_el / NUM_BANKS; extra_space += extra_space / NUM_BANKS; return sizeof(float) * (n_el + extra_space); } //////////////////////// MAIN ////////////////////////////// // Calculation of the integral \int_{-x}^x \exp(- t^2) dt // //////////////////////////////////////////////////////////// int main(int argc, char ** argv) { // set the integral's limits float x = atof(argv[1]); // discretization settings size_t n_blocks = 512; size_t block_size = 2048; size_t n_el = n_blocks * block_size; printf("Number of discretization points: %i\n", n_el); float * idata, * odata, * sums; cudaMallocManaged(&idata, n_el * sizeof(float)); cudaMallocManaged(&odata, n_el * sizeof(float)); cudaMallocManaged(&sums, n_blocks * sizeof(float)); // calculate integrand's values float t, dt; dt = 2 * x / n_el; for (uint i=0; i<n_el; ++i) { t = - x + i * dt; idata[i] = exp(- t * t) * dt; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // measure the execution time of the framed block cudaEventRecord(start); //------------------------------ blocksPrescan<<<n_blocks, block_size / 2, smemSize(block_size)>>>(odata, idata, sums); blocksPrescan<<<1, n_blocks / 2, smemSize(n_blocks)>>>(sums, sums, NULL); blocksShifter<<<n_blocks, block_size / 2>>>(odata, sums); //------------------------------ cudaEventRecord(stop); cudaEventSynchronize(stop); float elapsed_time; cudaEventElapsedTime(&elapsed_time, start, stop); 
printf("Elapsed time: %f ms\n", elapsed_time); printf("Results are written to 'output.bin'\n"); FILE * cfout = fopen("output.bin", "wb"); fwrite(odata, sizeof(float), n_el, cfout); fclose(cfout); cudaFree(idata); cudaFree(odata); cudaFree(sums); }
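Because idata and odata above live in managed memory, the GPU result can be validated directly on the host after the event synchronize. The helper below is a hypothetical addition, not part of the original file: it recomputes the exclusive prefix sum sequentially and compares element-wise, with an assumed relative tolerance.

// Hypothetical host-side check for the Blelloch (exclusive) scan result.
#include <cmath>
#include <cstdio>
#include <vector>

bool checkExclusiveScan(const float * idata, const float * odata, size_t n_el)
{
  std::vector<float> ref(n_el);
  float running = 0.0f;
  for (size_t i = 0; i < n_el; ++i) {
    ref[i] = running;          // exclusive scan: element i excludes idata[i]
    running += idata[i];
  }
  for (size_t i = 0; i < n_el; ++i) {
    float tol = 1e-3f * (1.0f + std::fabs(ref[i]));
    if (std::fabs(ref[i] - odata[i]) > tol) {
      std::printf("mismatch at %zu: host %f vs device %f\n", i, ref[i], odata[i]);
      return false;
    }
  }
  return true;
}

// Possible use in main, after cudaEventSynchronize(stop):
//   if (!checkExclusiveScan(idata, odata, n_el)) printf("scan check failed\n");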
3722f89414f4dc585852188cedf93e52184b4a78.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;
int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0) {
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0) {
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}
}
3722f89414f4dc585852188cedf93e52184b4a78.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;
int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0) {
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0) {
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reduce<<<gridBlock,threadBlock>>>(a,b,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reduce<<<gridBlock,threadBlock>>>(a,b,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reduce<<<gridBlock,threadBlock>>>(a,b,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}
}
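One caveat about the harness above: the steady_clock stop is taken right after the 1000th launch is enqueued, with no device synchronization, so the reported time is dominated by launch overhead unless the launch queue backs up. A sketch of an event-based alternative follows; timeKernelLaunches is a hypothetical helper, and the usage line assumes the names (reduce, gridBlock, threadBlock, a, b, n) from the harness.

// Hypothetical helper: device-side timing of repeated kernel launches with CUDA events.
#include <cuda_runtime.h>
#include <functional>

float timeKernelLaunches(const std::function<void()> &launch, int iters)
{
  cudaEvent_t evStart, evStop;
  cudaEventCreate(&evStart);
  cudaEventCreate(&evStop);
  cudaEventRecord(evStart);
  for (int i = 0; i < iters; ++i) launch();   // enqueue `iters` launches
  cudaEventRecord(evStop);
  cudaEventSynchronize(evStop);               // wait for all queued work to finish
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, evStart, evStop);
  cudaEventDestroy(evStart);
  cudaEventDestroy(evStop);
  return ms;                                  // total milliseconds on the device
}

// Possible use inside the inner loop of the harness above:
//   float ms = timeKernelLaunches([&]{ reduce<<<gridBlock, threadBlock>>>(a, b, n); }, 1000);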
f5e6d233969e2408e0d8e6f82f6c48da5ab086ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <clover_field.h> #include <gauge_field.h> #include <gauge_field_order.h> namespace CloverOrder { using namespace quda; #include <clover_field_order.h> } // CloverOrder namespace quda { #ifdef GPU_CLOVER_DIRAC template<typename Float, typename Clover, typename Fmunu> struct CloverArg { int threads; // number of active threads required int X[4]; // grid dimensions #ifdef MULTI_GPU int border[4]; #endif double cloverCoeff; Clover clover; Fmunu f; CloverArg(Clover &clover, Fmunu& f, const GaugeField &meta, double cloverCoeff) : threads(meta.Volume()), cloverCoeff(cloverCoeff), clover(clover), f(f) { for(int dir=0; dir<4; ++dir) X[dir] = meta.X()[dir]; #ifdef MULTI_GPU for(int dir=0; dir<4; ++dir){ border[dir] = 2; } #endif } }; // Put into clover order // Upper-left block (chirality index 0) // / \ // | 1 + c*I*(F[0,1] - F[2,3]) , c*I*(F[1,2] - F[0,3]) + c*(F[0,2] + F[1,3]) | // | | // | c*I*(F[1,2] - F[0,3]) - c*(F[0,2] + F[1,3]), 1 - c*I*(F[0,1] - F[2,3]) | // | | // \ / // / // | 1 - c*I*(F[0] - F[5]), -c*I*(F[2] - F[3]) - c*(F[1] + F[4]) // | // | -c*I*(F[2] -F[3]) + c*(F[1] + F[4]), 1 + c*I*(F[0] - F[5]) // | // \ // // Lower-right block (chirality index 1) // // / \ // | 1 - c*I*(F[0] + F[5]), -c*I*(F[2] + F[3]) - c*(F[1] - F[4]) | // | | // | -c*I*(F[2]+F[3]) + c*(F[1]-F[4]), 1 + c*I*(F[0] + F[5]) | // \ / // // Core routine for constructing clover term from field strength template<typename Float, typename Clover, typename Fmunu> __device__ __host__ void cloverComputeCore(CloverArg<Float,Clover,Fmunu>& arg, int idx){ int parity = 0; if(idx >= arg.threads/2){ parity = 1; idx -= arg.threads/2; } typedef typename ComplexTypeId<Float>::Type Cmplx; // Load the field-strength tensor from global memory Matrix<Cmplx,3> F[6]; for(int i=0; i<6; ++i){ arg.f.load((Float*)(F[i].data), idx, i, parity); } Cmplx I; I.x = 0; I.y = 1.0; Cmplx coeff; coeff.x = 0; coeff.y = arg.cloverCoeff; Matrix<Cmplx,3> block1[2]; Matrix<Cmplx,3> block2[2]; block1[0] = coeff*(F[0]-F[5]); // (18 + 6*9=) 72 floating-point ops block1[1] = coeff*(F[0]+F[5]); // 72 floating-point ops block2[0] = arg.cloverCoeff*(F[1]+F[4] - I*(F[2]-F[3])); // 126 floating-point ops block2[1] = arg.cloverCoeff*(F[1]-F[4] - I*(F[2]+F[3])); // 126 floating-point ops const int idtab[15]={0,1,3,6,10,2,4,7,11,5,8,12,9,13,14}; Float diag[6]; Cmplx triangle[15]; Float A[72]; // This uses lots of unnecessary memory for(int ch=0; ch<2; ++ch){ // c = 0(1) => positive(negative) chiral block // Compute real diagonal elements for(int i=0; i<3; ++i){ diag[i] = 1.0 - block1[ch](i,i).x; diag[i+3] = 1.0 + block1[ch](i,i).x; } // Compute off diagonal components // First row triangle[0] = - block1[ch](1,0); // Second row triangle[1] = - block1[ch](2,0); triangle[2] = - block1[ch](2,1); // Third row triangle[3] = block2[ch](0,0); triangle[4] = block2[ch](0,1); triangle[5] = block2[ch](0,2); // Fourth row triangle[6] = block2[ch](1,0); triangle[7] = block2[ch](1,1); triangle[8] = block2[ch](1,2); triangle[9] = block1[ch](1,0); // Fifth row triangle[10] = block2[ch](2,0); triangle[11] = block2[ch](2,1); triangle[12] = block2[ch](2,2); triangle[13] = block1[ch](2,0); triangle[14] = block1[ch](2,1); for(int i=0; i<6; ++i){ A[ch*36 + i] = 0.5*diag[i]; } for(int i=0; i<15; ++i){ A[ch*36+6+2*i] = 0.5*triangle[idtab[i]].x; A[ch*36+6+2*i + 1] = 0.5*triangle[idtab[i]].y; } } // ch // 84 floating-point 
ops arg.clover.save(A, idx, parity); return; } template<typename Float, typename Clover, typename Fmunu> __global__ void cloverComputeKernel(CloverArg<Float,Clover,Fmunu> arg){ int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx >= arg.threads) return; cloverComputeCore(arg, idx); } template<typename Float, typename Clover, typename Fmunu> void cloverComputeCPU(CloverArg<Float,Clover,Fmunu> arg){ for(int idx=0; idx<arg.threads; ++idx){ cloverComputeCore(arg, idx); } } template<typename Float, typename Clover, typename Fmunu> class CloverCompute : Tunable { CloverArg<Float,Clover,Fmunu> arg; const GaugeField &meta; const QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune the shared memory. bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: CloverCompute(CloverArg<Float,Clover,Fmunu> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("threads=%d,stride=%d,prec=%lu",arg.threads,arg.clover.stride,sizeof(Float)); } virtual ~CloverCompute() {} void apply(const hipStream_t &stream) { if(location == QUDA_CUDA_FIELD_LOCATION){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); hipLaunchKernelGGL(( cloverComputeKernel), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg); } else { // run the CPU code cloverComputeCPU(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } std::string paramString(const TuneParam &param) const { // Don't print the grid dim. std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), "; ps << "shared=" << param.shared_bytes; return ps.str(); } void preTune(){} void postTune(){} long long flops() const { return 480*arg.threads; } long long bytes() const { return arg.threads*(6*18 + 72)*sizeof(Float); } }; template<typename Float, typename Clover, typename Fmunu> void computeClover(Clover clover, Fmunu f, const GaugeField &meta, Float cloverCoeff, QudaFieldLocation location){ CloverArg<Float,Clover,Fmunu> arg(clover, f, meta, cloverCoeff); CloverCompute<Float,Clover,Fmunu> cloverCompute(arg, meta, location); cloverCompute.apply(0); checkCudaError(); hipDeviceSynchronize(); } template<typename Float> void computeClover(CloverField &clover, const GaugeField &f, Float cloverCoeff, QudaFieldLocation location){ if (f.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (clover.isNative()) { typedef typename CloverOrder::quda::clover_mapper<Float>::type C; computeClover(C(clover,0), FloatNOrder<Float,18,2,18>(f), f, cloverCoeff, location); } else { errorQuda("Clover field order %d not supported", clover.Order()); } // clover order } else { errorQuda("Fmunu field order %d not supported", f.Precision()); } } #endif void computeClover(CloverField &clover, const GaugeField& f, double cloverCoeff, QudaFieldLocation location){ #ifdef GPU_CLOVER_DIRAC if(clover.Precision() != f.Precision()){ errorQuda("Fmunu precision %d must match gauge precision %d", clover.Precision(), f.Precision()); } if (clover.Precision() == QUDA_DOUBLE_PRECISION){ computeClover<double>(clover, f, cloverCoeff, location); } else if(clover.Precision() == QUDA_SINGLE_PRECISION) { computeClover<float>(clover, f, cloverCoeff, location); } else { errorQuda("Precision %d not supported", 
clover.Precision()); } return; #else errorQuda("Clover has not been built"); #endif } } // namespace quda
f5e6d233969e2408e0d8e6f82f6c48da5ab086ae.cu
#include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <clover_field.h> #include <gauge_field.h> #include <gauge_field_order.h> namespace CloverOrder { using namespace quda; #include <clover_field_order.h> } // CloverOrder namespace quda { #ifdef GPU_CLOVER_DIRAC template<typename Float, typename Clover, typename Fmunu> struct CloverArg { int threads; // number of active threads required int X[4]; // grid dimensions #ifdef MULTI_GPU int border[4]; #endif double cloverCoeff; Clover clover; Fmunu f; CloverArg(Clover &clover, Fmunu& f, const GaugeField &meta, double cloverCoeff) : threads(meta.Volume()), cloverCoeff(cloverCoeff), clover(clover), f(f) { for(int dir=0; dir<4; ++dir) X[dir] = meta.X()[dir]; #ifdef MULTI_GPU for(int dir=0; dir<4; ++dir){ border[dir] = 2; } #endif } }; // Put into clover order // Upper-left block (chirality index 0) // / \ // | 1 + c*I*(F[0,1] - F[2,3]) , c*I*(F[1,2] - F[0,3]) + c*(F[0,2] + F[1,3]) | // | | // | c*I*(F[1,2] - F[0,3]) - c*(F[0,2] + F[1,3]), 1 - c*I*(F[0,1] - F[2,3]) | // | | // \ / // / // | 1 - c*I*(F[0] - F[5]), -c*I*(F[2] - F[3]) - c*(F[1] + F[4]) // | // | -c*I*(F[2] -F[3]) + c*(F[1] + F[4]), 1 + c*I*(F[0] - F[5]) // | // \ // // Lower-right block (chirality index 1) // // / \ // | 1 - c*I*(F[0] + F[5]), -c*I*(F[2] + F[3]) - c*(F[1] - F[4]) | // | | // | -c*I*(F[2]+F[3]) + c*(F[1]-F[4]), 1 + c*I*(F[0] + F[5]) | // \ / // // Core routine for constructing clover term from field strength template<typename Float, typename Clover, typename Fmunu> __device__ __host__ void cloverComputeCore(CloverArg<Float,Clover,Fmunu>& arg, int idx){ int parity = 0; if(idx >= arg.threads/2){ parity = 1; idx -= arg.threads/2; } typedef typename ComplexTypeId<Float>::Type Cmplx; // Load the field-strength tensor from global memory Matrix<Cmplx,3> F[6]; for(int i=0; i<6; ++i){ arg.f.load((Float*)(F[i].data), idx, i, parity); } Cmplx I; I.x = 0; I.y = 1.0; Cmplx coeff; coeff.x = 0; coeff.y = arg.cloverCoeff; Matrix<Cmplx,3> block1[2]; Matrix<Cmplx,3> block2[2]; block1[0] = coeff*(F[0]-F[5]); // (18 + 6*9=) 72 floating-point ops block1[1] = coeff*(F[0]+F[5]); // 72 floating-point ops block2[0] = arg.cloverCoeff*(F[1]+F[4] - I*(F[2]-F[3])); // 126 floating-point ops block2[1] = arg.cloverCoeff*(F[1]-F[4] - I*(F[2]+F[3])); // 126 floating-point ops const int idtab[15]={0,1,3,6,10,2,4,7,11,5,8,12,9,13,14}; Float diag[6]; Cmplx triangle[15]; Float A[72]; // This uses lots of unnecessary memory for(int ch=0; ch<2; ++ch){ // c = 0(1) => positive(negative) chiral block // Compute real diagonal elements for(int i=0; i<3; ++i){ diag[i] = 1.0 - block1[ch](i,i).x; diag[i+3] = 1.0 + block1[ch](i,i).x; } // Compute off diagonal components // First row triangle[0] = - block1[ch](1,0); // Second row triangle[1] = - block1[ch](2,0); triangle[2] = - block1[ch](2,1); // Third row triangle[3] = block2[ch](0,0); triangle[4] = block2[ch](0,1); triangle[5] = block2[ch](0,2); // Fourth row triangle[6] = block2[ch](1,0); triangle[7] = block2[ch](1,1); triangle[8] = block2[ch](1,2); triangle[9] = block1[ch](1,0); // Fifth row triangle[10] = block2[ch](2,0); triangle[11] = block2[ch](2,1); triangle[12] = block2[ch](2,2); triangle[13] = block1[ch](2,0); triangle[14] = block1[ch](2,1); for(int i=0; i<6; ++i){ A[ch*36 + i] = 0.5*diag[i]; } for(int i=0; i<15; ++i){ A[ch*36+6+2*i] = 0.5*triangle[idtab[i]].x; A[ch*36+6+2*i + 1] = 0.5*triangle[idtab[i]].y; } } // ch // 84 floating-point ops arg.clover.save(A, idx, parity); return; } template<typename Float, typename Clover, 
typename Fmunu> __global__ void cloverComputeKernel(CloverArg<Float,Clover,Fmunu> arg){ int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx >= arg.threads) return; cloverComputeCore(arg, idx); } template<typename Float, typename Clover, typename Fmunu> void cloverComputeCPU(CloverArg<Float,Clover,Fmunu> arg){ for(int idx=0; idx<arg.threads; ++idx){ cloverComputeCore(arg, idx); } } template<typename Float, typename Clover, typename Fmunu> class CloverCompute : Tunable { CloverArg<Float,Clover,Fmunu> arg; const GaugeField &meta; const QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune the shared memory. bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: CloverCompute(CloverArg<Float,Clover,Fmunu> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("threads=%d,stride=%d,prec=%lu",arg.threads,arg.clover.stride,sizeof(Float)); } virtual ~CloverCompute() {} void apply(const cudaStream_t &stream) { if(location == QUDA_CUDA_FIELD_LOCATION){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); cloverComputeKernel<<<tp.grid,tp.block,tp.shared_bytes>>>(arg); } else { // run the CPU code cloverComputeCPU(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } std::string paramString(const TuneParam &param) const { // Don't print the grid dim. std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), "; ps << "shared=" << param.shared_bytes; return ps.str(); } void preTune(){} void postTune(){} long long flops() const { return 480*arg.threads; } long long bytes() const { return arg.threads*(6*18 + 72)*sizeof(Float); } }; template<typename Float, typename Clover, typename Fmunu> void computeClover(Clover clover, Fmunu f, const GaugeField &meta, Float cloverCoeff, QudaFieldLocation location){ CloverArg<Float,Clover,Fmunu> arg(clover, f, meta, cloverCoeff); CloverCompute<Float,Clover,Fmunu> cloverCompute(arg, meta, location); cloverCompute.apply(0); checkCudaError(); cudaDeviceSynchronize(); } template<typename Float> void computeClover(CloverField &clover, const GaugeField &f, Float cloverCoeff, QudaFieldLocation location){ if (f.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (clover.isNative()) { typedef typename CloverOrder::quda::clover_mapper<Float>::type C; computeClover(C(clover,0), FloatNOrder<Float,18,2,18>(f), f, cloverCoeff, location); } else { errorQuda("Clover field order %d not supported", clover.Order()); } // clover order } else { errorQuda("Fmunu field order %d not supported", f.Precision()); } } #endif void computeClover(CloverField &clover, const GaugeField& f, double cloverCoeff, QudaFieldLocation location){ #ifdef GPU_CLOVER_DIRAC if(clover.Precision() != f.Precision()){ errorQuda("Fmunu precision %d must match gauge precision %d", clover.Precision(), f.Precision()); } if (clover.Precision() == QUDA_DOUBLE_PRECISION){ computeClover<double>(clover, f, cloverCoeff, location); } else if(clover.Precision() == QUDA_SINGLE_PRECISION) { computeClover<float>(clover, f, cloverCoeff, location); } else { errorQuda("Precision %d not supported", clover.Precision()); } return; #else errorQuda("Clover has not been built"); #endif } } // namespace quda
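cloverComputeCore above folds the checkerboard parity into a single flat thread index: the first half of the range is even parity, the second half odd. A standalone restatement of that mapping, under a hypothetical name:

// Hypothetical helper mirroring the index split at the top of cloverComputeCore.
__host__ __device__ inline void splitParity(int idx, int threads, int &parity, int &cbIdx)
{
  parity = (idx >= threads / 2) ? 1 : 0;   // second half of the range is odd parity
  cbIdx  = idx - parity * (threads / 2);   // index within the chosen parity
}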
c7286d4f7c6f6fe829144496b63a54e2f5da96c1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/ztranspose.cu, normal z -> d, Mon Jun 25 18:24:13 2018 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_d #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) __device__ void dtranspose_device( int m, int n, const double *A, int lda, double *AT, int ldat) { __shared__ double sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void dtranspose_kernel( int m, int n, const double *A, int lda, double *AT, int ldat) { dtranspose_device(m, n, A, lda, AT, ldat); } __global__ void dtranspose_kernel_batched( int m, int n, double **dA_array, int lda, double **dAT_array, int ldat) { int batchid = blockIdx.z; dtranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /***************************************************************************//** Purpose ------- dtranspose copies and transposes a matrix dA to matrix dAT. Same as dtranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA DOUBLE PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT DOUBLE PRECISION array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_dtranspose( magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); hipLaunchKernelGGL(( dtranspose_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dAT, lddat ); } /***************************************************************************//** Purpose ------- dtranspose_batched copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as dtranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array DOUBLE PRECISION* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array DOUBLE PRECISION* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_transpose_batched *******************************************************************************/ extern "C" void magmablas_dtranspose_batched( magma_int_t m, magma_int_t n, double **dA_array, magma_int_t ldda, double **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY, 1 ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); hipLaunchKernelGGL(( dtranspose_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA_array, ldda, dAT_array, lddat ); }
c7286d4f7c6f6fe829144496b63a54e2f5da96c1.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/ztranspose.cu, normal z -> d, Mon Jun 25 18:24:13 2018 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_d #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) __device__ void dtranspose_device( int m, int n, const double *A, int lda, double *AT, int ldat) { __shared__ double sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void dtranspose_kernel( int m, int n, const double *A, int lda, double *AT, int ldat) { dtranspose_device(m, n, A, lda, AT, ldat); } __global__ void dtranspose_kernel_batched( int m, int n, double **dA_array, int lda, double **dAT_array, int ldat) { int batchid = blockIdx.z; dtranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /***************************************************************************//** Purpose ------- dtranspose copies and transposes a matrix dA to matrix dAT. Same as dtranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA DOUBLE PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT DOUBLE PRECISION array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_dtranspose( magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); dtranspose_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dAT, lddat ); } /***************************************************************************//** Purpose ------- dtranspose_batched copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as dtranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array DOUBLE PRECISION* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array DOUBLE PRECISION* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_transpose_batched *******************************************************************************/ extern "C" void magmablas_dtranspose_batched( magma_int_t m, magma_int_t n, double **dA_array, magma_int_t ldda, double **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY, 1 ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); dtranspose_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA_array, ldda, dAT_array, lddat ); }
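The __shared__ double sA[NB][NX+1] declaration in dtranspose_device is the usual padding trick: the extra column keeps the column-wise reads of the transposed store from mapping to a single shared-memory bank. The kernel below is a minimal standalone illustration of the same idea with a square TILE x TILE block; it is not MAGMA code, and the kernel name and launch shape are illustrative.

// Minimal padded shared-memory transpose (illustrative, not part of MAGMA).
#include <cuda_runtime.h>

#define TILE 32

__global__ void transpose_tiled(const double * __restrict__ in,
                                double * __restrict__ out,
                                int rows, int cols)
{
    __shared__ double tile[TILE][TILE + 1];   // +1 column avoids bank conflicts

    int x = blockIdx.x * TILE + threadIdx.x;  // column index into `in`
    int y = blockIdx.y * TILE + threadIdx.y;  // row index into `in`
    if (x < cols && y < rows)
        tile[threadIdx.y][threadIdx.x] = in[y * cols + x];
    __syncthreads();

    // Swap block coordinates for the write: each thread stores out[col][row] = in[row][col].
    x = blockIdx.y * TILE + threadIdx.x;      // column of `out` (= row of `in`)
    y = blockIdx.x * TILE + threadIdx.y;      // row of `out` (= column of `in`)
    if (x < rows && y < cols)
        out[y * rows + x] = tile[threadIdx.x][threadIdx.y];
}

// Launch shape: dim3 block(TILE, TILE); dim3 grid((cols + TILE - 1) / TILE, (rows + TILE - 1) / TILE);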
17b61b390e215712cd3cadb8e23f27f9d9d57b78.hip
// !!! This is a file automatically generated by hipify!!! /** * @brief * @author Oded Green <br> * NVIDIA Corporation <br> * [email protected] * @date October, 2018 * * * @copyright Copyright 2017 Hornet. All rights reserved. * * @license{<blockquote> * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * </blockquote>} * * Please cite: * * J. Fox, O. Green, K. Gabert, X. An, D. Bader, Fast and Adaptive List Intersections on the GPU, * IEEE High Performance Extreme Computing Conference (HPEC), * Waltham, Massachusetts, 2018 * * O. Green, J. Fox, A. Tripathy, A. Watkins, K. Gabert, E. Kim, X. An, K. Aatish, D. Bader, * Logarithmic Radix Binning and Vectorized Triangle Counting, * IEEE High Performance Extreme Computing Conference (HPEC), * Waltham, Massachusetts, 2018 * * O. Green, P. Yalamanchili ,L.M. 
Munguia, Fast Triangle Counting on GPU, * Irregular Applications: Architectures and Algorithms (IA3), * New Orleans, Louisiana, 2014 * */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "Static/ClusteringCoefficient/cc.cuh" #include "Static/TriangleCounting/triangle2.cuh" #include "Core/StandardAPI.hpp" using namespace xlib; using namespace gpu; namespace hornets_nest { ClusteringCoefficient::ClusteringCoefficient(HornetGraph& hornet) : TriangleCounting2(hornet) // StaticAlgorithm(hornet) { } ClusteringCoefficient::~ClusteringCoefficient(){ TriangleCounting2::release(); release(); } struct OPERATOR_LocalClusteringCoefficients { triangle_t *d_triPerVertex; clusterCoeff_t *d_ccLocal; OPERATOR (Vertex &vertex) { degree_t deg = vertex.degree(); d_ccLocal[vertex.id()] = 0; if(deg>1){ d_ccLocal[vertex.id()] = (clusterCoeff_t)d_triPerVertex[vertex.id()]/(clusterCoeff_t)(deg*(deg-1)); } } }; void ClusteringCoefficient::reset(){ TriangleCounting2::reset(); } #include <hipcub/hipcub.hpp> void ClusteringCoefficient::run(){ TriangleCounting2::run(); forAllVertices(hornet, OPERATOR_LocalClusteringCoefficients { triPerVertex,d_ccLocal }); int _num_items = hornet.nV(); void* _d_temp_storage { nullptr }; size_t _temp_storage_bytes { 0 }; hipcub::DeviceReduce::Sum(_d_temp_storage, _temp_storage_bytes,d_ccLocal, d_ccGlobal, _num_items); // Allocating storage needed by CUB for the reduce hipMalloc(&_d_temp_storage, _temp_storage_bytes); hipcub::DeviceReduce::Sum(_d_temp_storage, _temp_storage_bytes, d_ccLocal, d_ccGlobal, _num_items); gpu::copyToHost(d_ccGlobal, 1, &h_ccGlobal); gpu::free(_d_temp_storage); std::cout << "Global CC " << h_ccGlobal/hornet.nV() << std::endl; } void ClusteringCoefficient::release(){ gpu::free(d_ccLocal); gpu::free(d_ccGlobal); d_ccLocal = nullptr; } void ClusteringCoefficient::init(){ //printf("Inside init. Printing hornet.nV(): %d\n", hornet.nV()); gpu::allocate(d_ccLocal, hornet.nV()); gpu::allocate(d_ccGlobal, 1); TriangleCounting2::init(); reset(); } void ClusteringCoefficient::copyLocalClusCoeffToHost(clusterCoeff_t* h_tcs){ gpu::copyToHost(d_ccLocal, hornet.nV(), h_tcs); } } // namespace hornets_nest
17b61b390e215712cd3cadb8e23f27f9d9d57b78.cu
/** * @brief * @author Oded Green <br> * NVIDIA Corporation <br> * [email protected] * @date October, 2018 * * * @copyright Copyright © 2017 Hornet. All rights reserved. * * @license{<blockquote> * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * </blockquote>} * * Please cite: * * J. Fox, O. Green, K. Gabert, X. An, D. Bader, “Fast and Adaptive List Intersections on the GPU”, * IEEE High Performance Extreme Computing Conference (HPEC), * Waltham, Massachusetts, 2018 * * O. Green, J. Fox, A. Tripathy, A. Watkins, K. Gabert, E. Kim, X. An, K. Aatish, D. Bader, * “Logarithmic Radix Binning and Vectorized Triangle Counting”, * IEEE High Performance Extreme Computing Conference (HPEC), * Waltham, Massachusetts, 2018 * * O. Green, P. Yalamanchili ,L.M. 
Munguia, “Fast Triangle Counting on GPU”, * Irregular Applications: Architectures and Algorithms (IA3), * New Orleans, Louisiana, 2014 * */ #include <cuda.h> #include <cuda_runtime.h> #include "Static/ClusteringCoefficient/cc.cuh" #include "Static/TriangleCounting/triangle2.cuh" #include "Core/StandardAPI.hpp" using namespace xlib; using namespace gpu; namespace hornets_nest { ClusteringCoefficient::ClusteringCoefficient(HornetGraph& hornet) : TriangleCounting2(hornet) // StaticAlgorithm(hornet) { } ClusteringCoefficient::~ClusteringCoefficient(){ TriangleCounting2::release(); release(); } struct OPERATOR_LocalClusteringCoefficients { triangle_t *d_triPerVertex; clusterCoeff_t *d_ccLocal; OPERATOR (Vertex &vertex) { degree_t deg = vertex.degree(); d_ccLocal[vertex.id()] = 0; if(deg>1){ d_ccLocal[vertex.id()] = (clusterCoeff_t)d_triPerVertex[vertex.id()]/(clusterCoeff_t)(deg*(deg-1)); } } }; void ClusteringCoefficient::reset(){ TriangleCounting2::reset(); } #include <cub/cub.cuh> void ClusteringCoefficient::run(){ TriangleCounting2::run(); forAllVertices(hornet, OPERATOR_LocalClusteringCoefficients { triPerVertex,d_ccLocal }); int _num_items = hornet.nV(); void* _d_temp_storage { nullptr }; size_t _temp_storage_bytes { 0 }; cub::DeviceReduce::Sum(_d_temp_storage, _temp_storage_bytes,d_ccLocal, d_ccGlobal, _num_items); // Allocating storage needed by CUB for the reduce cudaMalloc(&_d_temp_storage, _temp_storage_bytes); cub::DeviceReduce::Sum(_d_temp_storage, _temp_storage_bytes, d_ccLocal, d_ccGlobal, _num_items); gpu::copyToHost(d_ccGlobal, 1, &h_ccGlobal); gpu::free(_d_temp_storage); std::cout << "Global CC " << h_ccGlobal/hornet.nV() << std::endl; } void ClusteringCoefficient::release(){ gpu::free(d_ccLocal); gpu::free(d_ccGlobal); d_ccLocal = nullptr; } void ClusteringCoefficient::init(){ //printf("Inside init. Printing hornet.nV(): %d\n", hornet.nV()); gpu::allocate(d_ccLocal, hornet.nV()); gpu::allocate(d_ccGlobal, 1); TriangleCounting2::init(); reset(); } void ClusteringCoefficient::copyLocalClusCoeffToHost(clusterCoeff_t* h_tcs){ gpu::copyToHost(d_ccLocal, hornet.nV(), h_tcs); } } // namespace hornets_nest
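ClusteringCoefficient::run() above uses CUB's two-phase reduction idiom: the first cub::DeviceReduce::Sum call, made with a null temp-storage pointer, only reports the required scratch size, and the second call performs the reduction. A self-contained version of that idiom is sketched below; sumOnDevice and its buffers are illustrative names, and error checking is omitted.

// Illustrative wrapper around the two-phase cub::DeviceReduce::Sum pattern.
#include <cub/cub.cuh>
#include <cuda_runtime.h>

float sumOnDevice(const float *d_in, int num_items)
{
    float *d_out = nullptr;
    cudaMalloc(&d_out, sizeof(float));

    void  *d_temp_storage     = nullptr;
    size_t temp_storage_bytes = 0;
    // Pass 1: d_temp_storage == nullptr, so CUB only writes the required size.
    cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    // Pass 2: the actual reduction into d_out.
    cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);

    float h_out = 0.0f;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_temp_storage);
    cudaFree(d_out);
    return h_out;
}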
be6efed45c86745bc1316ebc75f5383689343134.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "network.h" #include "detection_layer.h" #include "cost_layer.h" #include "utils.h" #include "parser.h" #include "box.h" #include "image.h" //#include <sys/time.h> #include <time.h> #include <winsock.h> #include "gettimeofday.h" } #pragma comment(lib, "opencv_core249.lib") #pragma comment(lib, "opencv_imgproc249.lib") #pragma comment(lib, "opencv_objdetect249.lib") #pragma comment(lib, "opencv_gpu249.lib") #pragma comment(lib, "opencv_features2d249.lib") #pragma comment(lib, "opencv_highgui249.lib") //#pragma comment(lib, "opencv_ml249.lib") #pragma comment(lib, "opencv_stitching249.lib") #pragma comment(lib, "opencv_nonfree249.lib") //#pragma comment(lib, "opencv_superres249.lib") #pragma comment(lib, "opencv_calib3d249.lib") #pragma comment(lib, "opencv_flann249.lib") //#pragma comment(lib, "opencv_contrib249.lib") //#pragma comment(lib, "opencv_legacy249.lib") #pragma comment(lib, "opencv_photo249.lib") #pragma comment(lib, "opencv_video249.lib") #ifdef OPENCV #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" extern "C" image ipl_to_image(IplImage* src); extern "C" void convert_yolo_detections(float *predictions, int classes, int num, int square, int side, int w, int h, float thresh, float **probs, box *boxes, int only_objectness); extern "C" void draw_yolo(image im, int num, float thresh, box *boxes, float **probs); extern "C" char *voc_names[]; extern "C" image voc_labels[]; static float **probs; static box *boxes; static network net; static image in ; static image in_s ; static image det ; static image det_s; static image disp ; static cv::VideoCapture cap; static float fps = 0; static float demo_thresh = 0; void *fetch_in_thread(void *ptr) { cv::Mat frame_m; cap >> frame_m; IplImage frame = frame_m; in = ipl_to_image(&frame); rgbgr_image(in); in_s = resize_image(in, net.w, net.h); return 0; } void *detect_in_thread(void *ptr) { float nms = .4; detection_layer l = net.layers[net.n-1]; float *X = det_s.data; float *predictions = network_predict(net, X); free_image(det_s); convert_yolo_detections(predictions, l.classes, l.n, l.sqrt, l.side, 1, 1, demo_thresh, probs, boxes, 0); if (nms > 0) do_nms(boxes, probs, l.side*l.side*l.n, l.classes, nms); printf("\033[2J"); printf("\033[1;1H"); printf("\nFPS:%.0f\n",fps); printf("Objects:\n\n"); //draw_detections(det, l.side*l.side*l.n, demo_thresh, boxes, probs, voc_names, voc_labels, CLASSNUM); draw_detections(det, l.side*l.side*l.n, demo_thresh, boxes, probs, voc_names, voc_labels, l.classes); return 0; } extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index, char *filename) { demo_thresh = thresh; printf("YOLO demo\n"); net = parse_network_cfg(cfgfile); if(weightfile){ load_weights(&net, weightfile); } set_batch_network(&net, 1); srand(2222222); if (filename){ cap = cv::VideoCapture(filename); } else{ cap = cv::VideoCapture(cam_index); } //cv::VideoCapture cam(cam_index); //cap = cam; if(!cap.isOpened()) error("Couldn't connect to webcam.\n"); detection_layer l = net.layers[net.n-1]; int j; boxes = (box *)calloc(l.side*l.side*l.n, sizeof(box)); probs = (float **)calloc(l.side*l.side*l.n, sizeof(float *)); for(j = 0; j < l.side*l.side*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float *)); pthread_t fetch_thread; pthread_t detect_thread; fetch_in_thread(0); det = in; det_s = in_s; fetch_in_thread(0); 
detect_in_thread(0); disp = det; det = in; det_s = in_s; while(1){ struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed"); if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed"); show_image(disp, "YOLO"); free_image(disp); cvWaitKey(1); pthread_join(fetch_thread, 0); pthread_join(detect_thread, 0); disp = det; det = in; det_s = in_s; gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); float curr = 1000000.f/((long int)tval_result.tv_usec); fps = .9*fps + .1*curr; } } #else extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index){ fprintf(stderr, "YOLO demo needs OpenCV for webcam images.\n"); } #endif
be6efed45c86745bc1316ebc75f5383689343134.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "network.h" #include "detection_layer.h" #include "cost_layer.h" #include "utils.h" #include "parser.h" #include "box.h" #include "image.h" //#include <sys/time.h> #include <time.h> #include <winsock.h> #include "gettimeofday.h" } #pragma comment(lib, "opencv_core249.lib") #pragma comment(lib, "opencv_imgproc249.lib") #pragma comment(lib, "opencv_objdetect249.lib") #pragma comment(lib, "opencv_gpu249.lib") #pragma comment(lib, "opencv_features2d249.lib") #pragma comment(lib, "opencv_highgui249.lib") //#pragma comment(lib, "opencv_ml249.lib") #pragma comment(lib, "opencv_stitching249.lib") #pragma comment(lib, "opencv_nonfree249.lib") //#pragma comment(lib, "opencv_superres249.lib") #pragma comment(lib, "opencv_calib3d249.lib") #pragma comment(lib, "opencv_flann249.lib") //#pragma comment(lib, "opencv_contrib249.lib") //#pragma comment(lib, "opencv_legacy249.lib") #pragma comment(lib, "opencv_photo249.lib") #pragma comment(lib, "opencv_video249.lib") #ifdef OPENCV #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" extern "C" image ipl_to_image(IplImage* src); extern "C" void convert_yolo_detections(float *predictions, int classes, int num, int square, int side, int w, int h, float thresh, float **probs, box *boxes, int only_objectness); extern "C" void draw_yolo(image im, int num, float thresh, box *boxes, float **probs); extern "C" char *voc_names[]; extern "C" image voc_labels[]; static float **probs; static box *boxes; static network net; static image in ; static image in_s ; static image det ; static image det_s; static image disp ; static cv::VideoCapture cap; static float fps = 0; static float demo_thresh = 0; void *fetch_in_thread(void *ptr) { cv::Mat frame_m; cap >> frame_m; IplImage frame = frame_m; in = ipl_to_image(&frame); rgbgr_image(in); in_s = resize_image(in, net.w, net.h); return 0; } void *detect_in_thread(void *ptr) { float nms = .4; detection_layer l = net.layers[net.n-1]; float *X = det_s.data; float *predictions = network_predict(net, X); free_image(det_s); convert_yolo_detections(predictions, l.classes, l.n, l.sqrt, l.side, 1, 1, demo_thresh, probs, boxes, 0); if (nms > 0) do_nms(boxes, probs, l.side*l.side*l.n, l.classes, nms); printf("\033[2J"); printf("\033[1;1H"); printf("\nFPS:%.0f\n",fps); printf("Objects:\n\n"); //draw_detections(det, l.side*l.side*l.n, demo_thresh, boxes, probs, voc_names, voc_labels, CLASSNUM); draw_detections(det, l.side*l.side*l.n, demo_thresh, boxes, probs, voc_names, voc_labels, l.classes); return 0; } extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index, char *filename) { demo_thresh = thresh; printf("YOLO demo\n"); net = parse_network_cfg(cfgfile); if(weightfile){ load_weights(&net, weightfile); } set_batch_network(&net, 1); srand(2222222); if (filename){ cap = cv::VideoCapture(filename); } else{ cap = cv::VideoCapture(cam_index); } //cv::VideoCapture cam(cam_index); //cap = cam; if(!cap.isOpened()) error("Couldn't connect to webcam.\n"); detection_layer l = net.layers[net.n-1]; int j; boxes = (box *)calloc(l.side*l.side*l.n, sizeof(box)); probs = (float **)calloc(l.side*l.side*l.n, sizeof(float *)); for(j = 0; j < l.side*l.side*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float *)); pthread_t fetch_thread; pthread_t detect_thread; fetch_in_thread(0); det = in; det_s = in_s; fetch_in_thread(0); detect_in_thread(0); disp = det; det = in; det_s = in_s; while(1){ struct timeval 
tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed"); if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed"); show_image(disp, "YOLO"); free_image(disp); cvWaitKey(1); pthread_join(fetch_thread, 0); pthread_join(detect_thread, 0); disp = det; det = in; det_s = in_s; gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); float curr = 1000000.f/((long int)tval_result.tv_usec); fps = .9*fps + .1*curr; } } #else extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index){ fprintf(stderr, "YOLO demo needs OpenCV for webcam images.\n"); } #endif
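A small caveat in the demo loop above: the per-iteration FPS is computed from tval_result.tv_usec alone, so any frame that takes a second or longer produces a nonsensical value. The helper below is a hypothetical fix, not part of the original file; it folds tv_sec into the elapsed time before inverting.

/* Hypothetical helper: frames-per-second for one loop iteration. */
#include <sys/time.h>

static float iteration_fps(struct timeval tval_result)
{
    double elapsed_s = (double)tval_result.tv_sec + (double)tval_result.tv_usec / 1e6;
    return (elapsed_s > 0.0) ? (float)(1.0 / elapsed_s) : 0.0f;
}

/* Possible use in the loop above: fps = .9f*fps + .1f*iteration_fps(tval_result); */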
81d4919be5ec852c493b2d101e0968d0df66a43d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <c10/util/Half.h> #include "bias_act.h" //------------------------------------------------------------------------ // Helpers. template <class T> struct InternalType; template <> struct InternalType<double> { typedef double scalar_t; }; template <> struct InternalType<float> { typedef float scalar_t; }; template <> struct InternalType<c10::Half> { typedef float scalar_t; }; //------------------------------------------------------------------------ // CUDA kernel. template <class T, int A> __global__ void bias_act_kernel(bias_act_kernel_params p) { typedef typename InternalType<T>::scalar_t scalar_t; int G = p.grad; scalar_t alpha = (scalar_t)p.alpha; scalar_t gain = (scalar_t)p.gain; scalar_t clamp = (scalar_t)p.clamp; scalar_t one = (scalar_t)1; scalar_t two = (scalar_t)2; scalar_t expRange = (scalar_t)80; scalar_t halfExpRange = (scalar_t)40; scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946; scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717; // Loop over elements. int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x; for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x) { // Load. scalar_t x = (scalar_t)((const T*)p.x)[xi]; scalar_t b = (p.b) ? (scalar_t)((const T*)p.b)[(xi / p.stepB) % p.sizeB] : 0; scalar_t xref = (p.xref) ? (scalar_t)((const T*)p.xref)[xi] : 0; scalar_t yref = (p.yref) ? (scalar_t)((const T*)p.yref)[xi] : 0; scalar_t dy = (p.dy) ? (scalar_t)((const T*)p.dy)[xi] : one; scalar_t yy = (gain != 0) ? yref / gain : 0; scalar_t y = 0; // Apply bias. ((G == 0) ? x : xref) += b; // linear if (A == 1) { if (G == 0) y = x; if (G == 1) y = x; } // relu if (A == 2) { if (G == 0) y = (x > 0) ? x : 0; if (G == 1) y = (yy > 0) ? x : 0; } // lrelu if (A == 3) { if (G == 0) y = (x > 0) ? x : x * alpha; if (G == 1) y = (yy > 0) ? x : x * alpha; } // tanh if (A == 4) { if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); } if (G == 1) y = x * (one - yy * yy); if (G == 2) y = x * (one - yy * yy) * (-two * yy); } // sigmoid if (A == 5) { if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one); if (G == 1) y = x * yy * (one - yy); if (G == 2) y = x * yy * (one - yy) * (one - two * yy); } // elu if (A == 6) { if (G == 0) y = (x >= 0) ? x : exp(x) - one; if (G == 1) y = (yy >= 0) ? x : x * (yy + one); if (G == 2) y = (yy >= 0) ? 0 : x * (yy + one); } // selu if (A == 7) { if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one); if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha); if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha); } // softplus if (A == 8) { if (G == 0) y = (x > expRange) ? x : log(exp(x) + one); if (G == 1) y = x * (one - exp(-yy)); if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); } } // swish if (A == 9) { if (G == 0) y = (x < -expRange) ? 0 : x / (exp(-x) + one); else { scalar_t c = exp(xref); scalar_t d = c + one; if (G == 1) y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d); else y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d); yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain; } } // Apply gain. y *= gain * dy; // Clamp. if (clamp >= 0) { if (G == 0) y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp; else y = (yref > -clamp & yref < clamp) ? y : 0; } // Store. 
((T*)p.y)[xi] = (T)y; } } //------------------------------------------------------------------------ // CUDA kernel selection. template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p) { if (p.act == 1) return (void*)bias_act_kernel<T, 1>; if (p.act == 2) return (void*)bias_act_kernel<T, 2>; if (p.act == 3) return (void*)bias_act_kernel<T, 3>; if (p.act == 4) return (void*)bias_act_kernel<T, 4>; if (p.act == 5) return (void*)bias_act_kernel<T, 5>; if (p.act == 6) return (void*)bias_act_kernel<T, 6>; if (p.act == 7) return (void*)bias_act_kernel<T, 7>; if (p.act == 8) return (void*)bias_act_kernel<T, 8>; if (p.act == 9) return (void*)bias_act_kernel<T, 9>; return NULL; } //------------------------------------------------------------------------ // Template specializations. template void* choose_bias_act_kernel<double> (const bias_act_kernel_params& p); template void* choose_bias_act_kernel<float> (const bias_act_kernel_params& p); template void* choose_bias_act_kernel<c10::Half> (const bias_act_kernel_params& p); //------------------------------------------------------------------------
81d4919be5ec852c493b2d101e0968d0df66a43d.cu
#include <c10/util/Half.h> #include "bias_act.h" //------------------------------------------------------------------------ // Helpers. template <class T> struct InternalType; template <> struct InternalType<double> { typedef double scalar_t; }; template <> struct InternalType<float> { typedef float scalar_t; }; template <> struct InternalType<c10::Half> { typedef float scalar_t; }; //------------------------------------------------------------------------ // CUDA kernel. template <class T, int A> __global__ void bias_act_kernel(bias_act_kernel_params p) { typedef typename InternalType<T>::scalar_t scalar_t; int G = p.grad; scalar_t alpha = (scalar_t)p.alpha; scalar_t gain = (scalar_t)p.gain; scalar_t clamp = (scalar_t)p.clamp; scalar_t one = (scalar_t)1; scalar_t two = (scalar_t)2; scalar_t expRange = (scalar_t)80; scalar_t halfExpRange = (scalar_t)40; scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946; scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717; // Loop over elements. int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x; for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x) { // Load. scalar_t x = (scalar_t)((const T*)p.x)[xi]; scalar_t b = (p.b) ? (scalar_t)((const T*)p.b)[(xi / p.stepB) % p.sizeB] : 0; scalar_t xref = (p.xref) ? (scalar_t)((const T*)p.xref)[xi] : 0; scalar_t yref = (p.yref) ? (scalar_t)((const T*)p.yref)[xi] : 0; scalar_t dy = (p.dy) ? (scalar_t)((const T*)p.dy)[xi] : one; scalar_t yy = (gain != 0) ? yref / gain : 0; scalar_t y = 0; // Apply bias. ((G == 0) ? x : xref) += b; // linear if (A == 1) { if (G == 0) y = x; if (G == 1) y = x; } // relu if (A == 2) { if (G == 0) y = (x > 0) ? x : 0; if (G == 1) y = (yy > 0) ? x : 0; } // lrelu if (A == 3) { if (G == 0) y = (x > 0) ? x : x * alpha; if (G == 1) y = (yy > 0) ? x : x * alpha; } // tanh if (A == 4) { if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); } if (G == 1) y = x * (one - yy * yy); if (G == 2) y = x * (one - yy * yy) * (-two * yy); } // sigmoid if (A == 5) { if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one); if (G == 1) y = x * yy * (one - yy); if (G == 2) y = x * yy * (one - yy) * (one - two * yy); } // elu if (A == 6) { if (G == 0) y = (x >= 0) ? x : exp(x) - one; if (G == 1) y = (yy >= 0) ? x : x * (yy + one); if (G == 2) y = (yy >= 0) ? 0 : x * (yy + one); } // selu if (A == 7) { if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one); if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha); if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha); } // softplus if (A == 8) { if (G == 0) y = (x > expRange) ? x : log(exp(x) + one); if (G == 1) y = x * (one - exp(-yy)); if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); } } // swish if (A == 9) { if (G == 0) y = (x < -expRange) ? 0 : x / (exp(-x) + one); else { scalar_t c = exp(xref); scalar_t d = c + one; if (G == 1) y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d); else y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d); yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain; } } // Apply gain. y *= gain * dy; // Clamp. if (clamp >= 0) { if (G == 0) y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp; else y = (yref > -clamp & yref < clamp) ? y : 0; } // Store. 
((T*)p.y)[xi] = (T)y; } } //------------------------------------------------------------------------ // CUDA kernel selection. template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p) { if (p.act == 1) return (void*)bias_act_kernel<T, 1>; if (p.act == 2) return (void*)bias_act_kernel<T, 2>; if (p.act == 3) return (void*)bias_act_kernel<T, 3>; if (p.act == 4) return (void*)bias_act_kernel<T, 4>; if (p.act == 5) return (void*)bias_act_kernel<T, 5>; if (p.act == 6) return (void*)bias_act_kernel<T, 6>; if (p.act == 7) return (void*)bias_act_kernel<T, 7>; if (p.act == 8) return (void*)bias_act_kernel<T, 8>; if (p.act == 9) return (void*)bias_act_kernel<T, 9>; return NULL; } //------------------------------------------------------------------------ // Template specializations. template void* choose_bias_act_kernel<double> (const bias_act_kernel_params& p); template void* choose_bias_act_kernel<float> (const bias_act_kernel_params& p); template void* choose_bias_act_kernel<c10::Half> (const bias_act_kernel_params& p); //------------------------------------------------------------------------
74e04830d4dd879a0b5962fef1199025c96e2d4d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#include <iostream>
#include <stdio.h>

#define size 64
#define threads 128

using namespace std;

/*
One example of thread reduction
__global__ void callOperation(int *v) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int step = 1;
    int numOfThreads = blockDim.x;
    while (numOfThreads > 0) {
        if (tid < size) {
            int first = tid * step * 2;
            int second = first + step;
            v[first] += v[second];
        }
        step <<= 1;
        numOfThreads >>= 1;
    }
}
*/

// In-place reduction in global memory; assumes a single block covers the array.
__global__ void callOperationReduction(int *v) {
    int tid = threadIdx.x;
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;

    for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
        // Guard against reading past the end of the array when the block
        // has more threads than there are elements.
        if (tid < offset && tid + offset < size) {
            v[tid] += v[tid + offset];
        }
        __syncthreads();
    }

    if (tid == 0) {
        v[blockIdx.x] = v[0];
    }
}

// Reduction in statically allocated shared memory.
__global__ void callOperationReductionSharedStatic(int *v) {
    // One slot per thread; threads past the end of the array contribute zeros.
    __shared__ int s_v[threads];

    int tid = threadIdx.x;
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;

    s_v[tid] = (tidx < size) ? v[tidx] : 0;
    __syncthreads();

    for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
        if (tid < offset) {
            s_v[tid] += s_v[tid + offset];
        }
        __syncthreads();
    }

    // Each block writes its partial sum back to global memory.
    if (tid == 0) {
        v[blockIdx.x] = s_v[0];
    }
}

// Reduction in dynamically allocated shared memory (size passed at launch).
__global__ void callOperationReductionSharedDynamic(int *v) {
    extern __shared__ int s_v[];

    int tid = threadIdx.x;
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;

    s_v[tid] = (tidx < size) ? v[tidx] : 0;
    __syncthreads();

    for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
        if (tid < offset) {
            s_v[tid] += s_v[tid + offset];
        }
        __syncthreads();
    }

    if (tid == 0) {
        v[blockIdx.x] = s_v[0];
    }
}

int main() {
    int *v, *sum2;
    int *d_v;

    v = (int*)malloc(size * sizeof(int));
    sum2 = (int*)malloc(size * sizeof(int));

    for (int i = 0; i < size; i++) {
        v[i] = i;
    }

    cout << "Array: " << endl;
    for (int i = 0; i < size; i++) {
        cout << v[i] << "\t";
    }

    hipMalloc((void**)&d_v, size * sizeof(int));
    hipMemcpy(d_v, v, size * sizeof(int), hipMemcpyHostToDevice);

    // Enough blocks to cover all elements.
    dim3 numberOfBlocks((size + threads - 1) / threads, 1, 1);
    dim3 numberOfThreads(threads, 1, 1);

    //callOperationReduction<<<numberOfBlocks, numberOfThreads>>>(d_v);
    callOperationReductionSharedStatic<<<numberOfBlocks, numberOfThreads>>>(d_v);
    //callOperationReductionSharedDynamic<<<numberOfBlocks, numberOfThreads, threads * sizeof(int)>>>(d_v);

    hipMemcpy(sum2, d_v, size * sizeof(int), hipMemcpyDeviceToHost);

    cout << "\n\nArray: " << endl;
    for (int i = 0; i < size; i++) {
        cout << sum2[i] << "\t";
    }

    cout << "\n\nSum is: " << sum2[0] << endl;

    hipFree(d_v);
    free(v);
    free(sum2);

    hipDeviceReset();

    cout << endl;
    system("PAUSE");
    return 0;
}
74e04830d4dd879a0b5962fef1199025c96e2d4d.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #define size 64 #define threads 128 using namespace std; /* Jedan primer ThreadReduction-a __global__ void callOperation(int *v) { int tid = blockDim.x * blockIdx.x + threadIdx.x; int step = 1; int numOfThreads = blockDim.x; while (numOfThreads > 0) { if (tid < size) { int first = tid * step * 2; int second = first + step; v[first] += v[second]; } step <<= 1; numOfThreads >>= 1; } } */ __global__ void callOperationReduction(int *v) { int tid = threadIdx.x; int tidx = blockDim.x * blockIdx.x + threadIdx.x; for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (tid < offset) { v[tid] += v[tid + offset]; } __syncthreads(); } if (tid == 0) { v[blockIdx.x] = v[0]; } } __global__ void callOperationReductionSharedStatic(int *v) { __shared__ int s_v[size]; int tid = threadIdx.x; int tidx = blockDim.x * blockIdx.x + threadIdx.x; if (tidx < size) { s_v[tid] = v[tidx]; } __syncthreads(); for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (tid < offset) { s_v[tid] += s_v[tid + offset]; } __syncthreads(); } if (tid == 0) { v[blockIdx.x] = s_v[0]; } } __global__ void callOperationReductionSharedDynamic(int *v) { extern __shared__ int s_v[]; int tid = threadIdx.x; int tidx = blockDim.x * blockIdx.x + threadIdx.x; if (tidx < size) { s_v[tid] = v[tidx]; } __syncthreads(); for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (tid < offset) { s_v[tid] += s_v[tid + offset]; } __syncthreads(); } if (tid == 0) { v[blockIdx.x] = s_v[0]; } } int main() { int *v,*sum2, sum; int *d_v, *d_res; v = (int*)malloc(size * sizeof(int)); sum2 = (int*)malloc(size * sizeof(int)); for (int i = 0; i < size; i++) { v[i] = i; } cout << "Niz: " << endl; for (int i = 0; i < size; i++) { cout << v[i] << "\t"; } cudaMalloc((void**)&d_v, size * sizeof(int)); cudaMemcpy(d_v, v, size * sizeof(int), cudaMemcpyHostToDevice); dim3 numberOfBlocks(size / threads + 1, 1, 1); dim3 numberOfThreads(threads, 1, 1); //callOperationReduction << <numberOfBlocks, numberOfThreads >> > (d_v); callOperationReductionSharedStatic << <numberOfBlocks, numberOfThreads >> > (d_v); //callOperationReductionSharedDynamic << <numberOfBlocks, numberOfThreads, size * sizeof(int) >> > (d_v); cudaMemcpy(sum2, d_v, size * sizeof(int), cudaMemcpyDeviceToHost); cout << "\n\nNiz: " << endl; for (int i = 0; i < size; i++) { cout <<sum2[i] << "\t"; } cout << "\n\nSum is: " << sum2[0] << endl; cudaFree(d_v); free(v); free(sum2); cudaDeviceReset(); cout << endl; system("PAUSE"); return 0; }
5f47a7992f02fe46dec8e0011124982dbd086ea1.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>

#include "CAIPLib.h"

#pragma comment(lib, "CAIPLib.lib")

using namespace std;

int main(int argc, char** argv)
{
    caChar8* pFilename = NULL;
    caUInt16 numberOfIterations = 0;
    TImage image;

    if (argc != 5)
    {
        cout << "Incorrect count of arguments\n"
             << "Please, check out a format of input data.\n"
             << "It should be -f filename -n numbers_of_iterations\n";
        return -1;
    }
    else if (argc == 5)
    {
        caUInt8 i = 1;
        while (i < argc)
        {
            if (strcmp(argv[i], "-f") == 0)
            {
                i++;
                pFilename = argv[i];
            }
            else if (strcmp(argv[i], "-n") == 0)
            {
                i++;
                numberOfIterations = atoi(argv[i]);
            }
            i++;
        }
    }

    if (caCheckError(caLoadImage(pFilename, image)))
        return -1;
    if (caCheckError(caRgb2Gray(image, image)))
        return -1;
    if (caCheckError(caBoxBlur(image, image, numberOfIterations)))
        return -1;
    if (caCheckError(caSaveImage("result.tga\0", image)))
        return -1;
    if (caCheckError(caFreeImage(image)))
        return -1;

    system("pause");
}
5f47a7992f02fe46dec8e0011124982dbd086ea1.cu
#include <iostream>
#include <cuda_runtime.h>

#include "CAIPLib.h"

#pragma comment(lib, "CAIPLib.lib")

using namespace std;

int main(int argc, char** argv)
{
    caChar8* pFilename = NULL;
    caUInt16 numberOfIterations = 0;
    TImage image;

    if (argc != 5)
    {
        cout << "Incorrect count of arguments\n"
             << "Please, check out a format of input data.\n"
             << "It should be -f filename -n numbers_of_iterations\n";
        return -1;
    }
    else if (argc == 5)
    {
        caUInt8 i = 1;
        while (i < argc)
        {
            if (strcmp(argv[i], "-f") == 0)
            {
                i++;
                pFilename = argv[i];
            }
            else if (strcmp(argv[i], "-n") == 0)
            {
                i++;
                numberOfIterations = atoi(argv[i]);
            }
            i++;
        }
    }

    if (caCheckError(caLoadImage(pFilename, image)))
        return -1;
    if (caCheckError(caRgb2Gray(image, image)))
        return -1;
    if (caCheckError(caBoxBlur(image, image, numberOfIterations)))
        return -1;
    if (caCheckError(caSaveImage("result.tga\0", image)))
        return -1;
    if (caCheckError(caFreeImage(image)))
        return -1;

    system("pause");
}
4c733dc1b9104481f766a3bcef70a6cf71205aaa.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdio.h> // includes, project //basic includes, others may be needed depending on application #include <stdlib.h> #include <string.h> #include "mex.h" #include "matrix.h" #include "hip/hip_runtime.h" // Thread block size #define BSZ 128 #define MEM 70 #define IMSZ 11 #define IMSZBIG 21 #define imMEM 4000 #define NK 256 //number of blocks to run in each kernel #define pi 3.141592 #define min(a,b) (((a) < (b)) ? (a) : (b)) #define max(a,b) (((a) > (b)) ? (a) : (b)) //kernel_MLEFit<<<dimGrid, dimBlock>>>(ii, sz, BlockSize, fitnum, d_xarray, d_yarray, d_Narray, d_barray, d_fishermatrix, BlockSize); __global__ void kernel_guassiansampleblobs(int,int,int, float*,float*,float*, float*,float*,float*,float*,float*,float*); __global__ void kernel_guassianintegrateblobs(int,int,int, float*,float*,float*, float*,float*,float*,float*,float*,float*); //__device__ float PSF_xy(float x, int ii, float PSFSigma) { // norm=1.0/2.0/PSFSigma/PSFSigma; // return 1.0/2.0*(erf((ii-x+0.5)*sqrt(norm))-erf((ii-x-0.5)*sqrt(norm))); //}; // //__device__ float MODEL(float *x, float *y, float * Narray, int ii, int jj, float PSFSigma, float b, int N) { // float model; // model=b; // for (nn=0;nn<N;nn++) // model+=Narray[nn]*PSF_xy(xarray[nn], ii, PSFSigma)*PSF_xy(yarray[nn], jj, PSFSigma); // return model; // //}; void CUDAERRROR(const char *instr) { hipError_t errornum; const char *str; if (errornum = hipGetLastError()) { str = hipGetErrorString(errornum); hipDeviceReset(); //release context so future hipSetDevice calls work mexErrMsgIdAndTxt("CudaTemplate:CUDA", "%s: %s\nYou should clear this function in MATLAB for proper operation.\n", instr, str); } } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { int blockx; int threadx; int ii,iii,jj,kk,flag; int memblobsnum,ysz,xsz; float * xarray, * yarray, * Narray, *bg,*yt,*xl,*xsigma,*ysigma,*covariance,*im; float *d_xarray, *d_yarray, *d_Narray, *d_xsigma, *d_ysigma,*d_covariance,*d_im,*d_xl,*d_yt,*subim; const mwSize *datasize; int locr; mwSize imdim[2]; if (nrhs<9) mexErrMsgTxt("xsize,ysize,x_array, y_array, N_array, sigmaX, sigmaY, covariance, UseIntegrated_FLAG\n"); if (mxGetClassID(prhs[0])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); if (mxGetClassID(prhs[1])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); if (mxGetClassID(prhs[2])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); if (mxGetClassID(prhs[3])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); if (mxGetClassID(prhs[4])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); if (mxGetClassID(prhs[5])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); datasize=mxGetDimensions(prhs[2]); if (datasize[1]!=1) mexErrMsgTxt("xarray should be n X 1 array\n"); datasize=mxGetDimensions(prhs[3]); if (datasize[1]!=1) mexErrMsgTxt("xarray should be n X 1 array\n"); datasize=mxGetDimensions(prhs[4]); if (datasize[1]!=1) mexErrMsgTxt("xarray should be n X 1 array\n"); datasize=mxGetDimensions(prhs[5]); if (datasize[1]!=1) mexErrMsgTxt("xarray should be n X 1 array\n"); xsz =(float) mxGetScalar(prhs[0]); ysz =(float) mxGetScalar(prhs[1]); imdim[0]=xsz; imdim[1]=ysz; //PSFSigma=(float)mxGetScalar(prhs[1]); //matlab-dip_image convention xarray =(float *) mxGetData(prhs[2]); yarray =(float *) mxGetData(prhs[3]); Narray =(float *) mxGetData(prhs[4]); 
xsigma =(float *)mxGetData(prhs[5]); ysigma =(float *)mxGetData(prhs[6]); covariance =(float *)mxGetData(prhs[7]); flag =(float) mxGetScalar(prhs[8]); int blobn=datasize[0]; float maxsigma=-1; float sigma; for(ii=0;ii<blobn;ii++){ sigma=sqrt(pow(xsigma[ii],2)+pow(ysigma[ii],2)); maxsigma=max(maxsigma,sigma); } int sz=(int) round(float(8*maxsigma)); sz=min(sz,20); if ((flag!=1)&&(flag!=0)) mexErrMsgTxt("flag can only be 0 or 1\n"); // over allocate for additional thread reading error int BlockSize=min(ceil((float) 15000/4/sz/sz),64); memblobsnum=(int)ceil((float)datasize[0]/BlockSize)+128; //mexPrintf("Starting CUDA Malloc\n"); CUDAERRROR("P1"); hipMalloc(&d_xarray, memblobsnum*BlockSize*sizeof(float)); CUDAERRROR("M1"); hipMemset(d_xarray, 0, memblobsnum*BlockSize*sizeof(float)); hipMemcpy(d_xarray, xarray, datasize[0]*sizeof(float), hipMemcpyHostToDevice); CUDAERRROR("S1"); hipMalloc((void**)&d_yarray, memblobsnum*BlockSize*sizeof(float)); hipMemset(d_yarray, 0, memblobsnum*BlockSize*sizeof(float)); hipMemcpy(d_yarray, yarray,datasize[0]*sizeof(float), hipMemcpyHostToDevice); CUDAERRROR("M2"); hipMalloc((void**)&d_Narray, memblobsnum*BlockSize*sizeof(float)); hipMemset(d_Narray, 0, memblobsnum*BlockSize*sizeof(float)); hipMemcpy(d_Narray, Narray,datasize[0]*sizeof(float), hipMemcpyHostToDevice); CUDAERRROR("M3"); hipMalloc((void**)&d_xsigma, memblobsnum*BlockSize*sizeof(float)); hipMemset(d_xsigma, 0, memblobsnum*BlockSize*sizeof(float)); hipMemcpy(d_xsigma, xsigma,datasize[0]*sizeof(float), hipMemcpyHostToDevice); CUDAERRROR("M4"); hipMalloc((void**)&d_ysigma, memblobsnum*BlockSize*sizeof(float)); hipMemset(d_ysigma, 0, memblobsnum*BlockSize*sizeof(float)); hipMemcpy(d_ysigma, ysigma,datasize[0]*sizeof(float), hipMemcpyHostToDevice); CUDAERRROR("M5"); hipMalloc((void**)&d_covariance, memblobsnum*BlockSize*sizeof(float)); hipMemset(d_covariance, 0, memblobsnum*BlockSize*sizeof(float)); hipMemcpy(d_covariance, covariance,datasize[0]*sizeof(float), hipMemcpyHostToDevice); CUDAERRROR("M6"); hipMalloc((void**)&d_im, sz*sz*memblobsnum*BlockSize*sizeof(float)); hipMemset(d_im, 0, sz*sz*memblobsnum*BlockSize*sizeof(float)); hipMalloc((void**)&d_xl, memblobsnum*BlockSize*sizeof(float)); hipMemset(d_xl, 0, memblobsnum*BlockSize*sizeof(float)); hipMalloc((void**)&d_yt, memblobsnum*BlockSize*sizeof(float)); hipMemset(d_yt, 0, memblobsnum*BlockSize*sizeof(float)); //only run NK blocks in each kernel int numK=(int)ceil((float)datasize[0]/BlockSize/NK); for (int ii=0;ii<numK;ii++) { blockx = min(ceil(((float)(((float)datasize[0])/BlockSize)-ii*NK)), NK); blockx = max(blockx,1); threadx= BlockSize; dim3 dimBlock(threadx); dim3 dimGrid(blockx); //printf("threadx: %d,blockx: %d\n", threadx, blockx); switch (flag) { case 0: hipLaunchKernelGGL(( kernel_guassiansampleblobs), dim3(dimGrid), dim3(dimBlock), 0, 0, ii,BlockSize,sz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt); break;//15x15 images, 64 per block case 1: hipLaunchKernelGGL(( kernel_guassianintegrateblobs), dim3(dimGrid), dim3(dimBlock), 0, 0, ii,BlockSize,sz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt); break;//15x15 images, 64 per block } CUDAERRROR("kernel"); //mexEvalString("pause(0.001)"); } subim= (float * )malloc(datasize[0]*sz*sz*sizeof(float)); xl=(float * )malloc(datasize[0]*sizeof(float)); yt=(float * )malloc(datasize[0]*sizeof(float)); //reconstruct images plhs[0]=mxCreateNumericArray(2, imdim, mxSINGLE_CLASS, mxREAL); im=(float *)mxGetData(plhs[0]); hipMemcpy(subim, d_im, 
datasize[0]*sz*sz*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(xl, d_xl, datasize[0]*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(yt, d_yt, datasize[0]*sizeof(float), hipMemcpyDeviceToHost); for(kk=0;kk<blobn;kk++){ for(jj=0;jj<sz;jj++){ for(iii=0;iii<sz;iii++){ if ((((int)xl[kk]+iii)<(xsz-1))&&(((int)yt[kk]+jj)<(ysz-1))){ locr=((int)yt[kk]+jj)*xsz+(int)xl[kk]+iii; if((subim[kk*sz*sz+jj*sz+iii]>0)&&(subim[kk*sz*sz+jj*sz+iii]<100000)&&(locr>=0)&&(locr<=((xsz-1)*(ysz)))) im[locr]+=subim[kk*sz*sz+jj*sz+iii]; } } } } free(subim); free(xl); free(yt); hipFree(d_xarray); hipFree(d_yarray); hipFree(d_Narray); hipFree(d_xsigma); hipFree(d_ysigma); hipFree(d_covariance); hipFree(d_im); hipFree(d_xl); hipFree(d_yt); hipDeviceReset(); } //kernel_guassiansampleblobs<<<dimGrid, dimBlock>>>(ii,blockx,BlockSize,sz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt); //15x15 images, 64 per block __global__ void kernel_guassiansampleblobs(int iiK,int BlockSize, int sz, float *d_xarray,float *d_yarray,float *d_Narray, float *d_xsigma,float *d_ysigma,float *d_covariance,float *d_im,float *d_xl,float *d_yt ) { int tx = threadIdx.x; //matrix number index int bx = blockIdx.x; float x,y,xsigma,ysigma,covariance,N; float xl; float yt; int ii,jj,pixelx,pixely; float model;// __shared__ float s_im[imMEM]; bx=bx+iiK*NK; //import datas from device to shared memory x=d_xarray[bx*BlockSize+tx]; y=d_yarray[bx*BlockSize+tx]; N=d_Narray[bx*BlockSize+tx]; xsigma=d_xsigma[bx*BlockSize+tx]; ysigma=d_ysigma[bx*BlockSize+tx]; covariance=d_covariance[bx*BlockSize+tx]; xl=round(x)-round(float (sz/2-1)); xl=max(xl,0); yt=round(y)-round(float (sz/2-1)); yt=max(yt,0); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { // generate model for pixel ii jj pixelx=ii; pixely=jj; s_im[tx*sz*sz+jj*sz+ii]=N/(2*pi*xsigma*ysigma*sqrt(1-pow(covariance,2)))*exp(-1/(2*(1-pow(covariance,2)))*(pow(x-xl-pixelx,2)/pow(xsigma,2)+pow(y-yt-pixely,2)/pow(ysigma,2)-2*covariance*(x-xl-pixelx)*(y-yt-pixely)/(xsigma*ysigma))); } for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { d_im[bx*BlockSize*sz*sz+tx*sz*sz+jj*sz+ii]=s_im[tx*sz*sz+jj*sz+ii]; d_xl[bx*BlockSize+tx]=xl; d_yt[bx*BlockSize+tx]=yt; } return; } __global__ void kernel_guassianintegrateblobs(int iiK,int BlockSize, int sz, float *d_xarray,float *d_yarray,float *d_Narray, float *d_xsigma,float *d_ysigma,float *d_covariance,float *d_im,float *d_xl,float *d_yt ) { int tx = threadIdx.x; //matrix number index int bx = blockIdx.x; float x,y,xsigma,ysigma,covariance,N; float xl; float yt; int ii,jj,pixelx,pixely; float model;// __shared__ float s_im[imMEM]; bx=bx+iiK*NK; //import datas from device to shared memory x=d_xarray[bx*BlockSize+tx]; y=d_yarray[bx*BlockSize+tx]; N=d_Narray[bx*BlockSize+tx]; xsigma=d_xsigma[bx*BlockSize+tx]; ysigma=d_ysigma[bx*BlockSize+tx]; covariance=d_covariance[bx*BlockSize+tx]; xl=round(x)-round(float (sz/2-1)); xl=max(xl,0); yt=round(y)-round(float (sz/2-1)); yt=max(yt,0); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { // generate model for pixel ii jj pixelx=ii; pixely=jj; s_im[tx*sz*sz+jj*sz+ii]=N/4*(erf((x-xl-pixelx-0.5)/sqrt(2*pow(xsigma,2)))-erf((x-xl-pixelx+0.5)/sqrt(2*pow(xsigma,2))))*(erf((y-yt-pixely-0.5)/sqrt(2*pow(ysigma,2)))-erf((y-yt-pixely+0.5)/sqrt(2*pow(ysigma,2)))); //exp(-1/(2*(1-pow(covariance,2)))*(pow(x-xl-pixelx,2)/pow(xsigma,2)+pow(y-yt-pixely,2)/pow(ysigma,2)-2*covariance*(x-xl-pixelx)*(y-yt-pixely)/(xsigma*ysigma))); } for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { d_im[bx*BlockSize*sz*sz+tx*sz*sz+jj*sz+ii]=s_im[tx*sz*sz+jj*sz+ii]; 
d_xl[bx*BlockSize+tx]=xl; d_yt[bx*BlockSize+tx]=yt; } return; } //END OF KERNAL FUNCTION
4c733dc1b9104481f766a3bcef70a6cf71205aaa.cu
// includes, system #include <stdio.h> // includes, project //basic includes, others may be needed depending on application #include <stdlib.h> #include <string.h> #include "mex.h" #include "matrix.h" #include "cuda_runtime.h" // Thread block size #define BSZ 128 #define MEM 70 #define IMSZ 11 #define IMSZBIG 21 #define imMEM 4000 #define NK 256 //number of blocks to run in each kernel #define pi 3.141592 #define min(a,b) (((a) < (b)) ? (a) : (b)) #define max(a,b) (((a) > (b)) ? (a) : (b)) //kernel_MLEFit<<<dimGrid, dimBlock>>>(ii, sz, BlockSize, fitnum, d_xarray, d_yarray, d_Narray, d_barray, d_fishermatrix, BlockSize); __global__ void kernel_guassiansampleblobs(int,int,int, float*,float*,float*, float*,float*,float*,float*,float*,float*); __global__ void kernel_guassianintegrateblobs(int,int,int, float*,float*,float*, float*,float*,float*,float*,float*,float*); //__device__ float PSF_xy(float x, int ii, float PSFSigma) { // norm=1.0/2.0/PSFSigma/PSFSigma; // return 1.0/2.0*(erf((ii-x+0.5)*sqrt(norm))-erf((ii-x-0.5)*sqrt(norm))); //}; // //__device__ float MODEL(float *x, float *y, float * Narray, int ii, int jj, float PSFSigma, float b, int N) { // float model; // model=b; // for (nn=0;nn<N;nn++) // model+=Narray[nn]*PSF_xy(xarray[nn], ii, PSFSigma)*PSF_xy(yarray[nn], jj, PSFSigma); // return model; // //}; void CUDAERRROR(const char *instr) { cudaError_t errornum; const char *str; if (errornum = cudaGetLastError()) { str = cudaGetErrorString(errornum); cudaThreadExit(); //release context so future cudaSetDevice calls work mexErrMsgIdAndTxt("CudaTemplate:CUDA", "%s: %s\nYou should clear this function in MATLAB for proper operation.\n", instr, str); } } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { int blockx; int threadx; int ii,iii,jj,kk,flag; int memblobsnum,ysz,xsz; float * xarray, * yarray, * Narray, *bg,*yt,*xl,*xsigma,*ysigma,*covariance,*im; float *d_xarray, *d_yarray, *d_Narray, *d_xsigma, *d_ysigma,*d_covariance,*d_im,*d_xl,*d_yt,*subim; const mwSize *datasize; int locr; mwSize imdim[2]; if (nrhs<9) mexErrMsgTxt("xsize,ysize,x_array, y_array, N_array, sigmaX, sigmaY, covariance, UseIntegrated_FLAG\n"); if (mxGetClassID(prhs[0])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); if (mxGetClassID(prhs[1])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); if (mxGetClassID(prhs[2])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); if (mxGetClassID(prhs[3])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); if (mxGetClassID(prhs[4])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); if (mxGetClassID(prhs[5])!=mxSINGLE_CLASS) mexErrMsgTxt("Data must be comprised of single floats!\n"); datasize=mxGetDimensions(prhs[2]); if (datasize[1]!=1) mexErrMsgTxt("xarray should be n X 1 array\n"); datasize=mxGetDimensions(prhs[3]); if (datasize[1]!=1) mexErrMsgTxt("xarray should be n X 1 array\n"); datasize=mxGetDimensions(prhs[4]); if (datasize[1]!=1) mexErrMsgTxt("xarray should be n X 1 array\n"); datasize=mxGetDimensions(prhs[5]); if (datasize[1]!=1) mexErrMsgTxt("xarray should be n X 1 array\n"); xsz =(float) mxGetScalar(prhs[0]); ysz =(float) mxGetScalar(prhs[1]); imdim[0]=xsz; imdim[1]=ysz; //PSFSigma=(float)mxGetScalar(prhs[1]); //matlab-dip_image convention xarray =(float *) mxGetData(prhs[2]); yarray =(float *) mxGetData(prhs[3]); Narray =(float *) mxGetData(prhs[4]); xsigma =(float *)mxGetData(prhs[5]); ysigma =(float 
*)mxGetData(prhs[6]); covariance =(float *)mxGetData(prhs[7]); flag =(float) mxGetScalar(prhs[8]); int blobn=datasize[0]; float maxsigma=-1; float sigma; for(ii=0;ii<blobn;ii++){ sigma=sqrt(pow(xsigma[ii],2)+pow(ysigma[ii],2)); maxsigma=max(maxsigma,sigma); } int sz=(int) round(float(8*maxsigma)); sz=min(sz,20); if ((flag!=1)&&(flag!=0)) mexErrMsgTxt("flag can only be 0 or 1\n"); // over allocate for additional thread reading error int BlockSize=min(ceil((float) 15000/4/sz/sz),64); memblobsnum=(int)ceil((float)datasize[0]/BlockSize)+128; //mexPrintf("Starting CUDA Malloc\n"); CUDAERRROR("P1"); cudaMalloc(&d_xarray, memblobsnum*BlockSize*sizeof(float)); CUDAERRROR("M1"); cudaMemset(d_xarray, 0, memblobsnum*BlockSize*sizeof(float)); cudaMemcpy(d_xarray, xarray, datasize[0]*sizeof(float), cudaMemcpyHostToDevice); CUDAERRROR("S1"); cudaMalloc((void**)&d_yarray, memblobsnum*BlockSize*sizeof(float)); cudaMemset(d_yarray, 0, memblobsnum*BlockSize*sizeof(float)); cudaMemcpy(d_yarray, yarray,datasize[0]*sizeof(float), cudaMemcpyHostToDevice); CUDAERRROR("M2"); cudaMalloc((void**)&d_Narray, memblobsnum*BlockSize*sizeof(float)); cudaMemset(d_Narray, 0, memblobsnum*BlockSize*sizeof(float)); cudaMemcpy(d_Narray, Narray,datasize[0]*sizeof(float), cudaMemcpyHostToDevice); CUDAERRROR("M3"); cudaMalloc((void**)&d_xsigma, memblobsnum*BlockSize*sizeof(float)); cudaMemset(d_xsigma, 0, memblobsnum*BlockSize*sizeof(float)); cudaMemcpy(d_xsigma, xsigma,datasize[0]*sizeof(float), cudaMemcpyHostToDevice); CUDAERRROR("M4"); cudaMalloc((void**)&d_ysigma, memblobsnum*BlockSize*sizeof(float)); cudaMemset(d_ysigma, 0, memblobsnum*BlockSize*sizeof(float)); cudaMemcpy(d_ysigma, ysigma,datasize[0]*sizeof(float), cudaMemcpyHostToDevice); CUDAERRROR("M5"); cudaMalloc((void**)&d_covariance, memblobsnum*BlockSize*sizeof(float)); cudaMemset(d_covariance, 0, memblobsnum*BlockSize*sizeof(float)); cudaMemcpy(d_covariance, covariance,datasize[0]*sizeof(float), cudaMemcpyHostToDevice); CUDAERRROR("M6"); cudaMalloc((void**)&d_im, sz*sz*memblobsnum*BlockSize*sizeof(float)); cudaMemset(d_im, 0, sz*sz*memblobsnum*BlockSize*sizeof(float)); cudaMalloc((void**)&d_xl, memblobsnum*BlockSize*sizeof(float)); cudaMemset(d_xl, 0, memblobsnum*BlockSize*sizeof(float)); cudaMalloc((void**)&d_yt, memblobsnum*BlockSize*sizeof(float)); cudaMemset(d_yt, 0, memblobsnum*BlockSize*sizeof(float)); //only run NK blocks in each kernel int numK=(int)ceil((float)datasize[0]/BlockSize/NK); for (int ii=0;ii<numK;ii++) { blockx = min(ceil(((float)(((float)datasize[0])/BlockSize)-ii*NK)), NK); blockx = max(blockx,1); threadx= BlockSize; dim3 dimBlock(threadx); dim3 dimGrid(blockx); //printf("threadx: %d,blockx: %d\n", threadx, blockx); switch (flag) { case 0: kernel_guassiansampleblobs<<<dimGrid, dimBlock>>>(ii,BlockSize,sz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt); break;//15x15 images, 64 per block case 1: kernel_guassianintegrateblobs<<<dimGrid, dimBlock>>>(ii,BlockSize,sz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt); break;//15x15 images, 64 per block } CUDAERRROR("kernel"); //mexEvalString("pause(0.001)"); } subim= (float * )malloc(datasize[0]*sz*sz*sizeof(float)); xl=(float * )malloc(datasize[0]*sizeof(float)); yt=(float * )malloc(datasize[0]*sizeof(float)); //reconstruct images plhs[0]=mxCreateNumericArray(2, imdim, mxSINGLE_CLASS, mxREAL); im=(float *)mxGetData(plhs[0]); cudaMemcpy(subim, d_im, datasize[0]*sz*sz*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(xl, d_xl, 
datasize[0]*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(yt, d_yt, datasize[0]*sizeof(float), cudaMemcpyDeviceToHost); for(kk=0;kk<blobn;kk++){ for(jj=0;jj<sz;jj++){ for(iii=0;iii<sz;iii++){ if ((((int)xl[kk]+iii)<(xsz-1))&&(((int)yt[kk]+jj)<(ysz-1))){ locr=((int)yt[kk]+jj)*xsz+(int)xl[kk]+iii; if((subim[kk*sz*sz+jj*sz+iii]>0)&&(subim[kk*sz*sz+jj*sz+iii]<100000)&&(locr>=0)&&(locr<=((xsz-1)*(ysz)))) im[locr]+=subim[kk*sz*sz+jj*sz+iii]; } } } } free(subim); free(xl); free(yt); cudaFree(d_xarray); cudaFree(d_yarray); cudaFree(d_Narray); cudaFree(d_xsigma); cudaFree(d_ysigma); cudaFree(d_covariance); cudaFree(d_im); cudaFree(d_xl); cudaFree(d_yt); cudaDeviceReset(); } //kernel_guassiansampleblobs<<<dimGrid, dimBlock>>>(ii,blockx,BlockSize,sz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt); //15x15 images, 64 per block __global__ void kernel_guassiansampleblobs(int iiK,int BlockSize, int sz, float *d_xarray,float *d_yarray,float *d_Narray, float *d_xsigma,float *d_ysigma,float *d_covariance,float *d_im,float *d_xl,float *d_yt ) { int tx = threadIdx.x; //matrix number index int bx = blockIdx.x; float x,y,xsigma,ysigma,covariance,N; float xl; float yt; int ii,jj,pixelx,pixely; float model;// __shared__ float s_im[imMEM]; bx=bx+iiK*NK; //import datas from device to shared memory x=d_xarray[bx*BlockSize+tx]; y=d_yarray[bx*BlockSize+tx]; N=d_Narray[bx*BlockSize+tx]; xsigma=d_xsigma[bx*BlockSize+tx]; ysigma=d_ysigma[bx*BlockSize+tx]; covariance=d_covariance[bx*BlockSize+tx]; xl=round(x)-round(float (sz/2-1)); xl=max(xl,0); yt=round(y)-round(float (sz/2-1)); yt=max(yt,0); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { // generate model for pixel ii jj pixelx=ii; pixely=jj; s_im[tx*sz*sz+jj*sz+ii]=N/(2*pi*xsigma*ysigma*sqrt(1-pow(covariance,2)))*exp(-1/(2*(1-pow(covariance,2)))*(pow(x-xl-pixelx,2)/pow(xsigma,2)+pow(y-yt-pixely,2)/pow(ysigma,2)-2*covariance*(x-xl-pixelx)*(y-yt-pixely)/(xsigma*ysigma))); } for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { d_im[bx*BlockSize*sz*sz+tx*sz*sz+jj*sz+ii]=s_im[tx*sz*sz+jj*sz+ii]; d_xl[bx*BlockSize+tx]=xl; d_yt[bx*BlockSize+tx]=yt; } return; } __global__ void kernel_guassianintegrateblobs(int iiK,int BlockSize, int sz, float *d_xarray,float *d_yarray,float *d_Narray, float *d_xsigma,float *d_ysigma,float *d_covariance,float *d_im,float *d_xl,float *d_yt ) { int tx = threadIdx.x; //matrix number index int bx = blockIdx.x; float x,y,xsigma,ysigma,covariance,N; float xl; float yt; int ii,jj,pixelx,pixely; float model;// __shared__ float s_im[imMEM]; bx=bx+iiK*NK; //import datas from device to shared memory x=d_xarray[bx*BlockSize+tx]; y=d_yarray[bx*BlockSize+tx]; N=d_Narray[bx*BlockSize+tx]; xsigma=d_xsigma[bx*BlockSize+tx]; ysigma=d_ysigma[bx*BlockSize+tx]; covariance=d_covariance[bx*BlockSize+tx]; xl=round(x)-round(float (sz/2-1)); xl=max(xl,0); yt=round(y)-round(float (sz/2-1)); yt=max(yt,0); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { // generate model for pixel ii jj pixelx=ii; pixely=jj; s_im[tx*sz*sz+jj*sz+ii]=N/4*(erf((x-xl-pixelx-0.5)/sqrt(2*pow(xsigma,2)))-erf((x-xl-pixelx+0.5)/sqrt(2*pow(xsigma,2))))*(erf((y-yt-pixely-0.5)/sqrt(2*pow(ysigma,2)))-erf((y-yt-pixely+0.5)/sqrt(2*pow(ysigma,2)))); //exp(-1/(2*(1-pow(covariance,2)))*(pow(x-xl-pixelx,2)/pow(xsigma,2)+pow(y-yt-pixely,2)/pow(ysigma,2)-2*covariance*(x-xl-pixelx)*(y-yt-pixely)/(xsigma*ysigma))); } for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { d_im[bx*BlockSize*sz*sz+tx*sz*sz+jj*sz+ii]=s_im[tx*sz*sz+jj*sz+ii]; d_xl[bx*BlockSize+tx]=xl; d_yt[bx*BlockSize+tx]=yt; } return; } 
//END OF KERNAL FUNCTION
a02865ecc94bc8be7176fcc57fff6dfd02743275.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <hip/hip_runtime.h> #include <stdint.h> extern "C" { #include "png_util.h" } #define T 16 /** * Uses kernel to iterate through image pixels. * Calculates the color for each pixel from the equation for a Julia Fractal. */ __global__ void cudaDrawKernel(double cR, double cI, int size, int iter, int64_t *c_julia) { double newZR, newZI; int x,y; x = (threadIdx.x + blockDim.x*blockIdx.x); y = (threadIdx.y + blockDim.y*blockIdx.y); if (x<size && y<size) { newZR = 1.5*(x-size*0.5)/(size*0.5); newZI = (y-size*0.5)/(size*0.5); double oldZR, oldZI, color; int i, count = 0; for (i = 0; i < iter; i++) { oldZR = newZR; oldZI = newZI; newZR = oldZR*oldZR - oldZI*oldZI + cR; newZI = (2.f)*oldZR*oldZI + cI; if ((newZR*newZR + newZI*newZI) <= 4.0) { count++; } } color = sqrt((double)count); c_julia[y+x*size] = color; } } /** * Sets image variables. * Starts Cuda kernel. * Creates Image from data. */ int main(int argc, char** argv) { int size = atoi(argv[1]); int iter = atoi(argv[2]); double cR = -0.778; double cI = -0.116; /* Start Cuda Time */ hipEvent_t tic, toc; hipEventCreate(&tic); hipEventCreate(&toc); hipEventRecord(tic, 0); /* Allocate and Copy Memory for Cuda */ int64_t *h_julia = (int64_t*) calloc(size*size, sizeof(int64_t)); int64_t *c_julia; hipMalloc(&c_julia, size*size*sizeof(int64_t)); hipMemcpy(c_julia, h_julia, size*size*sizeof(int64_t), hipMemcpyHostToDevice); /* Run the kernel */ int g = (size+T-1)/T; dim3 gDim(g, g); dim3 bDim(T, T); hipLaunchKernelGGL(( cudaDrawKernel) , dim3(gDim), dim3(bDim) , 0, 0, cR, cI, size, iter, c_julia); /* Copy Memory back from Cuda */ hipMemcpy(h_julia, c_julia, size*size*sizeof(int64_t), hipMemcpyDeviceToHost); /* End Cuda Time */ hipEventRecord(toc, 0); hipEventSynchronize(toc); float elapsed; hipEventElapsedTime(&elapsed, tic, toc); printf("Elapsed time: %g\n", elapsed/1000.0); /* Image Creation */ double timeA = clock(); FILE *png = fopen("CudaJuliaFractal.png", "w"); write_hot_png(png, size, size, h_julia, 0, 80); fclose(png); double timeB = clock(); double elapsedPic = (timeB-timeA)/CLOCKS_PER_SEC; printf("Image creation time: %f\n", elapsedPic); return 0; }
a02865ecc94bc8be7176fcc57fff6dfd02743275.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <cuda.h> #include <stdint.h> extern "C" { #include "png_util.h" } #define T 16 /** * Uses kernel to iterate through image pixels. * Calculates the color for each pixel from the equation for a Julia Fractal. */ __global__ void cudaDrawKernel(double cR, double cI, int size, int iter, int64_t *c_julia) { double newZR, newZI; int x,y; x = (threadIdx.x + blockDim.x*blockIdx.x); y = (threadIdx.y + blockDim.y*blockIdx.y); if (x<size && y<size) { newZR = 1.5*(x-size*0.5)/(size*0.5); newZI = (y-size*0.5)/(size*0.5); double oldZR, oldZI, color; int i, count = 0; for (i = 0; i < iter; i++) { oldZR = newZR; oldZI = newZI; newZR = oldZR*oldZR - oldZI*oldZI + cR; newZI = (2.f)*oldZR*oldZI + cI; if ((newZR*newZR + newZI*newZI) <= 4.0) { count++; } } color = sqrt((double)count); c_julia[y+x*size] = color; } } /** * Sets image variables. * Starts Cuda kernel. * Creates Image from data. */ int main(int argc, char** argv) { int size = atoi(argv[1]); int iter = atoi(argv[2]); double cR = -0.778; double cI = -0.116; /* Start Cuda Time */ cudaEvent_t tic, toc; cudaEventCreate(&tic); cudaEventCreate(&toc); cudaEventRecord(tic, 0); /* Allocate and Copy Memory for Cuda */ int64_t *h_julia = (int64_t*) calloc(size*size, sizeof(int64_t)); int64_t *c_julia; cudaMalloc(&c_julia, size*size*sizeof(int64_t)); cudaMemcpy(c_julia, h_julia, size*size*sizeof(int64_t), cudaMemcpyHostToDevice); /* Run the kernel */ int g = (size+T-1)/T; dim3 gDim(g, g); dim3 bDim(T, T); cudaDrawKernel <<< gDim, bDim >>> (cR, cI, size, iter, c_julia); /* Copy Memory back from Cuda */ cudaMemcpy(h_julia, c_julia, size*size*sizeof(int64_t), cudaMemcpyDeviceToHost); /* End Cuda Time */ cudaEventRecord(toc, 0); cudaEventSynchronize(toc); float elapsed; cudaEventElapsedTime(&elapsed, tic, toc); printf("Elapsed time: %g\n", elapsed/1000.0); /* Image Creation */ double timeA = clock(); FILE *png = fopen("CudaJuliaFractal.png", "w"); write_hot_png(png, size, size, h_julia, 0, 80); fclose(png); double timeB = clock(); double elapsedPic = (timeB-timeA)/CLOCKS_PER_SEC; printf("Image creation time: %f\n", elapsedPic); return 0; }
1e8568f64ccd4c153a7eea4ae9eff035c639d307.hip
// !!! This is a file automatically generated by hipify!!!
#include "Demo.h"

#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>

bool initCUDA()
{
    int count = 0;
    int i = 0;
    hipGetDeviceCount(&count); // how many devices are there?
    if(count == 0) { // no device available.
        fprintf(stderr, "There is no device.\n");
        return false;
    }

    hipDeviceProp_t prop;
    for(i = 0; i < count; i++) { // list the properties of each device:
        if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
            // if(prop.major >= 1) {
            //     break;
            // }
            hipDeviceProp_t sDevProp = prop;
            printf( "%d \n", i);
            printf( "Device name: %s\n", sDevProp.name );
            printf( "Device memory: %lld\n", (long long)sDevProp.totalGlobalMem );
            printf( "Shared Memory per-block: %d\n", (int)sDevProp.sharedMemPerBlock );
            printf( "Register per-block: %d\n", sDevProp.regsPerBlock );
            printf( "Warp size: %d\n", sDevProp.warpSize );
            printf( "Memory pitch: %lld\n", (long long)sDevProp.memPitch );
            printf( "Constant Memory: %lld\n", (long long)sDevProp.totalConstMem );
            printf( "Max thread per-block: %d\n", sDevProp.maxThreadsPerBlock );
            printf( "Max thread dim: ( %d, %d, %d )\n", sDevProp.maxThreadsDim[0], sDevProp.maxThreadsDim[1], sDevProp.maxThreadsDim[2] );
            printf( "Max grid size: ( %d, %d, %d )\n", sDevProp.maxGridSize[0], sDevProp.maxGridSize[1], sDevProp.maxGridSize[2] );
            printf( "Compute ability Ver: %d.%d\n", sDevProp.major, sDevProp.minor );
            printf( "Clock: %d\n", sDevProp.clockRate );
            printf( "textureAlignment: %d\n", (int)sDevProp.textureAlignment );
            hipSetDevice(i);
            printf("\n CUDA initialized.\n");
        }
    }

    // if(i == count) {
    //     fprintf(stderr, "There is no device supporting CUDA.\n");
    //     return false;
    // }
    return true;
}

void print_usage_main()
{
    puts("Demo Usage -options");
    puts("-word_sim word similarity calculation");
    puts("-doc_dup document duplicate detection");
    puts("-doc_clustering document clustering");
}

int main(int argc, char** argv)
{
    if (argc < 2) {
        print_usage_main();
        return 0;
    }

    initCUDA();

    if (strcmp(argv[1], "-word_sim") == 0) {
        word_similarity_test(argc, argv);
    } else if (strcmp(argv[1], "-doc_dup") == 0) {
        doc_dup_detection_test(argc, argv);
    } else if (strcmp(argv[1], "-doc_clustering") == 0) {
        doc_clustering_test(argc, argv);
    } else {
        print_usage_main();
        return 0;
    }

    return 0;
}
1e8568f64ccd4c153a7eea4ae9eff035c639d307.cu
#include "Demo.h" #include <stdio.h> #include <string.h> #include <cuda_runtime.h> bool initCUDA() { int count = 0; int i = 0; cudaGetDeviceCount(&count); //看看有多少个设备? if(count == 0) { //哈哈~~没有设备. fprintf(stderr, "There is no device.\n"); return false; } cudaDeviceProp prop; for(i = 0; i < count; i++) {//逐个列出设备属性: if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { // if(prop.major >= 1) { // break; // } cudaDeviceProp sDevProp = prop; printf( "%d \n", i); printf( "Device name: %s\n", sDevProp.name ); printf( "Device memory: %lld\n", (long long)sDevProp.totalGlobalMem ); printf( "Shared Memory per-block: %d\n", (int)sDevProp.sharedMemPerBlock ); printf( "Register per-block: %d\n", sDevProp.regsPerBlock ); printf( "Warp size: %d\n", sDevProp.warpSize ); printf( "Memory pitch: %lld\n", (long long)sDevProp.memPitch ); printf( "Constant Memory: %lld\n", (long long)sDevProp.totalConstMem ); printf( "Max thread per-block: %d\n", sDevProp.maxThreadsPerBlock ); printf( "Max thread dim: ( %d, %d, %d )\n", sDevProp.maxThreadsDim[0], sDevProp.maxThreadsDim[1], sDevProp.maxThreadsDim[2] ); printf( "Max grid size: ( %d, %d, %d )\n", sDevProp.maxGridSize[0], sDevProp.maxGridSize[1], sDevProp.maxGridSize[2] ); printf( "Compute ability Ver: %d.%d\n", sDevProp.major, sDevProp.minor ); printf( "Clock: %d\n", sDevProp.clockRate ); printf( "textureAlignment: %d\n", sDevProp.textureAlignment ); cudaSetDevice(i); printf("\n CUDA initialized.\n"); } } // if(i == count) { // fprintf(stderr, "There is no device supporting CUDA.\n"); // return false; // } return true; } void print_usage_main() { puts("Demo Usage -options"); puts("-word_sim word similarity calculation"); puts("-doc_dup document duplicate detection"); puts("-doc_clustering document clustering"); } int main(int argc, char** argv) { if (argc < 2) { print_usage_main(); return 0; } initCUDA(); if (strcmp(argv[1], "-word_sim") == 0) { word_similarity_test(argc, argv); } else if (strcmp(argv[1], "-doc_dup") == 0) { doc_dup_detection_test(argc, argv); } else if (strcmp(argv[1], "-doc_clustering") == 0) { doc_clustering_test(argc, argv); } else { print_usage_main(); return 0; } }
adef3778243e317708d3e12535fbd695400942e6.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> /* * This example demonstrates submitting work to a CUDA stream in breadth-first * order. Work submission in breadth-first order prevents false-dependencies * from reducing the parallelism of an application. kernel_1, kernel_2, * kernel_3, and kernel_4 simply implement identical, dummy computation. * Separate kernels are used to make the scheduling of these kernels simpler to * visualize in the Visual Profiler. */ #define N 300000 #define NSTREAM 4 __global__ void kernel_1() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_2() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_3() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_4() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } int main(int argc, char **argv) { int n_streams = NSTREAM; int isize = 1; int iblock = 1; int bigcase = 0; // get argument from command line if (argc > 1) n_streams = atoi(argv[1]); if (argc > 2) bigcase = atoi(argv[2]); float elapsed_time; // set up max connectioin char *iname = "CUDA_DEVICE_MAX_CONNECTIONS"; SET_ENV(iname, "32", 1); char *ivalue = GET_ENV(iname); //printf ("%s = %s\n", iname, ivalue); int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams); CHECK(hipSetDevice(dev)); // check if device support hyper-q if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5)) { if (deviceProp.concurrentKernels == 0) { printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n"); printf("> CUDA kernel runs will be serialized\n"); } else { printf("> GPU does not support HyperQ\n"); printf("> CUDA kernel runs will have limited concurrency\n"); } } printf("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // Allocate and initialize an array of stream handles hipStream_t *streams = (hipStream_t *) malloc(n_streams * sizeof(hipStream_t)); for (int i = 0; i < n_streams; i++) { CHECK(hipStreamCreate(&(streams[i]))); } // run kernel with more threads if (bigcase == 1) { iblock = 512; isize = 1 << 12; } // set up execution configuration dim3 block(iblock); dim3 grid(isize / iblock); //printf("> grid %d block %d\n", grid.x, block.x); // creat events hipEvent_t start, stop; CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); // record start event CHECK(hipEventRecord(start, 0)); // dispatch job with breadth first ordering for (int i = 0; i < n_streams; i++) hipLaunchKernelGGL(( kernel_1), dim3(grid), dim3(block), 0, streams[i], ); for (int i = 0; i < n_streams; i++) hipLaunchKernelGGL(( kernel_2), dim3(grid), dim3(block), 0, streams[i], ); for (int i = 0; i < n_streams; i++) hipLaunchKernelGGL(( kernel_3), dim3(grid), dim3(block), 0, streams[i], ); for (int i = 0; i < n_streams; i++) hipLaunchKernelGGL(( kernel_4), dim3(grid), dim3(block), 0, streams[i], ); // record stop event CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); // calculate elapsed time CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Measured time for parallel execution = %.3fs\n", elapsed_time / 
1000.0f); // release all stream for (int i = 0; i < n_streams; i++) { CHECK(hipStreamDestroy(streams[i])); } free(streams); // destroy events CHECK(hipEventDestroy(start)); CHECK(hipEventDestroy(stop)); // reset device CHECK(hipDeviceReset()); return 0; }
adef3778243e317708d3e12535fbd695400942e6.cu
#include "../common/common.h" #include <stdio.h> #include <cuda_runtime.h> #include <stdlib.h> /* * This example demonstrates submitting work to a CUDA stream in breadth-first * order. Work submission in breadth-first order prevents false-dependencies * from reducing the parallelism of an application. kernel_1, kernel_2, * kernel_3, and kernel_4 simply implement identical, dummy computation. * Separate kernels are used to make the scheduling of these kernels simpler to * visualize in the Visual Profiler. */ #define N 300000 #define NSTREAM 4 __global__ void kernel_1() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_2() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_3() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_4() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } int main(int argc, char **argv) { int n_streams = NSTREAM; int isize = 1; int iblock = 1; int bigcase = 0; // get argument from command line if (argc > 1) n_streams = atoi(argv[1]); if (argc > 2) bigcase = atoi(argv[2]); float elapsed_time; // set up max connectioin char *iname = "CUDA_DEVICE_MAX_CONNECTIONS"; SET_ENV(iname, "32", 1); char *ivalue = GET_ENV(iname); //printf ("%s = %s\n", iname, ivalue); int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams); CHECK(cudaSetDevice(dev)); // check if device support hyper-q if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5)) { if (deviceProp.concurrentKernels == 0) { printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n"); printf("> CUDA kernel runs will be serialized\n"); } else { printf("> GPU does not support HyperQ\n"); printf("> CUDA kernel runs will have limited concurrency\n"); } } printf("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // Allocate and initialize an array of stream handles cudaStream_t *streams = (cudaStream_t *) malloc(n_streams * sizeof(cudaStream_t)); for (int i = 0; i < n_streams; i++) { CHECK(cudaStreamCreate(&(streams[i]))); } // run kernel with more threads if (bigcase == 1) { iblock = 512; isize = 1 << 12; } // set up execution configuration dim3 block(iblock); dim3 grid(isize / iblock); //printf("> grid %d block %d\n", grid.x, block.x); // creat events cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); // record start event CHECK(cudaEventRecord(start, 0)); // dispatch job with breadth first ordering for (int i = 0; i < n_streams; i++) kernel_1<<<grid, block, 0, streams[i]>>>(); for (int i = 0; i < n_streams; i++) kernel_2<<<grid, block, 0, streams[i]>>>(); for (int i = 0; i < n_streams; i++) kernel_3<<<grid, block, 0, streams[i]>>>(); for (int i = 0; i < n_streams; i++) kernel_4<<<grid, block, 0, streams[i]>>>(); // record stop event CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); // calculate elapsed time CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f); // release all stream for (int i = 0; i < n_streams; i++) { CHECK(cudaStreamDestroy(streams[i])); } free(streams); // destroy events CHECK(cudaEventDestroy(start)); 
CHECK(cudaEventDestroy(stop)); // reset device CHECK(cudaDeviceReset()); return 0; }
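The header comment in adef3778243e317708d3e12535fbd695400942e6.cu above says that breadth-first submission keeps false dependencies from reducing stream concurrency. The minimal CUDA sketch below contrasts the two dispatch orders; kernel_dummy, the stream count, and the launch shapes are placeholders chosen for illustration and are not taken from that file.

#include <cuda_runtime.h>

__global__ void kernel_dummy() { }  // stand-in for kernel_1..kernel_4 in the file above

int main(void)
{
    const int n_streams = 4;  // placeholder stream count
    cudaStream_t streams[n_streams];
    for (int i = 0; i < n_streams; i++)
        cudaStreamCreate(&streams[i]);

    // Depth-first order: every kernel of stream i is queued before stream i+1 is
    // touched. Without enough hardware work queues, launches from different streams
    // line up behind each other and the streams effectively serialize.
    for (int i = 0; i < n_streams; i++) {
        kernel_dummy<<<1, 1, 0, streams[i]>>>();
        kernel_dummy<<<1, 1, 0, streams[i]>>>();
    }

    // Breadth-first order (the ordering used in the file above): one kernel per
    // stream per pass, so adjacent launches belong to independent streams and can
    // overlap on the device.
    for (int pass = 0; pass < 2; pass++)
        for (int i = 0; i < n_streams; i++)
            kernel_dummy<<<1, 1, 0, streams[i]>>>();

    cudaDeviceSynchronize();
    for (int i = 0; i < n_streams; i++)
        cudaStreamDestroy(streams[i]);
    return 0;
}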
c953325916a3808d02d4a89981619566bfbfeadf.hip
// !!! This is a file automatically generated by hipify!!! // Includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/types.h> #include <unistd.h> #include <errno.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include <hip/hip_vector_types.h> // includes, kernels #include <common.cu> #include <mummergpu.h> #include <mummergpu_kernel.cu> int USE_PRINT_KERNEL = 1; #define BREATHING_ROOM (16 * 1024 * 1024) #define BASES_PER_TREE_PAGE 8388608 //#define BASES_PER_TREE_PAGE 7000000 #define BLOCKSIZE 256 unsigned int cuda_calls = 0; void trap_dbg() { fprintf(stderr, "Trapped\n"); } #define CUDA_SAFE_CALL( call) do { \ cuda_calls++; \ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \ __FILE__, __LINE__, err, hipGetErrorString( err) ); \ trap_dbg(); \ exit(EXIT_FAILURE); \ } } while (0) # define CU_SAFE_CALL_NO_SYNC( call ) do { \ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \ err, __FILE__, __LINE__ ); \ exit(EXIT_FAILURE); \ } } while (0) # define CUT_DEVICE_INIT_DRV(cuDevice) do { \ cuDevice = 0; \ int deviceCount = 0; \ hipError_t err = hipInit(0); \ if (hipSuccess == err) \ CU_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); \ if (deviceCount == 0) { \ fprintf(stderr, "There is no device.\n"); \ exit(EXIT_FAILURE); \ } \ int dev; \ for (dev = 0; dev < deviceCount; ++dev) { \ int major, minor; \ CU_SAFE_CALL_NO_SYNC(hipDeviceComputeCapability(&major, &minor, dev));\ if (major >= 1) \ break; \ } \ if (dev == deviceCount) { \ fprintf(stderr, "There is no device supporting CUDA.\n"); \ exit(EXIT_FAILURE); \ } \ else \ CU_SAFE_CALL_NO_SYNC(hipDeviceGet(&cuDevice, dev)); \ } while (0) unsigned int num_bind_tex_calls = 0; #define BIND_TEX(offset, tex, arr, desc, len) do { \ CUDA_SAFE_CALL(hipBindTexture(offset, tex, arr, desc, len)); \ ++num_bind_tex_calls; \ } while(0) #define BIND_TEX_ARRAY(tex, arr, desc) do { \ CUDA_SAFE_CALL(hipBindTextureToArray(tex, arr, desc)); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC(ptr, size) do { \ hipMalloc(ptr, size); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) do { \ hipMallocPitch(ptr, out_pitch, rowsize, numrows); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) do { \ hipMallocArray(ptr, desc, pitch, rows); \ ++num_bind_tex_calls; \ } while(0) //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold(MatchResults* results, char* refstr, char* queries, int* queryAddrs, int* queryLengths, PixelOfNode* nodeTexture, PixelOfChildren* childrenTexture, int numQueries, int mismatch_length, int rc); extern "C" void getReferenceString(const char * filename, char** refstr, size_t* reflen); extern "C" void createTreeTexture(const char * filename, PixelOfNode** nodeTexture, PixelOfChildren** childrenTexture, unsigned int* width, unsigned int* node_height, unsigned int* children_height, AuxiliaryNodeData** aux_data, int* num_match_coords, int min_match_len, Statistics* statistics, const char * dotfilename, const char * texfilename); extern "C" void getQueriesTexture(int qfile, char** queryTexture, size_t* queryLength, int** queryAddrs, char*** queryNames, int** queryLengths, unsigned int* numQueries, 
unsigned int* num_match_coords, unsigned int device_memory_avail, int min_match_length, bool rc); extern "C" int lookupNumLeaves(ReferencePage * page, TextureAddress addr); void printAlignments(ReferencePage* page, Alignment* alignments, char* query, int qrylen, TextureAddress nodeid, int qrypos, int edge_depth, int min_match, bool rc, bool forwardcoordinates); int countLeafNodes(int nodeid); extern "C" void mapQueriesEndToEnd(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* h_alignments, unsigned int numAligments); char * createTimer() { unsigned int * ptr = (unsigned int *) malloc(sizeof(struct Timer_t)); memset(ptr, 0, sizeof(struct Timer_t)); return (char *) ptr; } void startTimer(char * ptr) { gettimeofday(&(((struct Timer_t *)ptr)->start_m), NULL); } void stopTimer(char * ptr) { gettimeofday(&(((struct Timer_t *)ptr)->end_m), NULL); } float getTimerValue(char * ptr) { Timer_t * timer = (Timer_t*) ptr; if (timer == NULL) { fprintf(stderr, "Uninitialized timer!!!\n"); return 0.0; } if (timer->end_m.tv_sec == 0) { stopTimer(ptr); } return (float) (1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec) + (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec))); } void deleteTimer(char * ptr) { free((Timer_t *)ptr); } extern "C" int createReference(const char* fromFile, Reference* ref) { if (!fromFile || !ref) return -1; char * loadreftimer = createTimer(); startTimer(loadreftimer); getReferenceString(fromFile, &(ref->str), &(ref->len)); stopTimer(loadreftimer); ref->t_load_from_disk += getTimerValue(loadreftimer); deleteTimer(loadreftimer); return 0; } extern "C" int destroyReference(Reference* ref) { free(ref->h_node_tex_array); free(ref->h_children_tex_array); free(ref->str); #if REORDER_REF free(ref->h_ref_array); #endif free(ref->aux_data); #if TREE_ACCESS_HISTOGRAM free(ref->h_node_hist); free(ref->h_child_hist); #endif ref->str = NULL; ref->len = 0; return 0; } extern "C" int createQuerySet(const char* fromFile, QuerySet* queries) { fprintf(stderr, "Opening %s...\n", fromFile); int qfile = open(fromFile, O_RDONLY); if (qfile == -1) { fprintf(stderr, "Can't open %s: %d\n", fromFile, errno); exit (1); } queries->qfile = qfile; return 0; } extern "C" int destroyQuerySet(QuerySet* queries) { if (queries->qfile) close(queries->qfile); return 0; } extern "C" void printStringForError(int err) { } extern "C" int createMatchContext(Reference* ref, QuerySet* queries, MatchResults* matches, bool on_cpu, int min_match_length, char* stats_file, bool reverse, bool forwardreverse, bool forwardcoordinates, bool showQueryLength, char* dotfilename, char* texfilename, MatchContext* ctx) { ctx->queries = queries; ctx->ref = ref; ctx->full_ref = ref->str; ctx->full_ref_len = ref->len; ctx->on_cpu = on_cpu; ctx->min_match_length = min_match_length; ctx->stats_file = stats_file; ctx->reverse = reverse; ctx->forwardreverse = forwardreverse; ctx->forwardcoordinates = forwardcoordinates; ctx->show_query_length = showQueryLength; ctx->dotfilename = dotfilename; ctx->texfilename = texfilename; return 0; } extern "C" int destroyMatchContext(MatchContext* ctx) { free(ctx->full_ref); //destroyReference(ctx->ref); destroyQuerySet(ctx->queries); return 0; } void buildReferenceTexture(Reference* ref, char* full_ref, size_t begin, size_t end, int min_match_len, char* dotfilename, char* texfilename, Statistics* statistics) { fprintf(stderr, "Building reference texture...\n"); PixelOfNode* nodeTexture = NULL; PixelOfChildren * childrenTexture = NULL; unsigned int 
width = 0; unsigned int node_height = 0; unsigned int children_height = 0; AuxiliaryNodeData* aux_data = NULL; int num_nodes; char * loadreftimer = createTimer(); startTimer(loadreftimer); ref->len = end - begin + 3; ref->str = (char*)malloc(ref->len); ref->str[0] = 's'; strncpy(ref->str + 1, full_ref + begin, ref->len - 3); strcpy(ref->str + ref->len - 2, "$"); stopTimer(loadreftimer); statistics->t_ref_from_disk += getTimerValue(loadreftimer) + ref->t_load_from_disk; deleteTimer(loadreftimer); createTreeTexture(ref->str, &nodeTexture, &childrenTexture, &width, &node_height, &children_height, &aux_data, &num_nodes, min_match_len, statistics, dotfilename, texfilename); ref->h_node_tex_array = nodeTexture; ref->h_children_tex_array = childrenTexture; ref->tex_width = width; ref->tex_node_height = node_height; ref->tex_children_height = children_height; #if TREE_ACCESS_HISTOGRAM ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int)); ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int)); #endif ref->aux_data = aux_data; ref->num_nodes = num_nodes; ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) + (width * children_height * sizeof(PixelOfChildren)); fprintf(stderr, "This tree will need %d bytes on the board\n", ref->bytes_on_board); #if REORDER_REF char * reordertimer = createTimer(); startTimer(reordertimer); unsigned int refpitch = ref->pitch = 65536; int numrows = ceil(ref->len / ((float)refpitch)); int blocksize = 4; numrows += blocksize; int refstrsize = numrows * refpitch; ref->h_ref_array = (char *) malloc(refstrsize); ref->bytes_on_board += refstrsize; fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize); int z_max = numrows * refpitch; for (int z = 0; z < z_max; z++) { ref->h_ref_array[z] = 'Z'; } int x, y; int maxx = 0, maxy = 0; size_t reflen = ref->len; char* refstr = ref->str; int block_dim = refpitch * blocksize; for (int i = 0; i < reflen; i++) { int bigx = i % (block_dim); // ref string reorder int bigy = i / (block_dim); y = bigy * blocksize + bigx % blocksize; x = bigx / blocksize; // printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]); assert(x < refpitch); assert(y < numrows); ref->h_ref_array[y*refpitch+x] = refstr[i]; if (x > maxx) { maxx = x; } if (y > maxy) { maxy = y; } } if ((maxx >= refpitch) || (maxy >= numrows)) { fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n", maxx, refpitch, maxy, numrows); exit(1); } stopTimer(reordertimer); if (statistics) statistics->t_reorder_ref_str += getTimerValue(reordertimer); deleteTimer(reordertimer); #else fprintf(stderr, "The refstr requires %d bytes\n", ref->len); ref->bytes_on_board += ref->len; #endif } void boardMemory(unsigned int * free_mem, unsigned int * total_mem) { // The emulator doesn't allow calls to cuMemGetInfo #ifdef __DEVICE_EMULATION__ *free_mem = 512*1024*1024; *total_mem = 768*1024*1024; #else CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem)); #endif } void loadReferenceTexture(MatchContext* ctx) { Reference* ref = ctx->ref; int numrows = ceil(ref->len / ((float)ref->pitch)); int blocksize = 4; numrows += blocksize; hipChannelFormatDesc refTextureDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindSigned); if (!ctx->on_cpu) { char * toboardtimer = createTimer(); startTimer(toboardtimer); #if REFTEX #if REORDER_REF CUDA_MALLOC_ARRAY((hipArray**)(&ref->d_ref_array), &refTextureDesc, ref->pitch, numrows); CUDA_SAFE_CALL(hipMemcpyToArray( (hipArray*)(ref->d_ref_array), 0, 0, ref->h_ref_array, numrows*ref->pitch, 
hipMemcpyHostToDevice)); reftex.addressMode[0] = hipAddressModeClamp; reftex.addressMode[1] = hipAddressModeClamp; reftex.filterMode = hipFilterModePoint; reftex.normalized = false; BIND_TEX_ARRAY(reftex, (hipArray*)ref->d_ref_array, refTextureDesc); ctx->ref->bytes_on_board += numrows * ref->pitch; #else CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len); CUDA_SAFE_CALL( hipMemcpy( (void*)(ref->d_ref_array), ref->str, ref->len, hipMemcpyHostToDevice) ); reftex.addressMode[0] = hipAddressModeClamp; reftex.filterMode = hipFilterModePoint; reftex.normalized = false; // access with normalized texture coordinates hipChannelFormatDesc refDesc = hipCreateChannelDesc(8,0,0,0, hipChannelFormatKindUnsigned); BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len); ctx->ref->bytes_on_board += ref->len; #endif #else #if REORDER_REF size_t refpitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_ref_array), &refpitch, ref->pitch * sizeof(char), numrows); CUDA_SAFE_CALL( hipMemcpy2D((ref->d_ref_array), refpitch, ref->h_ref_array, ref->pitch , ref->pitch * sizeof(char), numrows, hipMemcpyHostToDevice)); ctx->ref->bytes_on_board += numrows * ref->pitch; #else CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len); CUDA_SAFE_CALL( hipMemcpy( (void*)(ref->d_ref_array), ref->str, ref->len, hipMemcpyHostToDevice) ); ctx->ref->bytes_on_board += ref->len; #endif #endif stopTimer(toboardtimer); ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer); deleteTimer(toboardtimer); } else { ref->d_ref_array = NULL; } } void unloadReferenceString(Reference* ref) { #if REFTEX CUDA_SAFE_CALL(hipUnbindTexture( reftex ) ); #endif #if REORDER_REF && REFTEX CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_ref_array))); #else CUDA_SAFE_CALL(hipFree((ref->d_ref_array))); #endif ref->d_ref_array = NULL; } void unloadReferenceTree(MatchContext* ctx) { Reference* ref = ctx->ref; #if REORDER_TREE // Unload nodetex #if NODETEX CUDA_SAFE_CALL(hipUnbindTexture( nodetex ) ); CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_node_tex_array))); #else CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array)); #endif ref->d_node_tex_array = NULL; // Unload childrentex if (ref->d_children_tex_array) { #if CHILDTEX CUDA_SAFE_CALL(hipUnbindTexture( childrentex ) ); CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_children_tex_array))); #else CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array)); #endif } ref->d_children_tex_array = NULL; #else #if NODETEX CUDA_SAFE_CALL(hipUnbindTexture( nodetex ) ); #endif CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array)); ref->d_node_tex_array = NULL; // Unload childrentex if (ref->d_children_tex_array) { #if CHILDTEX CUDA_SAFE_CALL(hipUnbindTexture( childrentex ) ); #endif CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array)); ref->d_children_tex_array = NULL; } #endif #if TREE_ACCESS_HISTOGRAM CUDA_SAFE_CALL(hipFree(ref->d_node_hist)); ref->d_node_hist = NULL; CUDA_SAFE_CALL(hipFree(ref->d_child_hist)); ref->d_child_hist = NULL; #endif } //loads a tree and text for [begin, end) in the reference void loadReference(MatchContext* ctx) { Reference* ref = ctx->ref; ref->bytes_on_board = 0; loadReferenceTexture(ctx); if (!ctx->on_cpu) { char * toboardtimer = createTimer(); startTimer(toboardtimer); // node texels ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode)); // children texels ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren); #if REORDER_TREE #if NODETEX hipChannelFormatDesc nodeTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, 
hipChannelFormatKindUnsigned); CUDA_MALLOC_ARRAY( (hipArray**)(&ref->d_node_tex_array), &nodeTextureDesc, ref->tex_width, ref->tex_node_height ); CUDA_SAFE_CALL( hipMemcpyToArray( (hipArray*)(ref->d_node_tex_array), 0, 0, ref->h_node_tex_array, ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode), hipMemcpyHostToDevice)); nodetex.addressMode[0] = hipAddressModeClamp; nodetex.addressMode[1] = hipAddressModeClamp; nodetex.filterMode = hipFilterModePoint; nodetex.normalized = false; // access with normalized texture coordinates BIND_TEX_ARRAY(nodetex, (hipArray*)ref->d_node_tex_array, nodeTextureDesc); #else size_t nodepitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_node_tex_array), &nodepitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_node_height ); CUDA_SAFE_CALL( hipMemcpy2D((ref->d_node_tex_array), nodepitch, ref->h_node_tex_array, nodepitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_node_height, hipMemcpyHostToDevice)); #endif if (ref->tex_children_height) { #if CHILDTEX hipChannelFormatDesc childrenTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned); CUDA_MALLOC_ARRAY( (hipArray**)(&ref->d_children_tex_array), &childrenTextureDesc, ref->tex_width, ref->tex_children_height ); CUDA_SAFE_CALL( hipMemcpyToArray((hipArray*)(ref->d_children_tex_array), 0, 0, ref->h_children_tex_array, ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren), hipMemcpyHostToDevice)); childrentex.addressMode[0] = hipAddressModeClamp; childrentex.addressMode[1] = hipAddressModeClamp; childrentex.filterMode = hipFilterModePoint; childrentex.normalized = false; // access with normalized texture coordinates BIND_TEX_ARRAY(childrentex, (hipArray*)(ref->d_children_tex_array), childrenTextureDesc); #else size_t childpitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_children_tex_array), &childpitch, ref->tex_width * sizeof(PixelOfChildren), ref->tex_children_height ); CUDA_SAFE_CALL( hipMemcpy2D((ref->d_children_tex_array), childpitch, ref->h_children_tex_array, childpitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_children_height, hipMemcpyHostToDevice)); #endif } #if TREE_ACCESS_HISTOGRAM // node hist ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_node_hist), ref->tex_width * ref->tex_node_height *sizeof(int)); CUDA_SAFE_CALL( hipMemset((ref->d_node_hist),0, ref->tex_width * ref->tex_node_height * sizeof(int))); if (ref->tex_children_height) { // children hist ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int); fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n", ref->bytes_on_board); CUDA_MALLOC( (void**)(&ref->d_child_hist), ref->tex_width * ref->tex_children_height *sizeof(int)); CUDA_SAFE_CALL( hipMemset((ref->d_child_hist),0, ref->tex_width * ref->tex_children_height * sizeof(int))); } #endif #else // NO TREE REORDERING // Node tex, 1-dimensional CUDA_MALLOC( (void**)(&ref->d_node_tex_array), ref->tex_node_height * sizeof(PixelOfNode)); CUDA_SAFE_CALL( hipMemcpy( (ref->d_node_tex_array), ref->h_node_tex_array, ref->tex_node_height * sizeof(PixelOfNode), hipMemcpyHostToDevice)); #if NODETEX hipChannelFormatDesc nodeTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned); nodetex.addressMode[0] = hipAddressModeClamp; nodetex.filterMode = hipFilterModePoint; nodetex.normalized = false; // access with normalized texture coordinates BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc, ref->tex_node_height* sizeof(PixelOfNode)); 
#endif if (ref->tex_children_height) { // Child tex, 1-dimensional CUDA_MALLOC( (void**)(&ref->d_children_tex_array), ref->tex_children_height * sizeof(PixelOfChildren)); CUDA_SAFE_CALL( hipMemcpy( (ref->d_children_tex_array), ref->h_children_tex_array, ref->tex_children_height * sizeof(PixelOfChildren), hipMemcpyHostToDevice)); #if CHILDTEX hipChannelFormatDesc childTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned); childrentex.addressMode[0] = hipAddressModeClamp; childrentex.filterMode = hipFilterModePoint; childrentex.normalized = false; // access with normalized texture coordinates BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array), childTextureDesc, ref->tex_children_height* sizeof(PixelOfChildren)); #endif } #if TREE_ACCESS_HISTOGRAM ref->bytes_on_board += ref->tex_node_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_node_hist), ref->tex_node_height *sizeof(int)); CUDA_SAFE_CALL( hipMemset((ref->d_node_hist),0, ref->tex_node_height * sizeof(int))); if (ref->tex_children_height) { ref->bytes_on_board += ref->tex_children_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_child_hist), ref->tex_children_height *sizeof(int)); CUDA_SAFE_CALL( hipMemset((ref->d_child_hist),0, ref->tex_children_height * sizeof(int))); } #endif #endif #if TWO_LEVEL_NODE_TREE PixelOfNode node_buf[NODE_THRESH]; memset(node_buf, 0, sizeof(node_buf)); for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i) { TextureAddress myaddress(id2addr(i)); #if MERGETEX && REORDER_TREE myaddress.x &= 0x7FF; myaddress.x *= 2; int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc]; #elif REORDER_TREE int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc]; #elif MERGETEX node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x*2]; #else node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x]; #endif } CUDA_SAFE_CALL( hipMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf))); #endif #if TWO_LEVEL_CHILD_TREE PixelOfChildren child_buf[CHILD_THRESH]; memset(child_buf, 0, sizeof(child_buf)); for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i) { TextureAddress myaddress(id2addr(i)); #if MERGETEX && REORDER_TREE myaddress.x &= 0x7FF; myaddress.x *= 2; int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[loc+1]; #elif REORDER_TREE int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; child_buf[i]= ((PixelOfChildren*)(ref->h_children))[loc]; #elif MERGETEX child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x*2+1]; #else child_buf[i]= ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x]; #endif } CUDA_SAFE_CALL( hipMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf))); #endif stopTimer(toboardtimer); ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer); deleteTimer(toboardtimer); fprintf(stderr, "done\n"); } else { ref->d_node_tex_array = NULL; ref->d_children_tex_array = NULL; } } void dumpQueryBlockInfo(QuerySet* queries) { fprintf(stderr, "\tProcessing queries %s to %s\n", queries->h_names[0], queries->h_names[queries->count-1]); } void loadQueries(MatchContext* ctx) { QuerySet* queries = ctx->queries; queries->bytes_on_board = 0; unsigned int numQueries = queries->count; if (!ctx->on_cpu) { fprintf(stderr, "Allocating device memory for queries... 
"); char* toboardtimer = createTimer(); startTimer(toboardtimer); dumpQueryBlockInfo(queries); CUDA_MALLOC((void**) &queries->d_tex_array, queries->texlen); \ queries->bytes_on_board += queries->texlen; CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_tex_array, queries->h_tex_array + queries->h_addrs_tex_array[0], queries->texlen, hipMemcpyHostToDevice)); #if QRYTEX qrytex.addressMode[0] = hipAddressModeClamp; qrytex.filterMode = hipFilterModePoint; qrytex.normalized = false; // access with normalized texture coordinates hipChannelFormatDesc qryDesc = hipCreateChannelDesc(8,0,0,0, hipChannelFormatKindUnsigned); BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc, queries->texlen); #endif CUDA_MALLOC((void**) &queries->d_addrs_tex_array, numQueries * sizeof(int)); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_addrs_tex_array, queries->h_addrs_tex_array, numQueries * sizeof(int), hipMemcpyHostToDevice)); CUDA_MALLOC((void**) &queries->d_lengths_array, numQueries * sizeof(int)); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_lengths_array, queries->h_lengths_array, numQueries * sizeof(int), hipMemcpyHostToDevice)); stopTimer(toboardtimer); ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer); deleteTimer(toboardtimer); fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board); } else { queries->d_addrs_tex_array = NULL; queries->d_tex_array = NULL; queries->d_lengths_array = NULL; fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries*sizeof(int) + queries->texlen); } } void unloadQueries(MatchContext* ctx) { QuerySet* queries = ctx->queries; CUDA_SAFE_CALL(hipFree(queries->d_tex_array)); queries->d_tex_array = NULL; CUDA_SAFE_CALL(hipFree(queries->d_addrs_tex_array)); queries->d_addrs_tex_array = NULL; CUDA_SAFE_CALL(hipFree(queries->d_lengths_array)); queries->d_lengths_array = NULL; queries->bytes_on_board = 0; } // Computes the location of the first MatchCoord for a given query. NOTE: // Do NOT use this function if COALESCED_QUERIES == 1 inline int match_coord_addrs(int qryid, int qry_addrs, int match_length) { return qry_addrs - qryid * (match_length + 1); } // Construct the offset table for a set of queries. This table will be used // by the printing functions, and if COALESCED_QUERIES == 1, by the matching // kernel. 
void buildCoordOffsetArray(MatchContext* ctx, int** h_coord_offset_array, unsigned int* num_coords) { int numCoords = 0; int match_length = ctx->min_match_length; int numQueries = ctx->queries->count; int* lengths = ctx->queries->h_lengths_array; int* coord_offsets = (int*)calloc(numQueries, sizeof(int)); #if COALESCED_QUERIES for (unsigned int i = 0; i < numQueries; i += WARP_SIZE) { // Every query in this warp will need at least this many coords int max_num_coords = 0; for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) { int num_coords = lengths[i + j] - match_length + 1; if ( max_num_coords < num_coords) max_num_coords = num_coords; } unsigned int block_size = max_num_coords * WARP_SIZE; for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) { ctx->results.h_coord_tex_array[i + j] = numCoords + j; } numCoords += block_size; } #else for (unsigned int i = 0; i < numQueries; ++i) { int qryoffset = ctx->queries->h_addrs_tex_array[i]; coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length); } if (numQueries > 0) { unsigned int last_qry = numQueries - 1; unsigned int last_qry_len = lengths[last_qry] - match_length + 1; numCoords = coord_offsets[last_qry] + last_qry_len; fprintf(stderr, "Need %d match coords for this result array\n", numCoords); } #endif *num_coords = numCoords; *h_coord_offset_array = coord_offsets; } void loadResultBuffer(MatchContext* ctx) { unsigned int numQueries = ctx->queries->count; assert (numQueries); char* offsettimer = createTimer(); startTimer(offsettimer); buildCoordOffsetArray(ctx, &(ctx->results.h_coord_tex_array), &(ctx->results.numCoords)); stopTimer(offsettimer); ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer); deleteTimer(offsettimer); unsigned int numCoords = ctx->results.numCoords; fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...", numQueries, numCoords*sizeof(MatchCoord) ); unsigned int boardFreeMemory = 0; unsigned int total_mem = 0; boardMemory(&boardFreeMemory, &total_mem); fprintf(stderr,"board free memory: %u total memory: %u\n", boardFreeMemory, total_mem); ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord)); if (ctx->results.h_match_coords == NULL) { trap_dbg(); exit(EXIT_FAILURE); } if (!ctx->on_cpu) { char* toboardtimer = createTimer(); startTimer(toboardtimer); ctx->results.bytes_on_board = 0; CUDA_MALLOC( (void**) &ctx->results.d_match_coords, numCoords * sizeof(MatchCoord)); ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord); CUDA_SAFE_CALL( hipMemset( (void*)ctx->results.d_match_coords, 0, numCoords * sizeof(MatchCoord))); #if COALESCED_QUERIES CUDA_MALLOC((void**) &ctx->results.d_coord_tex_array, numQueries * sizeof(int)); ctx->results.bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( hipMemcpy((void*) ctx->results.d_coord_tex_array, ctx->results.h_coord_tex_array, numQueries * sizeof(int), hipMemcpyHostToDevice)); #endif stopTimer(toboardtimer); ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer); deleteTimer(toboardtimer); } else { ctx->results.d_match_coords = NULL; } fprintf(stderr, "done\n"); } void unloadResultBuffer(MatchContext* ctx) { CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords)); ctx->results.d_match_coords = NULL; ctx->results.bytes_on_board = 0; #if COALESCED_QUERIES CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords)); #endif } void transferResultsFromDevice(MatchContext* ctx) { if (!ctx->on_cpu) { char* fromboardtimer = createTimer(); startTimer(fromboardtimer); 
    CUDA_SAFE_CALL(hipMemcpy(ctx->results.h_match_coords,
                             ctx->results.d_match_coords,
                             ctx->results.numCoords * sizeof(MatchCoord),
                             hipMemcpyDeviceToHost));

#if TREE_ACCESS_HISTOGRAM
    CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_node_hist,
                             ctx->ref->d_node_hist,
                             ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int),
                             hipMemcpyDeviceToHost));

    CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_child_hist,
                             ctx->ref->d_child_hist,
                             ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int),
                             hipMemcpyDeviceToHost));

    if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height)
    {
        int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int));
        if (ctx->statistics.node_hist_size)
            memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int));
        ctx->statistics.node_hist = temp;
        ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height;
    }

    if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height)
    {
        int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int));
        if (ctx->statistics.child_hist_size)
            memcpy(temp, ctx->statistics.child_hist, ctx->statistics.child_hist_size * sizeof(int));
        ctx->statistics.child_hist = temp;
        ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height;
    }

    for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i)
    {
        ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i];
    }

    for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i)
    {
        ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i];
    }
#endif

    stopTimer(fromboardtimer);
    ctx->statistics.t_match_coords_from_board += getTimerValue(fromboardtimer);
    deleteTimer(fromboardtimer);
  }
}

int flushOutput();
int addToBuffer(char* string);

char numbuffer[32];

MatchCoord* coordForQueryChar(MatchContext* ctx,
                              unsigned int qryid,
                              unsigned int qrychar)
{
    MatchResults* results = &(ctx->results);
    MatchCoord* coords = results->h_match_coords;
#if COALESCED_QUERIES
    return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE;
#else
    return coords + results->h_coord_tex_array[qryid] + qrychar;
#endif
}

void coordsToPrintBuffers(MatchContext* ctx,
                          ReferencePage* page,
                          MatchInfo** matches,
                          Alignment** alignments,
                          unsigned int mem_avail,
                          unsigned int* coord_idx,
                          unsigned int* match_idx,
                          unsigned int* align_idx,
                          unsigned int* nextqry,
                          unsigned int* nextqrychar)
{
    unsigned int numQueries = ctx->queries->count;
    int match_length = ctx->min_match_length;
    unsigned int cidx = *coord_idx;
    unsigned int midx = 0;

    unsigned int numCoords = ctx->results.numCoords;
    unsigned int numMatches = 0;
    unsigned int numAlignments = 0;

    int DEBUG = 0;
    if (DEBUG && cidx == 0)
    {
        for (int j = 0; j < numCoords; ++j)
        {
            MatchCoord * coord = ctx->results.h_match_coords + j;
            if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
            {
                //fprintf(stdout, "node: %d\n",
                //        coord->node);
                fprintf(stdout, "node: %d leaves:%d\n",
                        coord->node.data, lookupNumLeaves(page, coord->node));
            }
        }
        exit(0);
    }

    // How much can we fit into mem_avail?
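    // (Editorial comment, not in the original source: the loop below greedily
    // extends the window of match coords starting at cidx, stopping once the
    // combined MatchInfo and Alignment buffers would exceed mem_avail, or once
    // the match count would require more than MAX_GRID_DIMENSION blocks of
    // BLOCKSIZE threads in the print kernel.)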
for (int j = cidx; j < numCoords; ++j) { MatchCoord* coord = ctx->results.h_match_coords + j; int queryAlignments = 0; int queryMatches = 0; if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) { int numLeaves = lookupNumLeaves(page, coord->node); queryAlignments += numLeaves; queryMatches++; } int allMatches = numMatches + queryMatches; int allAlignments = numAlignments + queryAlignments; int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment); if (neededSize > mem_avail || (allMatches/BLOCKSIZE) >= MAX_GRID_DIMENSION) { // adding this match won't fit on the board break; } ++cidx; numMatches = allMatches; numAlignments = allAlignments; } MatchInfo* M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo)); unsigned int alignmentOffset = 0; int qry = *nextqry; int qrychar = *nextqrychar; bool set_full = false; while (qry < numQueries) { // h_lengths_array doesn't count the 'q' at the beginning of each query int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length; while (qrychar < qlen) { if (midx >= numMatches) { set_full = true; break; } MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar); if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) { MatchInfo m; m.resultsoffset = alignmentOffset; m.qrystartpos = qrychar; m.matchnode = coord->node; m.edgematch = coord->edge_match_length; m.numLeaves = lookupNumLeaves(page, m.matchnode); m.queryid = qry; alignmentOffset += m.numLeaves; M[midx++] = m; } ++qrychar; } if (set_full) break; ++qry; qrychar = 0; } *coord_idx = cidx; *match_idx = midx; *align_idx = alignmentOffset; *matches = M; *nextqry = qry; *nextqrychar = qrychar; fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n", alignmentOffset * sizeof(Alignment), numAlignments); *alignments = (struct Alignment *) calloc(alignmentOffset, sizeof(Alignment)); //hipHostMalloc((void**)alignments, numAlignments * sizeof(Alignment)); } void runPrintKernel(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* alignments, unsigned int numAlignments) { MatchInfo* d_matches; size_t matchesSize = numMatches * sizeof(MatchInfo); CUDA_MALLOC((void**) &d_matches, matchesSize); struct Alignment * d_alignments; size_t alignmentSize = numAlignments * sizeof(Alignment); CUDA_MALLOC((void**) &d_alignments, alignmentSize); CUDA_SAFE_CALL(hipMemset((void*) d_alignments, 0, alignmentSize)); char* atimer = createTimer(); startTimer(atimer); // Copy matches to card fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments); fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments); int DEBUG = 0; if (DEBUG) { for (int i = 0; i < numMatches; i++) { printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n", i, h_matches[i].resultsoffset, h_matches[i].queryid, h_matches[i].matchnode.data, h_matches[i].numLeaves, h_matches[i].edgematch, h_matches[i].qrystartpos); } exit(0); } CUDA_SAFE_CALL(hipMemcpy(d_matches, h_matches, matchesSize, hipMemcpyHostToDevice)); stopTimer(atimer); float mtime = getTimerValue(atimer); // Launch the kernel int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches; dim3 dimBlock(blocksize, 1, 1); dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1); fprintf(stderr, " Calling print kernel... 
"); hipLaunchKernelGGL(( printKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, d_matches, numMatches, d_alignments, #if COALESCED_QUERIES ctx->results.d_coord_tex_array, #endif #if !QRYTEX #if COALESCED_QUERIES (int*) #endif ctx->queries->d_tex_array, #endif #if !NODETEX (_PixelOfNode*)ctx->ref->d_node_tex_array, #endif #if !CHILDTEX (_PixelOfChildren*)ctx->ref->d_children_tex_array, #endif ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, page->begin, page->end, page->shadow_left, page->shadow_right, ctx->min_match_length #if TREE_ACCESS_HISTOGRAM , ctx->ref->d_node_hist, ctx->ref->d_child_hist #endif ); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if ( hipSuccess != err) { fprintf(stderr, "Kernel execution failed: %s.\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } startTimer(atimer); // Copy the results back to the host CUDA_SAFE_CALL(hipMemcpy((void*)alignments, (void*)d_alignments, alignmentSize, hipMemcpyDeviceToHost)); hipDeviceSynchronize(); stopTimer(atimer); float atime = getTimerValue(atimer); fprintf(stderr, "memcpy time= %f\n", atime + mtime); deleteTimer(atimer); // Cleanup CUDA_SAFE_CALL(hipFree(d_alignments)); CUDA_SAFE_CALL(hipFree(d_matches)); } // TODO: need reverse-complement printing support void runPrintOnCPU(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* alignments, unsigned int numAlignments) { unsigned int min_match_length = ctx->min_match_length; int* addrs = ctx->queries->h_addrs_tex_array; int* lengths = ctx->queries->h_lengths_array; char* qrychars = ctx->queries->h_tex_array; if (!numMatches) return; int qry = -1; unsigned int qrylen; for (int i = 0; i < numMatches; ++i) { MatchInfo& match = h_matches[i]; if (match.queryid != qry) { qry = match.queryid; qrylen = lengths[qry]; } if (!(match.edgematch & FRMASK)) { printAlignments(page, alignments + match.resultsoffset, #if COALESCED_QUERIES qrychars + sizeof(int) * addrs[qry], #else qrychars + addrs[qry], #endif qrylen, match.matchnode, match.qrystartpos, match.edgematch, min_match_length, 0, ctx->forwardcoordinates); } } } int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen); void getExactAlignments(MatchContext * ctx, ReferencePage * page, bool on_cpu) { assert(!ctx->reverse && !ctx->forwardreverse); unsigned int boardFreeMemory; unsigned int total_mem; if (!on_cpu) { boardMemory(&boardFreeMemory, &total_mem); fprintf(stderr, "board free memory: %u total memory: %u\n", boardFreeMemory, total_mem); } else { boardFreeMemory = 256 * 1024 * 1024; total_mem = boardFreeMemory; } #ifdef __DEVICE_EMULATION__ boardFreeMemory = 512 * 1024 * 1024; #endif boardFreeMemory -= BREATHING_ROOM; fprintf(stderr, "board free memory: %u\n", boardFreeMemory); int rTotalMatches = 0; int rTotalAlignments = 0; int totalRounds = 0; unsigned int last_coord = ctx->results.numCoords; unsigned int next_coord = 0; unsigned int nextqry = 0; unsigned int nextqrychar = 0; int lastqry = -1; while (next_coord < last_coord) { // see how many queries will fit on the board totalRounds++; unsigned int numMatches = 0; unsigned int numAlignments = 0; MatchInfo* h_matches = NULL; Alignment* h_alignments = NULL; int coord_left = next_coord; char* btimer = createTimer(); startTimer(btimer); coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory, &next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar); stopTimer(btimer); float btime = getTimerValue(btimer); ctx->statistics.t_coords_to_buffers += btime; fprintf(stderr, "buffer prep 
time= %f\n", btime); deleteTimer(btimer); fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n", totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments); if (numMatches == 0) continue; char buf[256]; //assert(qryend > qrystart); rTotalAlignments += numAlignments; rTotalMatches += numMatches; if (num_bind_tex_calls > 100) { hipDeviceReset(); num_bind_tex_calls = 0; loadReference(ctx); loadQueries(ctx); } char* ktimer = createTimer(); startTimer(ktimer); if (on_cpu) { runPrintOnCPU(ctx, page, h_matches, numMatches, h_alignments, numAlignments); } else { runPrintKernel(ctx, page, h_matches, numMatches, h_alignments, numAlignments); } stopTimer(ktimer); float ktime = getTimerValue(ktimer); ctx->statistics.t_print_kernel += ktime; fprintf(stderr, "print kernel time= %f\n", ktime); deleteTimer(ktimer); // char* stimer = createTimer(); // startTimer(stimer); // mapQueriesEndToEnd(ctx, // page, // h_matches, // numMatches, // h_alignments, // numAlignments); // // stopTimer(stimer); // // float stime = getTimerValue(stimer); // fprintf(stderr, "postprocess time= %f\n", stime); // deleteTimer(stimer); //flushOutput(); //Process the alignments char* otimer = createTimer(); startTimer(otimer); for (int m = 0; m < numMatches; m++) { int base = h_matches[m].resultsoffset; for (int i = 0; i < h_matches[m].numLeaves; i++) { // See if there are any more left maximal alignments for this match if (h_alignments[base+i].left_in_ref == 0) { break; } if (h_matches[m].queryid != lastqry) { lastqry = h_matches[m].queryid; addToBuffer("> "); addToBuffer(*(ctx->queries->h_names + lastqry)); addToBuffer("\n"); } sprintf(buf, "%d\t%d\t%d\n", h_alignments[base+i].left_in_ref, h_matches[m].qrystartpos + 1, h_alignments[base+i].matchlen); addToBuffer(buf); // addMatchToBuffer(h_alignments[base+i].left_in_ref, // h_matches[m].qrystartpos + 1, // h_alignments[base+i].matchlen); } } flushOutput(); stopTimer(otimer); ctx->statistics.t_results_to_disk += getTimerValue(otimer); deleteTimer(otimer); free(h_matches); free(h_alignments); //hipHostFree((void*)h_alignments); } free(ctx->results.h_coord_tex_array); free(ctx->results.h_match_coords); ctx->results.h_coord_tex_array = NULL; ctx->results.h_match_coords = NULL; fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n", rTotalMatches, rTotalAlignments, totalRounds); } int getQueryBlock(MatchContext* ctx, size_t device_mem_avail) { QuerySet* queries = ctx->queries; char * queryTex = NULL; int* queryAddrs = NULL; int* queryLengths = NULL; unsigned int numQueries; unsigned int num_match_coords; size_t queryLen; char** names; fprintf(stderr, "Loading query block... 
"); char* queryreadtimer = createTimer(); startTimer(queryreadtimer); getQueriesTexture(queries->qfile, &queryTex, &queryLen, &queryAddrs, &names, &queryLengths, &numQueries, &num_match_coords, device_mem_avail, ctx->min_match_length, ctx->reverse || ctx->forwardreverse); stopTimer(queryreadtimer); ctx->statistics.t_queries_from_disk += getTimerValue(queryreadtimer); deleteTimer(queryreadtimer); queries->h_tex_array = queryTex; queries->count = numQueries; queries->h_addrs_tex_array = queryAddrs; queries->texlen = queryLen; queries->h_names = names; queries->h_lengths_array = queryLengths; ctx->results.numCoords = num_match_coords; fprintf(stderr, "done.\n"); return numQueries; } void destroyQueryBlock(QuerySet* queries) { free(queries->h_tex_array); queries->h_tex_array = NULL; for (int i = 0; i < queries->count; ++i) free(queries->h_names[i]); free(queries->h_names); queries->count = 0; queries->texlen = 0; free(queries->h_addrs_tex_array); queries->h_addrs_tex_array = NULL; free(queries->h_lengths_array); queries->h_lengths_array = NULL; } void resetStats(Statistics* stats) { stats->t_end_to_end = 0.0; stats->t_match_kernel = 0.0; stats->t_print_kernel = 0.0; stats->t_queries_to_board = 0.0; stats->t_match_coords_to_board = 0.0; stats->t_match_coords_from_board = 0.0; stats->t_tree_to_board = 0.0; stats->t_ref_str_to_board = 0.0; stats->t_queries_from_disk = 0.0; stats->t_ref_from_disk = 0.0; stats->t_results_to_disk = 0.0; stats->t_tree_construction = 0.0; stats->t_tree_reorder = 0.0; stats->t_tree_flatten = 0.0; stats->t_reorder_ref_str = 0.0; stats->t_build_coord_offsets = 0.0; stats->t_coords_to_buffers = 0.0; stats->bp_avg_query_length = 0.0; #if TREE_ACCESS_HISTOGRAM if (stats->node_hist_size) { free(stats->node_hist); stats->node_hist = NULL; stats->node_hist_size = 0; } if (stats->child_hist_size) { free(stats->child_hist); stats->child_hist = NULL; stats->child_hist_size = 0; } #endif } void writeStatisticsFile(Statistics* stats, char* stats_filename, char* node_hist_filename = NULL, char* child_hist_filename = NULL) { if (stats_filename) { FILE* f = fopen(stats_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename); } else { fprintf(f, "Q"); fprintf(f, ",R"); fprintf(f, ",T"); fprintf(f, ",m"); fprintf(f, ",r"); fprintf(f, ",t"); fprintf(f, ",n"); fprintf(f, ",Total"); fprintf(f, ",Match kernel"); fprintf(f, ",Print Kernel"); fprintf(f, ",Queries to board"); fprintf(f, ",Match coords to board"); fprintf(f, ",Match coords from board"); fprintf(f, ",Tree to board"); fprintf(f, ",Ref str to board"); fprintf(f, ",Queries from disk"); fprintf(f, ",Ref from disk"); fprintf(f, ",Output to disk"); fprintf(f, ",Tree construction"); fprintf(f, ",Tree reorder"); fprintf(f, ",Tree flatten"); fprintf(f, ",Ref reorder"); fprintf(f, ",Build coord table"); fprintf(f, ",Coords to buffers"); fprintf(f, ",Avg qry length"); fprintf(f, "\n"); fprintf(f, "%d", QRYTEX); fprintf(f, ",%d", REFTEX); fprintf(f, ",%d", TREETEX); fprintf(f, ",%d", MERGETEX); fprintf(f, ",%d", REORDER_REF); fprintf(f, ",%d", REORDER_TREE); fprintf(f, ",%d", RENUMBER_TREE); fprintf(f, ",%f", stats->t_end_to_end); fprintf(f, ",%f", stats->t_match_kernel); fprintf(f, ",%f", stats->t_print_kernel); fprintf(f, ",%f", stats->t_queries_to_board); fprintf(f, ",%f", stats->t_match_coords_to_board); fprintf(f, ",%f", stats->t_match_coords_from_board); fprintf(f, ",%f", stats->t_tree_to_board); fprintf(f, ",%f", stats->t_ref_str_to_board); fprintf(f, ",%f", stats->t_queries_from_disk); 
fprintf(f, ",%f", stats->t_ref_from_disk); fprintf(f, ",%f", stats->t_results_to_disk); fprintf(f, ",%f", stats->t_tree_construction); fprintf(f, ",%f", stats->t_tree_reorder); fprintf(f, ",%f", stats->t_tree_flatten); fprintf(f, ",%f", stats->t_reorder_ref_str); fprintf(f, ",%f", stats->t_build_coord_offsets); fprintf(f, ",%f", stats->t_coords_to_buffers); fprintf(f, ",%f", stats->bp_avg_query_length); fprintf(f,"\n"); fclose(f); } } #if TREE_ACCESS_HISTOGRAM if (node_hist_filename) { FILE* f = fopen(node_hist_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename); } else { for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) fprintf(f, "%d\t%d\n", i, ctx->statistics.node_hist[i]); } } if (child_hist_filename) { FILE* f = fopen(child_hist_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename); } else { for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) fprintf(f, "%d\t%d\n", i, ctx->statistics.child_hist[i]); } } float total_node_hits = 0; float tree_top_node_hits = 0; float total_child_hits = 0; float tree_top_child_hits = 0; for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) { total_node_hits +=ctx->statistics.node_hist[i]; if (i < 256) { tree_top_node_hits += ctx->statistics.node_hist[i]; } } for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) { total_child_hits +=ctx->statistics.child_hist[i]; if (i < 256) { tree_top_child_hits += ctx->statistics.child_hist[i]; } } fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n",(int)tree_top_node_hits, (int)total_node_hits, tree_top_node_hits /total_node_hits); fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n",(int)tree_top_child_hits, (int)total_child_hits, tree_top_child_hits /total_child_hits); #endif } void matchOnCPU(MatchContext* ctx, bool doRC) { //TODO: CPU is matching is disabled. if (doRC) { // Match the reverse complement of the queries to the ref computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, REVERSE); } else { computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, FORWARD); } } void matchOnGPU(MatchContext* ctx, bool doRC) { int numQueries = ctx->queries->count; int blocksize = (numQueries > BLOCKSIZE) ? 
BLOCKSIZE : numQueries; dim3 dimBlock(blocksize, 1, 1); dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1); // Match the reverse complement of the queries to the ref if (doRC) { //TODO: GPU RC is disabled hipLaunchKernelGGL(( mummergpuRCKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, ctx->results.d_match_coords, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length); } else { hipLaunchKernelGGL(( mummergpuKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, ctx->results.d_match_coords, #if COALESCED_QUERIES ctx->results.d_coord_tex_array, #endif #if !QRYTEX #if COALESCED_QUERIES (int*) #endif ctx->queries->d_tex_array, #endif #if !NODETEX (_PixelOfNode*)(ctx->ref->d_node_tex_array), #endif #if !CHILDTEX (_PixelOfChildren*)(ctx->ref->d_children_tex_array), #endif #if !REFTEX (char*)ctx->ref->d_ref_array, #endif ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length #if TREE_ACCESS_HISTOGRAM , ctx->ref->d_node_hist, ctx->ref->d_child_hist #endif ); } // check if kernel execution generated an error hipError_t err = hipGetLastError(); if ( hipSuccess != err) { fprintf(stderr, "Kernel execution failed: %s.\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } void getMatchResults(MatchContext* ctx, unsigned int page_num) { transferResultsFromDevice(ctx); } void matchQueryBlockToReferencePage(MatchContext* ctx, ReferencePage* page, bool reverse_complement) { char* ktimer = createTimer(); fprintf(stderr, "Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n", ctx->queries->bytes_on_board, ctx->ref->bytes_on_board, ctx->results.bytes_on_board); startTimer(ktimer); if (ctx->on_cpu) { matchOnCPU(ctx, reverse_complement); } else { matchOnGPU(ctx, reverse_complement); hipDeviceSynchronize(); } stopTimer(ktimer); float ktime = getTimerValue(ktimer); ctx->statistics.t_match_kernel += ktime; fprintf(stderr, "match kernel time= %f\n", ktime); deleteTimer(ktimer); getMatchResults(ctx, page->id); unloadResultBuffer(ctx); } int matchSubset(MatchContext* ctx, ReferencePage* page) { loadQueries(ctx); fprintf(stderr, "Matching queries %s - %s against ref coords %d - %d\n", ctx->queries->h_names[0], ctx->queries->h_names[ctx->queries->count - 1], page->begin, page->end); loadResultBuffer(ctx); // TODO: renable RC support by calling this twice /w reverse/fwdreverse // idiom. matchQueryBlockToReferencePage(ctx, page, false); if (USE_PRINT_KERNEL && !ctx->on_cpu) { getExactAlignments(ctx, page, false); } else { getExactAlignments(ctx, page, true); } flushOutput(); unloadQueries(ctx); return 0; } int getFreeDeviceMemory(bool on_cpu) { unsigned int free_mem = 0; unsigned int total_mem = 0; // We have to 'prime' CUDA by making an allocation here. cuMemGetInfo // will return zeroes until we do a malloc. 
int * p = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&p, sizeof(int))); CUDA_SAFE_CALL(hipFree(p)); if (!on_cpu) { boardMemory(&free_mem, &total_mem); fprintf(stderr, "board free memory: %u total memory: %u\n", free_mem, total_mem); } else { total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX } return free_mem; } int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page) { fprintf(stderr, "Beginning reference page %p\n", page); int free_mem = getFreeDeviceMemory(ctx->on_cpu); int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM; ctx->ref = &(page->ref); loadReference(ctx); while (getQueryBlock(ctx, available_mem)) { matchSubset(ctx, page); ctx->statistics.bp_avg_query_length = ctx->queries->texlen / (float)(ctx->queries->count) - 2; destroyQueryBlock(ctx->queries); if (num_bind_tex_calls > 100) { hipDeviceReset(); num_bind_tex_calls = 0; loadReference(ctx); } } unloadReferenceString(ctx->ref); unloadReferenceTree(ctx); lseek(ctx->queries->qfile, 0, SEEK_SET); return 0; } void initReferencePages( MatchContext* ctx , int* num_pages, ReferencePage** pages_out) { unsigned int bases_in_ref = ctx->full_ref_len - 3; unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref; unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size); fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n", num_reference_pages, bases_in_ref, page_size); unsigned int page_overlap = MAX_QUERY_LEN + 1; ReferencePage* pages = (ReferencePage*) calloc(num_reference_pages, sizeof(ReferencePage)); pages[0].begin = 1; pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning pages[0].shadow_left = -1; pages[0].id = 0; for (int i = 1; i < num_reference_pages - 1; ++i) { pages[i].begin = pages[i - 1].end - page_overlap; pages[i].end = pages[i].begin + page_size + page_overlap; pages[i - 1].shadow_right = pages[i].begin; pages[i].shadow_left = pages[i-1].end; pages[i].id = i; } if (num_reference_pages > 1) { int last_page = num_reference_pages - 1; pages[last_page].begin = pages[last_page - 1].end - page_overlap; pages[last_page].end = ctx->full_ref_len - 1; pages[last_page - 1].shadow_right = pages[last_page].begin; pages[last_page].shadow_right = -1; pages[last_page].shadow_left = pages[last_page - 1].end; pages[last_page].id = last_page; } *pages_out = pages; *num_pages = num_reference_pages; } int streamReferenceAgainstQueries(MatchContext* ctx) { int num_reference_pages = 0; ReferencePage* pages = NULL; initReferencePages(ctx, &num_reference_pages, &pages); buildReferenceTexture(&(pages[0].ref), ctx->full_ref, pages[0].begin, pages[0].end, ctx->min_match_length, ctx->dotfilename, ctx->texfilename, &(ctx->statistics)); matchQueriesToReferencePage(ctx, &pages[0]); destroyReference(&(pages[0].ref)); for (int i = 1; i < num_reference_pages - 1; ++i) { buildReferenceTexture(&(pages[i].ref), ctx->full_ref, pages[i].begin, pages[i].end, ctx->min_match_length, NULL, NULL, &(ctx->statistics)); matchQueriesToReferencePage(ctx, &pages[i]); destroyReference(&(pages[i].ref)); } if (num_reference_pages > 1) { int last_page = num_reference_pages - 1; buildReferenceTexture(&(pages[last_page].ref), ctx->full_ref, pages[last_page].begin, pages[last_page].end, ctx->min_match_length, NULL, NULL, &(ctx->statistics)); matchQueriesToReferencePage(ctx, &pages[last_page]); destroyReference(&(pages[last_page].ref)); } free(pages); return 0; } extern "C" int 
matchQueries(MatchContext* ctx)
{
    assert(sizeof(struct PixelOfNode) == sizeof(uint4));
    assert(sizeof(struct PixelOfChildren) == sizeof(uint4));

#if TREE_ACCESS_HISTOGRAM
    ctx->statistics.node_hist_size = 0;
    ctx->statistics.child_hist_size = 0;
#endif

    resetStats(&(ctx->statistics));

    char* ttimer = createTimer();
    startTimer(ttimer);

    int ret;
    fprintf(stderr, "Streaming reference pages against all queries\n");
    ret = streamReferenceAgainstQueries(ctx);

    stopTimer(ttimer);
    ctx->statistics.t_end_to_end += getTimerValue(ttimer);
    deleteTimer(ttimer);

    writeStatisticsFile(&(ctx->statistics), ctx->stats_file,
                        "node_hist.out", "child_hist.out");

    return ret;
}
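Editor's note: the kernel-launch boilerplate in the hipified file above (matchOnGPU and runPrintKernel) follows a single pattern: clamp the block size to BLOCKSIZE, size the grid with a ceiling division over the number of work items, launch through hipLaunchKernelGGL, then check hipGetLastError. The sketch below distills that pattern into a self-contained HIP example; sketchKernel, launchSketch, and SKETCH_BLOCKSIZE are hypothetical names introduced here for illustration and are not part of mummergpu.

#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_BLOCKSIZE 256   // mirrors BLOCKSIZE in mummergpu.cu

// Trivial stand-in kernel: one thread per work item.
__global__ void sketchKernel(int* out, int numItems)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < numItems)
        out[i] = i;
}

// Launch-configuration pattern used by matchOnGPU/runPrintKernel:
// clamp the block, take the ceiling for the grid, then check for errors.
void launchSketch(int* d_out, int numItems)
{
    int blocksize = (numItems > SKETCH_BLOCKSIZE) ? SKETCH_BLOCKSIZE : numItems;
    dim3 dimBlock(blocksize, 1, 1);
    dim3 dimGrid((unsigned int)ceil(numItems / (float)SKETCH_BLOCKSIZE), 1, 1);

    hipLaunchKernelGGL(sketchKernel, dimGrid, dimBlock, 0, 0, d_out, numItems);

    hipError_t err = hipGetLastError();
    if (hipSuccess != err) {
        fprintf(stderr, "Kernel execution failed: %s.\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    hipDeviceSynchronize();
}

int main()
{
    const int numItems = 1000;
    int* d_out = NULL;
    hipMalloc((void**)&d_out, numItems * sizeof(int));
    launchSketch(d_out, numItems);
    hipFree(d_out);
    return 0;
}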
c953325916a3808d02d4a89981619566bfbfeadf.cu
// Includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/types.h> #include <unistd.h> #include <errno.h> #include <sys/time.h> #include <cuda.h> #include <vector_types.h> // includes, kernels #include <common.cu> #include <mummergpu.h> #include <mummergpu_kernel.cu> int USE_PRINT_KERNEL = 1; #define BREATHING_ROOM (16 * 1024 * 1024) #define BASES_PER_TREE_PAGE 8388608 //#define BASES_PER_TREE_PAGE 7000000 #define BLOCKSIZE 256 unsigned int cuda_calls = 0; void trap_dbg() { fprintf(stderr, "Trapped\n"); } #define CUDA_SAFE_CALL( call) do { \ cuda_calls++; \ cudaError err = call; \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \ __FILE__, __LINE__, err, cudaGetErrorString( err) ); \ trap_dbg(); \ exit(EXIT_FAILURE); \ } } while (0) # define CU_SAFE_CALL_NO_SYNC( call ) do { \ CUresult err = call; \ if( CUDA_SUCCESS != err) { \ fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \ err, __FILE__, __LINE__ ); \ exit(EXIT_FAILURE); \ } } while (0) # define CUT_DEVICE_INIT_DRV(cuDevice) do { \ cuDevice = 0; \ int deviceCount = 0; \ CUresult err = cuInit(0); \ if (CUDA_SUCCESS == err) \ CU_SAFE_CALL_NO_SYNC(cuDeviceGetCount(&deviceCount)); \ if (deviceCount == 0) { \ fprintf(stderr, "There is no device.\n"); \ exit(EXIT_FAILURE); \ } \ int dev; \ for (dev = 0; dev < deviceCount; ++dev) { \ int major, minor; \ CU_SAFE_CALL_NO_SYNC(cuDeviceComputeCapability(&major, &minor, dev));\ if (major >= 1) \ break; \ } \ if (dev == deviceCount) { \ fprintf(stderr, "There is no device supporting CUDA.\n"); \ exit(EXIT_FAILURE); \ } \ else \ CU_SAFE_CALL_NO_SYNC(cuDeviceGet(&cuDevice, dev)); \ } while (0) unsigned int num_bind_tex_calls = 0; #define BIND_TEX(offset, tex, arr, desc, len) do { \ CUDA_SAFE_CALL(cudaBindTexture(offset, tex, arr, desc, len)); \ ++num_bind_tex_calls; \ } while(0) #define BIND_TEX_ARRAY(tex, arr, desc) do { \ CUDA_SAFE_CALL(cudaBindTextureToArray(tex, arr, desc)); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC(ptr, size) do { \ cudaMalloc(ptr, size); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) do { \ cudaMallocPitch(ptr, out_pitch, rowsize, numrows); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) do { \ cudaMallocArray(ptr, desc, pitch, rows); \ ++num_bind_tex_calls; \ } while(0) //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold(MatchResults* results, char* refstr, char* queries, int* queryAddrs, int* queryLengths, PixelOfNode* nodeTexture, PixelOfChildren* childrenTexture, int numQueries, int mismatch_length, int rc); extern "C" void getReferenceString(const char * filename, char** refstr, size_t* reflen); extern "C" void createTreeTexture(const char * filename, PixelOfNode** nodeTexture, PixelOfChildren** childrenTexture, unsigned int* width, unsigned int* node_height, unsigned int* children_height, AuxiliaryNodeData** aux_data, int* num_match_coords, int min_match_len, Statistics* statistics, const char * dotfilename, const char * texfilename); extern "C" void getQueriesTexture(int qfile, char** queryTexture, size_t* queryLength, int** queryAddrs, char*** queryNames, int** queryLengths, unsigned int* numQueries, unsigned int* num_match_coords, unsigned int device_memory_avail, int 
min_match_length, bool rc); extern "C" int lookupNumLeaves(ReferencePage * page, TextureAddress addr); void printAlignments(ReferencePage* page, Alignment* alignments, char* query, int qrylen, TextureAddress nodeid, int qrypos, int edge_depth, int min_match, bool rc, bool forwardcoordinates); int countLeafNodes(int nodeid); extern "C" void mapQueriesEndToEnd(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* h_alignments, unsigned int numAligments); char * createTimer() { unsigned int * ptr = (unsigned int *) malloc(sizeof(struct Timer_t)); memset(ptr, 0, sizeof(struct Timer_t)); return (char *) ptr; } void startTimer(char * ptr) { gettimeofday(&(((struct Timer_t *)ptr)->start_m), NULL); } void stopTimer(char * ptr) { gettimeofday(&(((struct Timer_t *)ptr)->end_m), NULL); } float getTimerValue(char * ptr) { Timer_t * timer = (Timer_t*) ptr; if (timer == NULL) { fprintf(stderr, "Uninitialized timer!!!\n"); return 0.0; } if (timer->end_m.tv_sec == 0) { stopTimer(ptr); } return (float) (1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec) + (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec))); } void deleteTimer(char * ptr) { free((Timer_t *)ptr); } extern "C" int createReference(const char* fromFile, Reference* ref) { if (!fromFile || !ref) return -1; char * loadreftimer = createTimer(); startTimer(loadreftimer); getReferenceString(fromFile, &(ref->str), &(ref->len)); stopTimer(loadreftimer); ref->t_load_from_disk += getTimerValue(loadreftimer); deleteTimer(loadreftimer); return 0; } extern "C" int destroyReference(Reference* ref) { free(ref->h_node_tex_array); free(ref->h_children_tex_array); free(ref->str); #if REORDER_REF free(ref->h_ref_array); #endif free(ref->aux_data); #if TREE_ACCESS_HISTOGRAM free(ref->h_node_hist); free(ref->h_child_hist); #endif ref->str = NULL; ref->len = 0; return 0; } extern "C" int createQuerySet(const char* fromFile, QuerySet* queries) { fprintf(stderr, "Opening %s...\n", fromFile); int qfile = open(fromFile, O_RDONLY); if (qfile == -1) { fprintf(stderr, "Can't open %s: %d\n", fromFile, errno); exit (1); } queries->qfile = qfile; return 0; } extern "C" int destroyQuerySet(QuerySet* queries) { if (queries->qfile) close(queries->qfile); return 0; } extern "C" void printStringForError(int err) { } extern "C" int createMatchContext(Reference* ref, QuerySet* queries, MatchResults* matches, bool on_cpu, int min_match_length, char* stats_file, bool reverse, bool forwardreverse, bool forwardcoordinates, bool showQueryLength, char* dotfilename, char* texfilename, MatchContext* ctx) { ctx->queries = queries; ctx->ref = ref; ctx->full_ref = ref->str; ctx->full_ref_len = ref->len; ctx->on_cpu = on_cpu; ctx->min_match_length = min_match_length; ctx->stats_file = stats_file; ctx->reverse = reverse; ctx->forwardreverse = forwardreverse; ctx->forwardcoordinates = forwardcoordinates; ctx->show_query_length = showQueryLength; ctx->dotfilename = dotfilename; ctx->texfilename = texfilename; return 0; } extern "C" int destroyMatchContext(MatchContext* ctx) { free(ctx->full_ref); //destroyReference(ctx->ref); destroyQuerySet(ctx->queries); return 0; } void buildReferenceTexture(Reference* ref, char* full_ref, size_t begin, size_t end, int min_match_len, char* dotfilename, char* texfilename, Statistics* statistics) { fprintf(stderr, "Building reference texture...\n"); PixelOfNode* nodeTexture = NULL; PixelOfChildren * childrenTexture = NULL; unsigned int width = 0; unsigned int node_height = 0; unsigned int children_height = 0; 
AuxiliaryNodeData* aux_data = NULL; int num_nodes; char * loadreftimer = createTimer(); startTimer(loadreftimer); ref->len = end - begin + 3; ref->str = (char*)malloc(ref->len); ref->str[0] = 's'; strncpy(ref->str + 1, full_ref + begin, ref->len - 3); strcpy(ref->str + ref->len - 2, "$"); stopTimer(loadreftimer); statistics->t_ref_from_disk += getTimerValue(loadreftimer) + ref->t_load_from_disk; deleteTimer(loadreftimer); createTreeTexture(ref->str, &nodeTexture, &childrenTexture, &width, &node_height, &children_height, &aux_data, &num_nodes, min_match_len, statistics, dotfilename, texfilename); ref->h_node_tex_array = nodeTexture; ref->h_children_tex_array = childrenTexture; ref->tex_width = width; ref->tex_node_height = node_height; ref->tex_children_height = children_height; #if TREE_ACCESS_HISTOGRAM ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int)); ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int)); #endif ref->aux_data = aux_data; ref->num_nodes = num_nodes; ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) + (width * children_height * sizeof(PixelOfChildren)); fprintf(stderr, "This tree will need %d bytes on the board\n", ref->bytes_on_board); #if REORDER_REF char * reordertimer = createTimer(); startTimer(reordertimer); unsigned int refpitch = ref->pitch = 65536; int numrows = ceil(ref->len / ((float)refpitch)); int blocksize = 4; numrows += blocksize; int refstrsize = numrows * refpitch; ref->h_ref_array = (char *) malloc(refstrsize); ref->bytes_on_board += refstrsize; fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize); int z_max = numrows * refpitch; for (int z = 0; z < z_max; z++) { ref->h_ref_array[z] = 'Z'; } int x, y; int maxx = 0, maxy = 0; size_t reflen = ref->len; char* refstr = ref->str; int block_dim = refpitch * blocksize; for (int i = 0; i < reflen; i++) { int bigx = i % (block_dim); // ref string reorder int bigy = i / (block_dim); y = bigy * blocksize + bigx % blocksize; x = bigx / blocksize; // printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]); assert(x < refpitch); assert(y < numrows); ref->h_ref_array[y*refpitch+x] = refstr[i]; if (x > maxx) { maxx = x; } if (y > maxy) { maxy = y; } } if ((maxx >= refpitch) || (maxy >= numrows)) { fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n", maxx, refpitch, maxy, numrows); exit(1); } stopTimer(reordertimer); if (statistics) statistics->t_reorder_ref_str += getTimerValue(reordertimer); deleteTimer(reordertimer); #else fprintf(stderr, "The refstr requires %d bytes\n", ref->len); ref->bytes_on_board += ref->len; #endif } void boardMemory(unsigned int * free_mem, unsigned int * total_mem) { // The emulator doesn't allow calls to cuMemGetInfo #ifdef __DEVICE_EMULATION__ *free_mem = 512*1024*1024; *total_mem = 768*1024*1024; #else CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem)); #endif } void loadReferenceTexture(MatchContext* ctx) { Reference* ref = ctx->ref; int numrows = ceil(ref->len / ((float)ref->pitch)); int blocksize = 4; numrows += blocksize; cudaChannelFormatDesc refTextureDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSigned); if (!ctx->on_cpu) { char * toboardtimer = createTimer(); startTimer(toboardtimer); #if REFTEX #if REORDER_REF CUDA_MALLOC_ARRAY((cudaArray**)(&ref->d_ref_array), &refTextureDesc, ref->pitch, numrows); CUDA_SAFE_CALL(cudaMemcpyToArray( (cudaArray*)(ref->d_ref_array), 0, 0, ref->h_ref_array, numrows*ref->pitch, cudaMemcpyHostToDevice)); reftex.addressMode[0] = cudaAddressModeClamp; 
reftex.addressMode[1] = cudaAddressModeClamp; reftex.filterMode = cudaFilterModePoint; reftex.normalized = false; BIND_TEX_ARRAY(reftex, (cudaArray*)ref->d_ref_array, refTextureDesc); ctx->ref->bytes_on_board += numrows * ref->pitch; #else CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len); CUDA_SAFE_CALL( cudaMemcpy( (void*)(ref->d_ref_array), ref->str, ref->len, cudaMemcpyHostToDevice) ); reftex.addressMode[0] = cudaAddressModeClamp; reftex.filterMode = cudaFilterModePoint; reftex.normalized = false; // access with normalized texture coordinates cudaChannelFormatDesc refDesc = cudaCreateChannelDesc(8,0,0,0, cudaChannelFormatKindUnsigned); BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len); ctx->ref->bytes_on_board += ref->len; #endif #else #if REORDER_REF size_t refpitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_ref_array), &refpitch, ref->pitch * sizeof(char), numrows); CUDA_SAFE_CALL( cudaMemcpy2D((ref->d_ref_array), refpitch, ref->h_ref_array, ref->pitch , ref->pitch * sizeof(char), numrows, cudaMemcpyHostToDevice)); ctx->ref->bytes_on_board += numrows * ref->pitch; #else CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len); CUDA_SAFE_CALL( cudaMemcpy( (void*)(ref->d_ref_array), ref->str, ref->len, cudaMemcpyHostToDevice) ); ctx->ref->bytes_on_board += ref->len; #endif #endif stopTimer(toboardtimer); ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer); deleteTimer(toboardtimer); } else { ref->d_ref_array = NULL; } } void unloadReferenceString(Reference* ref) { #if REFTEX CUDA_SAFE_CALL(cudaUnbindTexture( reftex ) ); #endif #if REORDER_REF && REFTEX CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_ref_array))); #else CUDA_SAFE_CALL(cudaFree((ref->d_ref_array))); #endif ref->d_ref_array = NULL; } void unloadReferenceTree(MatchContext* ctx) { Reference* ref = ctx->ref; #if REORDER_TREE // Unload nodetex #if NODETEX CUDA_SAFE_CALL(cudaUnbindTexture( nodetex ) ); CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_node_tex_array))); #else CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array)); #endif ref->d_node_tex_array = NULL; // Unload childrentex if (ref->d_children_tex_array) { #if CHILDTEX CUDA_SAFE_CALL(cudaUnbindTexture( childrentex ) ); CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_children_tex_array))); #else CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array)); #endif } ref->d_children_tex_array = NULL; #else #if NODETEX CUDA_SAFE_CALL(cudaUnbindTexture( nodetex ) ); #endif CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array)); ref->d_node_tex_array = NULL; // Unload childrentex if (ref->d_children_tex_array) { #if CHILDTEX CUDA_SAFE_CALL(cudaUnbindTexture( childrentex ) ); #endif CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array)); ref->d_children_tex_array = NULL; } #endif #if TREE_ACCESS_HISTOGRAM CUDA_SAFE_CALL(cudaFree(ref->d_node_hist)); ref->d_node_hist = NULL; CUDA_SAFE_CALL(cudaFree(ref->d_child_hist)); ref->d_child_hist = NULL; #endif } //loads a tree and text for [begin, end) in the reference void loadReference(MatchContext* ctx) { Reference* ref = ctx->ref; ref->bytes_on_board = 0; loadReferenceTexture(ctx); if (!ctx->on_cpu) { char * toboardtimer = createTimer(); startTimer(toboardtimer); // node texels ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode)); // children texels ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren); #if REORDER_TREE #if NODETEX cudaChannelFormatDesc nodeTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned); CUDA_MALLOC_ARRAY( 
(cudaArray**)(&ref->d_node_tex_array), &nodeTextureDesc, ref->tex_width, ref->tex_node_height ); CUDA_SAFE_CALL( cudaMemcpyToArray( (cudaArray*)(ref->d_node_tex_array), 0, 0, ref->h_node_tex_array, ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode), cudaMemcpyHostToDevice)); nodetex.addressMode[0] = cudaAddressModeClamp; nodetex.addressMode[1] = cudaAddressModeClamp; nodetex.filterMode = cudaFilterModePoint; nodetex.normalized = false; // access with normalized texture coordinates BIND_TEX_ARRAY(nodetex, (cudaArray*)ref->d_node_tex_array, nodeTextureDesc); #else size_t nodepitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_node_tex_array), &nodepitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_node_height ); CUDA_SAFE_CALL( cudaMemcpy2D((ref->d_node_tex_array), nodepitch, ref->h_node_tex_array, nodepitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_node_height, cudaMemcpyHostToDevice)); #endif if (ref->tex_children_height) { #if CHILDTEX cudaChannelFormatDesc childrenTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned); CUDA_MALLOC_ARRAY( (cudaArray**)(&ref->d_children_tex_array), &childrenTextureDesc, ref->tex_width, ref->tex_children_height ); CUDA_SAFE_CALL( cudaMemcpyToArray((cudaArray*)(ref->d_children_tex_array), 0, 0, ref->h_children_tex_array, ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren), cudaMemcpyHostToDevice)); childrentex.addressMode[0] = cudaAddressModeClamp; childrentex.addressMode[1] = cudaAddressModeClamp; childrentex.filterMode = cudaFilterModePoint; childrentex.normalized = false; // access with normalized texture coordinates BIND_TEX_ARRAY(childrentex, (cudaArray*)(ref->d_children_tex_array), childrenTextureDesc); #else size_t childpitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_children_tex_array), &childpitch, ref->tex_width * sizeof(PixelOfChildren), ref->tex_children_height ); CUDA_SAFE_CALL( cudaMemcpy2D((ref->d_children_tex_array), childpitch, ref->h_children_tex_array, childpitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_children_height, cudaMemcpyHostToDevice)); #endif } #if TREE_ACCESS_HISTOGRAM // node hist ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_node_hist), ref->tex_width * ref->tex_node_height *sizeof(int)); CUDA_SAFE_CALL( cudaMemset((ref->d_node_hist),0, ref->tex_width * ref->tex_node_height * sizeof(int))); if (ref->tex_children_height) { // children hist ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int); fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n", ref->bytes_on_board); CUDA_MALLOC( (void**)(&ref->d_child_hist), ref->tex_width * ref->tex_children_height *sizeof(int)); CUDA_SAFE_CALL( cudaMemset((ref->d_child_hist),0, ref->tex_width * ref->tex_children_height * sizeof(int))); } #endif #else // NO TREE REORDERING // Node tex, 1-dimensional CUDA_MALLOC( (void**)(&ref->d_node_tex_array), ref->tex_node_height * sizeof(PixelOfNode)); CUDA_SAFE_CALL( cudaMemcpy( (ref->d_node_tex_array), ref->h_node_tex_array, ref->tex_node_height * sizeof(PixelOfNode), cudaMemcpyHostToDevice)); #if NODETEX cudaChannelFormatDesc nodeTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned); nodetex.addressMode[0] = cudaAddressModeClamp; nodetex.filterMode = cudaFilterModePoint; nodetex.normalized = false; // access with normalized texture coordinates BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc, ref->tex_node_height* sizeof(PixelOfNode)); #endif if 
(ref->tex_children_height) { // Child tex, 1-dimensional CUDA_MALLOC( (void**)(&ref->d_children_tex_array), ref->tex_children_height * sizeof(PixelOfChildren)); CUDA_SAFE_CALL( cudaMemcpy( (ref->d_children_tex_array), ref->h_children_tex_array, ref->tex_children_height * sizeof(PixelOfChildren), cudaMemcpyHostToDevice)); #if CHILDTEX cudaChannelFormatDesc childTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned); childrentex.addressMode[0] = cudaAddressModeClamp; childrentex.filterMode = cudaFilterModePoint; childrentex.normalized = false; // access with normalized texture coordinates BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array), childTextureDesc, ref->tex_children_height* sizeof(PixelOfChildren)); #endif } #if TREE_ACCESS_HISTOGRAM ref->bytes_on_board += ref->tex_node_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_node_hist), ref->tex_node_height *sizeof(int)); CUDA_SAFE_CALL( cudaMemset((ref->d_node_hist),0, ref->tex_node_height * sizeof(int))); if (ref->tex_children_height) { ref->bytes_on_board += ref->tex_children_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_child_hist), ref->tex_children_height *sizeof(int)); CUDA_SAFE_CALL( cudaMemset((ref->d_child_hist),0, ref->tex_children_height * sizeof(int))); } #endif #endif #if TWO_LEVEL_NODE_TREE PixelOfNode node_buf[NODE_THRESH]; memset(node_buf, 0, sizeof(node_buf)); for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i) { TextureAddress myaddress(id2addr(i)); #if MERGETEX && REORDER_TREE myaddress.x &= 0x7FF; myaddress.x *= 2; int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc]; #elif REORDER_TREE int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc]; #elif MERGETEX node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x*2]; #else node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x]; #endif } CUDA_SAFE_CALL( cudaMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf))); #endif #if TWO_LEVEL_CHILD_TREE PixelOfChildren child_buf[CHILD_THRESH]; memset(child_buf, 0, sizeof(child_buf)); for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i) { TextureAddress myaddress(id2addr(i)); #if MERGETEX && REORDER_TREE myaddress.x &= 0x7FF; myaddress.x *= 2; int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[loc+1]; #elif REORDER_TREE int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; child_buf[i]= ((PixelOfChildren*)(ref->h_children))[loc]; #elif MERGETEX child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x*2+1]; #else child_buf[i]= ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x]; #endif } CUDA_SAFE_CALL( cudaMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf))); #endif stopTimer(toboardtimer); ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer); deleteTimer(toboardtimer); fprintf(stderr, "done\n"); } else { ref->d_node_tex_array = NULL; ref->d_children_tex_array = NULL; } } void dumpQueryBlockInfo(QuerySet* queries) { fprintf(stderr, "\tProcessing queries %s to %s\n", queries->h_names[0], queries->h_names[queries->count-1]); } void loadQueries(MatchContext* ctx) { QuerySet* queries = ctx->queries; queries->bytes_on_board = 0; unsigned int numQueries = queries->count; if (!ctx->on_cpu) { fprintf(stderr, "Allocating device memory for queries... 
"); char* toboardtimer = createTimer(); startTimer(toboardtimer); dumpQueryBlockInfo(queries); CUDA_MALLOC((void**) &queries->d_tex_array, queries->texlen); \ queries->bytes_on_board += queries->texlen; CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_tex_array, queries->h_tex_array + queries->h_addrs_tex_array[0], queries->texlen, cudaMemcpyHostToDevice)); #if QRYTEX qrytex.addressMode[0] = cudaAddressModeClamp; qrytex.filterMode = cudaFilterModePoint; qrytex.normalized = false; // access with normalized texture coordinates cudaChannelFormatDesc qryDesc = cudaCreateChannelDesc(8,0,0,0, cudaChannelFormatKindUnsigned); BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc, queries->texlen); #endif CUDA_MALLOC((void**) &queries->d_addrs_tex_array, numQueries * sizeof(int)); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_addrs_tex_array, queries->h_addrs_tex_array, numQueries * sizeof(int), cudaMemcpyHostToDevice)); CUDA_MALLOC((void**) &queries->d_lengths_array, numQueries * sizeof(int)); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_lengths_array, queries->h_lengths_array, numQueries * sizeof(int), cudaMemcpyHostToDevice)); stopTimer(toboardtimer); ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer); deleteTimer(toboardtimer); fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board); } else { queries->d_addrs_tex_array = NULL; queries->d_tex_array = NULL; queries->d_lengths_array = NULL; fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries*sizeof(int) + queries->texlen); } } void unloadQueries(MatchContext* ctx) { QuerySet* queries = ctx->queries; CUDA_SAFE_CALL(cudaFree(queries->d_tex_array)); queries->d_tex_array = NULL; CUDA_SAFE_CALL(cudaFree(queries->d_addrs_tex_array)); queries->d_addrs_tex_array = NULL; CUDA_SAFE_CALL(cudaFree(queries->d_lengths_array)); queries->d_lengths_array = NULL; queries->bytes_on_board = 0; } // Computes the location of the first MatchCoord for a given query. NOTE: // Do NOT use this function if COALESCED_QUERIES == 1 inline int match_coord_addrs(int qryid, int qry_addrs, int match_length) { return qry_addrs - qryid * (match_length + 1); } // Construct the offset table for a set of queries. This table will be used // by the printing functions, and if COALESCED_QUERIES == 1, by the matching // kernel. 
void buildCoordOffsetArray(MatchContext* ctx, int** h_coord_offset_array, unsigned int* num_coords) { int numCoords = 0; int match_length = ctx->min_match_length; int numQueries = ctx->queries->count; int* lengths = ctx->queries->h_lengths_array; int* coord_offsets = (int*)calloc(numQueries, sizeof(int)); #if COALESCED_QUERIES for (unsigned int i = 0; i < numQueries; i += WARP_SIZE) { // Every query in this warp will need at least this many coords int max_num_coords = 0; for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) { int num_coords = lengths[i + j] - match_length + 1; if ( max_num_coords < num_coords) max_num_coords = num_coords; } unsigned int block_size = max_num_coords * WARP_SIZE; for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) { ctx->results.h_coord_tex_array[i + j] = numCoords + j; } numCoords += block_size; } #else for (unsigned int i = 0; i < numQueries; ++i) { int qryoffset = ctx->queries->h_addrs_tex_array[i]; coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length); } if (numQueries > 0) { unsigned int last_qry = numQueries - 1; unsigned int last_qry_len = lengths[last_qry] - match_length + 1; numCoords = coord_offsets[last_qry] + last_qry_len; fprintf(stderr, "Need %d match coords for this result array\n", numCoords); } #endif *num_coords = numCoords; *h_coord_offset_array = coord_offsets; } void loadResultBuffer(MatchContext* ctx) { unsigned int numQueries = ctx->queries->count; assert (numQueries); char* offsettimer = createTimer(); startTimer(offsettimer); buildCoordOffsetArray(ctx, &(ctx->results.h_coord_tex_array), &(ctx->results.numCoords)); stopTimer(offsettimer); ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer); deleteTimer(offsettimer); unsigned int numCoords = ctx->results.numCoords; fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...", numQueries, numCoords*sizeof(MatchCoord) ); unsigned int boardFreeMemory = 0; unsigned int total_mem = 0; boardMemory(&boardFreeMemory, &total_mem); fprintf(stderr,"board free memory: %u total memory: %u\n", boardFreeMemory, total_mem); ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord)); if (ctx->results.h_match_coords == NULL) { trap_dbg(); exit(EXIT_FAILURE); } if (!ctx->on_cpu) { char* toboardtimer = createTimer(); startTimer(toboardtimer); ctx->results.bytes_on_board = 0; CUDA_MALLOC( (void**) &ctx->results.d_match_coords, numCoords * sizeof(MatchCoord)); ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord); CUDA_SAFE_CALL( cudaMemset( (void*)ctx->results.d_match_coords, 0, numCoords * sizeof(MatchCoord))); #if COALESCED_QUERIES CUDA_MALLOC((void**) &ctx->results.d_coord_tex_array, numQueries * sizeof(int)); ctx->results.bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( cudaMemcpy((void*) ctx->results.d_coord_tex_array, ctx->results.h_coord_tex_array, numQueries * sizeof(int), cudaMemcpyHostToDevice)); #endif stopTimer(toboardtimer); ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer); deleteTimer(toboardtimer); } else { ctx->results.d_match_coords = NULL; } fprintf(stderr, "done\n"); } void unloadResultBuffer(MatchContext* ctx) { CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords)); ctx->results.d_match_coords = NULL; ctx->results.bytes_on_board = 0; #if COALESCED_QUERIES CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords)); #endif } void transferResultsFromDevice(MatchContext* ctx) { if (!ctx->on_cpu) { char* fromboardtimer = createTimer(); startTimer(fromboardtimer); 
CUDA_SAFE_CALL(cudaMemcpy(ctx->results.h_match_coords, ctx->results.d_match_coords, ctx->results.numCoords * sizeof(MatchCoord), cudaMemcpyDeviceToHost) ); #if TREE_ACCESS_HISTOGRAM CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_node_hist, ctx->ref->d_node_hist, ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int), cudaMemcpyDeviceToHost) ); CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_child_hist, ctx->ref->d_child_hist, ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int), cudaMemcpyDeviceToHost) ); if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height) { int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int)); if (ctx->statistics.node_hist_size) memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int)); ctx->statistics.node_hist = temp; ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height; } if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height) { temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int)); if (ctx->statistics.hist_size) memcpy(temp, ctx->statistics.child_hist, ctx->statistics.hist_size * sizeof(int)); ctx->statistics.child_hist = temp; ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height; } for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) { ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i]; } for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) { ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i]; } #endif stopTimer(fromboardtimer); ctx->statistics.t_match_coords_from_board += getTimerValue(fromboardtimer); deleteTimer(fromboardtimer); } } int flushOutput(); int addToBuffer(char* string); char numbuffer[32]; MatchCoord* coordForQueryChar(MatchContext* ctx, unsigned int qryid, unsigned int qrychar) { MatchResults* results = &(ctx->results); MatchCoord* coords = results->h_match_coords; #if COALESCED_QUERIES return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE; #else return coords + results->h_coord_tex_array[qryid] + qrychar; #endif } void coordsToPrintBuffers(MatchContext* ctx, ReferencePage* page, MatchInfo** matches, Alignment** alignments, unsigned int mem_avail, unsigned int* coord_idx, unsigned int* match_idx, unsigned int* align_idx, unsigned int* nextqry, unsigned int* nextqrychar) { unsigned int numQueries = ctx->queries->count; int match_length = ctx->min_match_length; unsigned int cidx = *coord_idx; unsigned int midx = 0; unsigned int numCoords = ctx->results.numCoords; unsigned int numMatches = 0; unsigned int numAlignments = 0; int DEBUG = 0; if (DEBUG && cidx == 0) { for (int j = 0; j < numCoords; ++j) { MatchCoord * coord = ctx->results.h_match_coords+j; if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) { //fprintf(stdout, "node: %d\n", // coord->node); fprintf(stdout, "node: %d leaves:%d\n", coord->node.data, lookupNumLeaves(page, coord->node)); } } exit(0); } // How much can we fit into mem_avail? 
for (int j = cidx; j < numCoords; ++j) { MatchCoord* coord = ctx->results.h_match_coords + j; int queryAlignments = 0; int queryMatches = 0; if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) { int numLeaves = lookupNumLeaves(page, coord->node); queryAlignments += numLeaves; queryMatches++; } int allMatches = numMatches + queryMatches; int allAlignments = numAlignments + queryAlignments; int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment); if (neededSize > mem_avail || (allMatches/BLOCKSIZE) >= MAX_GRID_DIMENSION) { // adding this match won't fit on the board break; } ++cidx; numMatches = allMatches; numAlignments = allAlignments; } MatchInfo* M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo)); unsigned int alignmentOffset = 0; int qry = *nextqry; int qrychar = *nextqrychar; bool set_full = false; while (qry < numQueries) { // h_lengths_array doesn't count the 'q' at the beginning of each query int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length; while (qrychar < qlen) { if (midx >= numMatches) { set_full = true; break; } MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar); if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) { MatchInfo m; m.resultsoffset = alignmentOffset; m.qrystartpos = qrychar; m.matchnode = coord->node; m.edgematch = coord->edge_match_length; m.numLeaves = lookupNumLeaves(page, m.matchnode); m.queryid = qry; alignmentOffset += m.numLeaves; M[midx++] = m; } ++qrychar; } if (set_full) break; ++qry; qrychar = 0; } *coord_idx = cidx; *match_idx = midx; *align_idx = alignmentOffset; *matches = M; *nextqry = qry; *nextqrychar = qrychar; fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n", alignmentOffset * sizeof(Alignment), numAlignments); *alignments = (struct Alignment *) calloc(alignmentOffset, sizeof(Alignment)); //cudaMallocHost((void**)alignments, numAlignments * sizeof(Alignment)); } void runPrintKernel(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* alignments, unsigned int numAlignments) { MatchInfo* d_matches; size_t matchesSize = numMatches * sizeof(MatchInfo); CUDA_MALLOC((void**) &d_matches, matchesSize); struct Alignment * d_alignments; size_t alignmentSize = numAlignments * sizeof(Alignment); CUDA_MALLOC((void**) &d_alignments, alignmentSize); CUDA_SAFE_CALL(cudaMemset((void*) d_alignments, 0, alignmentSize)); char* atimer = createTimer(); startTimer(atimer); // Copy matches to card fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments); fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments); int DEBUG = 0; if (DEBUG) { for (int i = 0; i < numMatches; i++) { printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n", i, h_matches[i].resultsoffset, h_matches[i].queryid, h_matches[i].matchnode.data, h_matches[i].numLeaves, h_matches[i].edgematch, h_matches[i].qrystartpos); } exit(0); } CUDA_SAFE_CALL(cudaMemcpy(d_matches, h_matches, matchesSize, cudaMemcpyHostToDevice)); stopTimer(atimer); float mtime = getTimerValue(atimer); // Launch the kernel int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches; dim3 dimBlock(blocksize, 1, 1); dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1); fprintf(stderr, " Calling print kernel... 
"); printKernel <<< dimGrid, dimBlock, 0 >>> (d_matches, numMatches, d_alignments, #if COALESCED_QUERIES ctx->results.d_coord_tex_array, #endif #if !QRYTEX #if COALESCED_QUERIES (int*) #endif ctx->queries->d_tex_array, #endif #if !NODETEX (_PixelOfNode*)ctx->ref->d_node_tex_array, #endif #if !CHILDTEX (_PixelOfChildren*)ctx->ref->d_children_tex_array, #endif ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, page->begin, page->end, page->shadow_left, page->shadow_right, ctx->min_match_length #if TREE_ACCESS_HISTOGRAM , ctx->ref->d_node_hist, ctx->ref->d_child_hist #endif ); cudaThreadSynchronize(); cudaError_t err = cudaGetLastError(); if ( cudaSuccess != err) { fprintf(stderr, "Kernel execution failed: %s.\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } startTimer(atimer); // Copy the results back to the host CUDA_SAFE_CALL(cudaMemcpy((void*)alignments, (void*)d_alignments, alignmentSize, cudaMemcpyDeviceToHost)); cudaThreadSynchronize(); stopTimer(atimer); float atime = getTimerValue(atimer); fprintf(stderr, "memcpy time= %f\n", atime + mtime); deleteTimer(atimer); // Cleanup CUDA_SAFE_CALL(cudaFree(d_alignments)); CUDA_SAFE_CALL(cudaFree(d_matches)); } // TODO: need reverse-complement printing support void runPrintOnCPU(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* alignments, unsigned int numAlignments) { unsigned int min_match_length = ctx->min_match_length; int* addrs = ctx->queries->h_addrs_tex_array; int* lengths = ctx->queries->h_lengths_array; char* qrychars = ctx->queries->h_tex_array; if (!numMatches) return; int qry = -1; unsigned int qrylen; for (int i = 0; i < numMatches; ++i) { MatchInfo& match = h_matches[i]; if (match.queryid != qry) { qry = match.queryid; qrylen = lengths[qry]; } if (!(match.edgematch & FRMASK)) { printAlignments(page, alignments + match.resultsoffset, #if COALESCED_QUERIES qrychars + sizeof(int) * addrs[qry], #else qrychars + addrs[qry], #endif qrylen, match.matchnode, match.qrystartpos, match.edgematch, min_match_length, 0, ctx->forwardcoordinates); } } } int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen); void getExactAlignments(MatchContext * ctx, ReferencePage * page, bool on_cpu) { assert(!ctx->reverse && !ctx->forwardreverse); unsigned int boardFreeMemory; unsigned int total_mem; if (!on_cpu) { boardMemory(&boardFreeMemory, &total_mem); fprintf(stderr, "board free memory: %u total memory: %u\n", boardFreeMemory, total_mem); } else { boardFreeMemory = 256 * 1024 * 1024; total_mem = boardFreeMemory; } #ifdef __DEVICE_EMULATION__ boardFreeMemory = 512 * 1024 * 1024; #endif boardFreeMemory -= BREATHING_ROOM; fprintf(stderr, "board free memory: %u\n", boardFreeMemory); int rTotalMatches = 0; int rTotalAlignments = 0; int totalRounds = 0; unsigned int last_coord = ctx->results.numCoords; unsigned int next_coord = 0; unsigned int nextqry = 0; unsigned int nextqrychar = 0; int lastqry = -1; while (next_coord < last_coord) { // see how many queries will fit on the board totalRounds++; unsigned int numMatches = 0; unsigned int numAlignments = 0; MatchInfo* h_matches = NULL; Alignment* h_alignments = NULL; int coord_left = next_coord; char* btimer = createTimer(); startTimer(btimer); coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory, &next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar); stopTimer(btimer); float btime = getTimerValue(btimer); ctx->statistics.t_coords_to_buffers += btime; fprintf(stderr, "buffer prep time= %f\n", btime); 
deleteTimer(btimer); fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n", totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments); if (numMatches == 0) continue; char buf[256]; //assert(qryend > qrystart); rTotalAlignments += numAlignments; rTotalMatches += numMatches; if (num_bind_tex_calls > 100) { cudaThreadExit(); num_bind_tex_calls = 0; loadReference(ctx); loadQueries(ctx); } char* ktimer = createTimer(); startTimer(ktimer); if (on_cpu) { runPrintOnCPU(ctx, page, h_matches, numMatches, h_alignments, numAlignments); } else { runPrintKernel(ctx, page, h_matches, numMatches, h_alignments, numAlignments); } stopTimer(ktimer); float ktime = getTimerValue(ktimer); ctx->statistics.t_print_kernel += ktime; fprintf(stderr, "print kernel time= %f\n", ktime); deleteTimer(ktimer); // char* stimer = createTimer(); // startTimer(stimer); // mapQueriesEndToEnd(ctx, // page, // h_matches, // numMatches, // h_alignments, // numAlignments); // // stopTimer(stimer); // // float stime = getTimerValue(stimer); // fprintf(stderr, "postprocess time= %f\n", stime); // deleteTimer(stimer); //flushOutput(); //Process the alignments char* otimer = createTimer(); startTimer(otimer); for (int m = 0; m < numMatches; m++) { int base = h_matches[m].resultsoffset; for (int i = 0; i < h_matches[m].numLeaves; i++) { // See if there are any more left maximal alignments for this match if (h_alignments[base+i].left_in_ref == 0) { break; } if (h_matches[m].queryid != lastqry) { lastqry = h_matches[m].queryid; addToBuffer("> "); addToBuffer(*(ctx->queries->h_names + lastqry)); addToBuffer("\n"); } sprintf(buf, "%d\t%d\t%d\n", h_alignments[base+i].left_in_ref, h_matches[m].qrystartpos + 1, h_alignments[base+i].matchlen); addToBuffer(buf); // addMatchToBuffer(h_alignments[base+i].left_in_ref, // h_matches[m].qrystartpos + 1, // h_alignments[base+i].matchlen); } } flushOutput(); stopTimer(otimer); ctx->statistics.t_results_to_disk += getTimerValue(otimer); deleteTimer(otimer); free(h_matches); free(h_alignments); //cudaFreeHost((void*)h_alignments); } free(ctx->results.h_coord_tex_array); free(ctx->results.h_match_coords); ctx->results.h_coord_tex_array = NULL; ctx->results.h_match_coords = NULL; fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n", rTotalMatches, rTotalAlignments, totalRounds); } int getQueryBlock(MatchContext* ctx, size_t device_mem_avail) { QuerySet* queries = ctx->queries; char * queryTex = NULL; int* queryAddrs = NULL; int* queryLengths = NULL; unsigned int numQueries; unsigned int num_match_coords; size_t queryLen; char** names; fprintf(stderr, "Loading query block... 
"); char* queryreadtimer = createTimer(); startTimer(queryreadtimer); getQueriesTexture(queries->qfile, &queryTex, &queryLen, &queryAddrs, &names, &queryLengths, &numQueries, &num_match_coords, device_mem_avail, ctx->min_match_length, ctx->reverse || ctx->forwardreverse); stopTimer(queryreadtimer); ctx->statistics.t_queries_from_disk += getTimerValue(queryreadtimer); deleteTimer(queryreadtimer); queries->h_tex_array = queryTex; queries->count = numQueries; queries->h_addrs_tex_array = queryAddrs; queries->texlen = queryLen; queries->h_names = names; queries->h_lengths_array = queryLengths; ctx->results.numCoords = num_match_coords; fprintf(stderr, "done.\n"); return numQueries; } void destroyQueryBlock(QuerySet* queries) { free(queries->h_tex_array); queries->h_tex_array = NULL; for (int i = 0; i < queries->count; ++i) free(queries->h_names[i]); free(queries->h_names); queries->count = 0; queries->texlen = 0; free(queries->h_addrs_tex_array); queries->h_addrs_tex_array = NULL; free(queries->h_lengths_array); queries->h_lengths_array = NULL; } void resetStats(Statistics* stats) { stats->t_end_to_end = 0.0; stats->t_match_kernel = 0.0; stats->t_print_kernel = 0.0; stats->t_queries_to_board = 0.0; stats->t_match_coords_to_board = 0.0; stats->t_match_coords_from_board = 0.0; stats->t_tree_to_board = 0.0; stats->t_ref_str_to_board = 0.0; stats->t_queries_from_disk = 0.0; stats->t_ref_from_disk = 0.0; stats->t_results_to_disk = 0.0; stats->t_tree_construction = 0.0; stats->t_tree_reorder = 0.0; stats->t_tree_flatten = 0.0; stats->t_reorder_ref_str = 0.0; stats->t_build_coord_offsets = 0.0; stats->t_coords_to_buffers = 0.0; stats->bp_avg_query_length = 0.0; #if TREE_ACCESS_HISTOGRAM if (stats->node_hist_size) { free(stats->node_hist); stats->node_hist = NULL; stats->node_hist_size = 0; } if (stats->child_hist_size) { free(stats->child_hist); stats->child_hist = NULL; stats->child_hist_size = 0; } #endif } void writeStatisticsFile(Statistics* stats, char* stats_filename, char* node_hist_filename = NULL, char* child_hist_filename = NULL) { if (stats_filename) { FILE* f = fopen(stats_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename); } else { fprintf(f, "Q"); fprintf(f, ",R"); fprintf(f, ",T"); fprintf(f, ",m"); fprintf(f, ",r"); fprintf(f, ",t"); fprintf(f, ",n"); fprintf(f, ",Total"); fprintf(f, ",Match kernel"); fprintf(f, ",Print Kernel"); fprintf(f, ",Queries to board"); fprintf(f, ",Match coords to board"); fprintf(f, ",Match coords from board"); fprintf(f, ",Tree to board"); fprintf(f, ",Ref str to board"); fprintf(f, ",Queries from disk"); fprintf(f, ",Ref from disk"); fprintf(f, ",Output to disk"); fprintf(f, ",Tree construction"); fprintf(f, ",Tree reorder"); fprintf(f, ",Tree flatten"); fprintf(f, ",Ref reorder"); fprintf(f, ",Build coord table"); fprintf(f, ",Coords to buffers"); fprintf(f, ",Avg qry length"); fprintf(f, "\n"); fprintf(f, "%d", QRYTEX); fprintf(f, ",%d", REFTEX); fprintf(f, ",%d", TREETEX); fprintf(f, ",%d", MERGETEX); fprintf(f, ",%d", REORDER_REF); fprintf(f, ",%d", REORDER_TREE); fprintf(f, ",%d", RENUMBER_TREE); fprintf(f, ",%f", stats->t_end_to_end); fprintf(f, ",%f", stats->t_match_kernel); fprintf(f, ",%f", stats->t_print_kernel); fprintf(f, ",%f", stats->t_queries_to_board); fprintf(f, ",%f", stats->t_match_coords_to_board); fprintf(f, ",%f", stats->t_match_coords_from_board); fprintf(f, ",%f", stats->t_tree_to_board); fprintf(f, ",%f", stats->t_ref_str_to_board); fprintf(f, ",%f", stats->t_queries_from_disk); 
fprintf(f, ",%f", stats->t_ref_from_disk); fprintf(f, ",%f", stats->t_results_to_disk); fprintf(f, ",%f", stats->t_tree_construction); fprintf(f, ",%f", stats->t_tree_reorder); fprintf(f, ",%f", stats->t_tree_flatten); fprintf(f, ",%f", stats->t_reorder_ref_str); fprintf(f, ",%f", stats->t_build_coord_offsets); fprintf(f, ",%f", stats->t_coords_to_buffers); fprintf(f, ",%f", stats->bp_avg_query_length); fprintf(f,"\n"); fclose(f); } } #if TREE_ACCESS_HISTOGRAM if (node_hist_filename) { FILE* f = fopen(node_hist_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename); } else { for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) fprintf(f, "%d\t%d\n", i, ctx->statistics.node_hist[i]); } } if (child_hist_filename) { FILE* f = fopen(child_hist_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename); } else { for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) fprintf(f, "%d\t%d\n", i, ctx->statistics.child_hist[i]); } } float total_node_hits = 0; float tree_top_node_hits = 0; float total_child_hits = 0; float tree_top_child_hits = 0; for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) { total_node_hits +=ctx->statistics.node_hist[i]; if (i < 256) { tree_top_node_hits += ctx->statistics.node_hist[i]; } } for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) { total_child_hits +=ctx->statistics.child_hist[i]; if (i < 256) { tree_top_child_hits += ctx->statistics.child_hist[i]; } } fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n",(int)tree_top_node_hits, (int)total_node_hits, tree_top_node_hits /total_node_hits); fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n",(int)tree_top_child_hits, (int)total_child_hits, tree_top_child_hits /total_child_hits); #endif } void matchOnCPU(MatchContext* ctx, bool doRC) { //TODO: CPU is matching is disabled. if (doRC) { // Match the reverse complement of the queries to the ref computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, REVERSE); } else { computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, FORWARD); } } void matchOnGPU(MatchContext* ctx, bool doRC) { int numQueries = ctx->queries->count; int blocksize = (numQueries > BLOCKSIZE) ? 
BLOCKSIZE : numQueries; dim3 dimBlock(blocksize, 1, 1); dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1); // Match the reverse complement of the queries to the ref if (doRC) { //TODO: GPU RC is disabled mummergpuRCKernel <<< dimGrid, dimBlock, 0 >>> (ctx->results.d_match_coords, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length); } else { mummergpuKernel <<< dimGrid, dimBlock, 0 >>> (ctx->results.d_match_coords, #if COALESCED_QUERIES ctx->results.d_coord_tex_array, #endif #if !QRYTEX #if COALESCED_QUERIES (int*) #endif ctx->queries->d_tex_array, #endif #if !NODETEX (_PixelOfNode*)(ctx->ref->d_node_tex_array), #endif #if !CHILDTEX (_PixelOfChildren*)(ctx->ref->d_children_tex_array), #endif #if !REFTEX (char*)ctx->ref->d_ref_array, #endif ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length #if TREE_ACCESS_HISTOGRAM , ctx->ref->d_node_hist, ctx->ref->d_child_hist #endif ); } // check if kernel execution generated an error cudaError_t err = cudaGetLastError(); if ( cudaSuccess != err) { fprintf(stderr, "Kernel execution failed: %s.\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void getMatchResults(MatchContext* ctx, unsigned int page_num) { transferResultsFromDevice(ctx); } void matchQueryBlockToReferencePage(MatchContext* ctx, ReferencePage* page, bool reverse_complement) { char* ktimer = createTimer(); fprintf(stderr, "Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n", ctx->queries->bytes_on_board, ctx->ref->bytes_on_board, ctx->results.bytes_on_board); startTimer(ktimer); if (ctx->on_cpu) { matchOnCPU(ctx, reverse_complement); } else { matchOnGPU(ctx, reverse_complement); cudaThreadSynchronize(); } stopTimer(ktimer); float ktime = getTimerValue(ktimer); ctx->statistics.t_match_kernel += ktime; fprintf(stderr, "match kernel time= %f\n", ktime); deleteTimer(ktimer); getMatchResults(ctx, page->id); unloadResultBuffer(ctx); } int matchSubset(MatchContext* ctx, ReferencePage* page) { loadQueries(ctx); fprintf(stderr, "Matching queries %s - %s against ref coords %d - %d\n", ctx->queries->h_names[0], ctx->queries->h_names[ctx->queries->count - 1], page->begin, page->end); loadResultBuffer(ctx); // TODO: renable RC support by calling this twice /w reverse/fwdreverse // idiom. matchQueryBlockToReferencePage(ctx, page, false); if (USE_PRINT_KERNEL && !ctx->on_cpu) { getExactAlignments(ctx, page, false); } else { getExactAlignments(ctx, page, true); } flushOutput(); unloadQueries(ctx); return 0; } int getFreeDeviceMemory(bool on_cpu) { unsigned int free_mem = 0; unsigned int total_mem = 0; // We have to 'prime' CUDA by making an allocation here. cuMemGetInfo // will return zeroes until we do a malloc. 
int * p = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&p, sizeof(int))); CUDA_SAFE_CALL(cudaFree(p)); if (!on_cpu) { boardMemory(&free_mem, &total_mem); fprintf(stderr, "board free memory: %u total memory: %u\n", free_mem, total_mem); } else { total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX } return free_mem; } int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page) { fprintf(stderr, "Beginning reference page %p\n", page); int free_mem = getFreeDeviceMemory(ctx->on_cpu); int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM; ctx->ref = &(page->ref); loadReference(ctx); while (getQueryBlock(ctx, available_mem)) { matchSubset(ctx, page); ctx->statistics.bp_avg_query_length = ctx->queries->texlen / (float)(ctx->queries->count) - 2; destroyQueryBlock(ctx->queries); if (num_bind_tex_calls > 100) { cudaThreadExit(); num_bind_tex_calls = 0; loadReference(ctx); } } unloadReferenceString(ctx->ref); unloadReferenceTree(ctx); lseek(ctx->queries->qfile, 0, SEEK_SET); return 0; } void initReferencePages( MatchContext* ctx , int* num_pages, ReferencePage** pages_out) { unsigned int bases_in_ref = ctx->full_ref_len - 3; unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref; unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size); fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n", num_reference_pages, bases_in_ref, page_size); unsigned int page_overlap = MAX_QUERY_LEN + 1; ReferencePage* pages = (ReferencePage*) calloc(num_reference_pages, sizeof(ReferencePage)); pages[0].begin = 1; pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning pages[0].shadow_left = -1; pages[0].id = 0; for (int i = 1; i < num_reference_pages - 1; ++i) { pages[i].begin = pages[i - 1].end - page_overlap; pages[i].end = pages[i].begin + page_size + page_overlap; pages[i - 1].shadow_right = pages[i].begin; pages[i].shadow_left = pages[i-1].end; pages[i].id = i; } if (num_reference_pages > 1) { int last_page = num_reference_pages - 1; pages[last_page].begin = pages[last_page - 1].end - page_overlap; pages[last_page].end = ctx->full_ref_len - 1; pages[last_page - 1].shadow_right = pages[last_page].begin; pages[last_page].shadow_right = -1; pages[last_page].shadow_left = pages[last_page - 1].end; pages[last_page].id = last_page; } *pages_out = pages; *num_pages = num_reference_pages; } int streamReferenceAgainstQueries(MatchContext* ctx) { int num_reference_pages = 0; ReferencePage* pages = NULL; initReferencePages(ctx, &num_reference_pages, &pages); buildReferenceTexture(&(pages[0].ref), ctx->full_ref, pages[0].begin, pages[0].end, ctx->min_match_length, ctx->dotfilename, ctx->texfilename, &(ctx->statistics)); matchQueriesToReferencePage(ctx, &pages[0]); destroyReference(&(pages[0].ref)); for (int i = 1; i < num_reference_pages - 1; ++i) { buildReferenceTexture(&(pages[i].ref), ctx->full_ref, pages[i].begin, pages[i].end, ctx->min_match_length, NULL, NULL, &(ctx->statistics)); matchQueriesToReferencePage(ctx, &pages[i]); destroyReference(&(pages[i].ref)); } if (num_reference_pages > 1) { int last_page = num_reference_pages - 1; buildReferenceTexture(&(pages[last_page].ref), ctx->full_ref, pages[last_page].begin, pages[last_page].end, ctx->min_match_length, NULL, NULL, &(ctx->statistics)); matchQueriesToReferencePage(ctx, &pages[last_page]); destroyReference(&(pages[last_page].ref)); } free(pages); return 0; } extern "C" int 
matchQueries(MatchContext* ctx) { assert(sizeof(struct PixelOfNode) == sizeof(uint4)); assert(sizeof(struct PixelOfChildren) == sizeof(uint4)); #if TREE_ACCESS_HISTOGRAM ctx->statistics.node_hist_size = 0; ctx->statistics.child_hist_size = 0; #endif resetStats(&(ctx->statistics)); char* ttimer = createTimer(); startTimer(ttimer); int ret; fprintf(stderr, "Streaming reference pages against all queries\n"); ret = streamReferenceAgainstQueries(ctx); stopTimer(ttimer); ctx->statistics.t_end_to_end += getTimerValue(ttimer); deleteTimer(ttimer); writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out", "child_hist.out"); return ret; }
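// -----------------------------------------------------------------------------
// Editor's note (not part of the mummergpu sources above): the REORDER_REF
// branch earlier in this file packs the flat reference string into a
// refpitch x numrows layout so that blocksize (= 4) consecutive characters sit
// in the same texture column, improving 2D texture-cache locality. The sketch
// below is a minimal, self-contained host-only illustration of that index
// mapping and its inverse; the function names, the toy pitch, and the
// round-trip test are ours, not taken from mummergpu.
// -----------------------------------------------------------------------------
#include <assert.h>
#include <stdio.h>

// Map a linear reference offset i to its (x, y) cell in the reordered layout.
static void linearToBlocked(int i, int pitch, int blocksize, int* x, int* y)
{
    int block_dim = pitch * blocksize;        // characters per band of 'blocksize' rows
    int bigx = i % block_dim;                 // offset inside the band
    int bigy = i / block_dim;                 // which band
    *x = bigx / blocksize;                    // column advances once every 'blocksize' chars
    *y = bigy * blocksize + bigx % blocksize; // consecutive chars stack vertically
}

// Inverse mapping, used here only to verify that the layout round-trips.
static int blockedToLinear(int x, int y, int pitch, int blocksize)
{
    int bigy = y / blocksize;
    int inblock = y % blocksize;
    return bigy * pitch * blocksize + x * blocksize + inblock;
}

int main()
{
    const int pitch = 8, blocksize = 4, len = 64;   // toy sizes for illustration
    for (int i = 0; i < len; ++i) {
        int x, y;
        linearToBlocked(i, pitch, blocksize, &x, &y);
        assert(blockedToLinear(x, y, pitch, blocksize) == i);
    }
    printf("blocked reference reorder round-trips for %d characters\n", len);
    return 0;
}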
a007b2720c440265ebc4ffef547aabf8d96c8e58.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "support.h" #define TILE_SIZE 8 int main(int argc, char ** argv ) { Timer timer; hipError_t cuda_ret; printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); int count=0; int length=80; int width=80; int Matrix[length][width]; for(int i=0; i < length ; i++) { for (int j=0; j< width ; j++){ if((rand()%5) <= 3){Matrix[i][j]=0;} else { Matrix[i][j]=rand()%60; if (Matrix[i][j] > 0){count++;}} // printf(" %d\t", Matrix[i][j]); } } int X[length]; for(int k=0; k<length; k++){ if((rand()%5) >= 3){X[k]=0;} else { X[k]=rand()%60;} //printf(" \n X val %d\t", X[k]); } int rows[count]; int cols[count]; int vals[count]; int sparse[count]; int k=1; int f=1; int t=0; int row_size=sizeof(Matrix)/sizeof(Matrix[0]); int col_size=sizeof(Matrix[0])/sizeof(Matrix[0][0]); //printf("Row size of Matrix is %d \n", row_size); //printf("column size of Matrix is %d ", col_size); for (int i = 0; i < row_size; i++) { for(int j=0; j <col_size ; j++){ if (Matrix[i][j] != 0) { rows[t] = i; cols[t] = j; vals[t] = Matrix[i][j]; //printf("\n %d \t %d \t %d", rows[t], cols[t], vals[t]); t++;} } } int r_start, r_end; sparse[0]=0; for (int row = 0; row < count-1 ; row++){ r_start=rows[row]; r_end=rows[row+1]; if (r_end > r_start){ sparse[k]=f; k++;} else { f++; } } sparse[k]=count; int colsizer=sizeof(vals)/sizeof(vals[0]); //printf("\n your col size is %d",colsizer); sparse[k] = colsizer; /*for( int l=0 ; l < count ; l++) { printf("\n the sparse is %d \n", sparse[l]);}*/ int y[length]; for (int row = 0; row < length; row++){ r_start=sparse[row]; r_end=sparse[row+1]; int dot=0; for(int elem=r_start; elem<r_end; elem++){ dot += vals[elem]*X[cols[elem]]; //printf(" \n prime result is %d ", dot); } y[row] += dot ; //printf(" \n your multiply is %d ", y[row]); } stopTime(&timer); printf("%f s\n", elapsedTime(timer)); /*for (int s=0 ; s<length ; s++){ printf(" \n your Y value is %d ", y[s]);}*/ return 0; }
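// -----------------------------------------------------------------------------
// Editor's note (not part of the dataset entry above): the host code in this
// record builds a CSR-style (row offsets / cols / vals) form of a random sparse
// matrix and performs the matrix-vector product on the CPU only, accumulating
// into an uninitialized y[] with '+='. The sketch below is a minimal scalar-row
// CSR SpMV CUDA kernel under our own names (csr_spmv_kernel, row_ptr, ...)
// showing how the same inner loop is typically mapped to one thread per row;
// it overwrites y[row], so no pre-initialization is needed.
// -----------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>

__global__ void csr_spmv_kernel(int num_rows,
                                const int* __restrict__ row_ptr,
                                const int* __restrict__ cols,
                                const float* __restrict__ vals,
                                const float* __restrict__ x,
                                float* __restrict__ y)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= num_rows) return;

    float dot = 0.0f;
    for (int elem = row_ptr[row]; elem < row_ptr[row + 1]; ++elem)
        dot += vals[elem] * x[cols[elem]];

    y[row] = dot;   // overwrite instead of '+=' so y needs no initialization
}

int main()
{
    // 3x3 example: [[1 0 2],[0 3 0],[4 0 5]] * [1 1 1] = [3 3 9]
    const int   num_rows = 3;
    const int   h_row_ptr[4] = {0, 2, 3, 5};
    const int   h_cols[5]    = {0, 2, 1, 0, 2};
    const float h_vals[5]    = {1, 2, 3, 4, 5};
    const float h_x[3]       = {1, 1, 1};
    float       h_y[3]       = {0, 0, 0};

    int *d_row_ptr, *d_cols; float *d_vals, *d_x, *d_y;
    cudaMalloc((void**)&d_row_ptr, sizeof(h_row_ptr));
    cudaMalloc((void**)&d_cols,    sizeof(h_cols));
    cudaMalloc((void**)&d_vals,    sizeof(h_vals));
    cudaMalloc((void**)&d_x,       sizeof(h_x));
    cudaMalloc((void**)&d_y,       sizeof(h_y));
    cudaMemcpy(d_row_ptr, h_row_ptr, sizeof(h_row_ptr), cudaMemcpyHostToDevice);
    cudaMemcpy(d_cols,    h_cols,    sizeof(h_cols),    cudaMemcpyHostToDevice);
    cudaMemcpy(d_vals,    h_vals,    sizeof(h_vals),    cudaMemcpyHostToDevice);
    cudaMemcpy(d_x,       h_x,       sizeof(h_x),       cudaMemcpyHostToDevice);

    csr_spmv_kernel<<<1, 32>>>(num_rows, d_row_ptr, d_cols, d_vals, d_x, d_y);
    cudaMemcpy(h_y, d_y, sizeof(h_y), cudaMemcpyDeviceToHost);
    printf("y = %.0f %.0f %.0f (expected 3 3 9)\n", h_y[0], h_y[1], h_y[2]);

    cudaFree(d_row_ptr); cudaFree(d_cols); cudaFree(d_vals); cudaFree(d_x); cudaFree(d_y);
    return 0;
}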
a007b2720c440265ebc4ffef547aabf8d96c8e58.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "support.h" #define TILE_SIZE 8 int main(int argc, char ** argv ) { Timer timer; cudaError_t cuda_ret; printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); int count=0; int length=80; int width=80; int Matrix[length][width]; for(int i=0; i < length ; i++) { for (int j=0; j< width ; j++){ if((rand()%5) <= 3){Matrix[i][j]=0;} else { Matrix[i][j]=rand()%60; if (Matrix[i][j] > 0){count++;}} // printf(" %d\t", Matrix[i][j]); } } int X[length]; for(int k=0; k<length; k++){ if((rand()%5) >= 3){X[k]=0;} else { X[k]=rand()%60;} //printf(" \n X val %d\t", X[k]); } int rows[count]; int cols[count]; int vals[count]; int sparse[count]; int k=1; int f=1; int t=0; int row_size=sizeof(Matrix)/sizeof(Matrix[0]); int col_size=sizeof(Matrix[0])/sizeof(Matrix[0][0]); //printf("Row size of Matrix is %d \n", row_size); //printf("column size of Matrix is %d ", col_size); for (int i = 0; i < row_size; i++) { for(int j=0; j <col_size ; j++){ if (Matrix[i][j] != 0) { rows[t] = i; cols[t] = j; vals[t] = Matrix[i][j]; //printf("\n %d \t %d \t %d", rows[t], cols[t], vals[t]); t++;} } } int r_start, r_end; sparse[0]=0; for (int row = 0; row < count-1 ; row++){ r_start=rows[row]; r_end=rows[row+1]; if (r_end > r_start){ sparse[k]=f; k++;} else { f++; } } sparse[k]=count; int colsizer=sizeof(vals)/sizeof(vals[0]); //printf("\n your col size is %d",colsizer); sparse[k] = colsizer; /*for( int l=0 ; l < count ; l++) { printf("\n the sparse is %d \n", sparse[l]);}*/ int y[length]; for (int row = 0; row < length; row++){ r_start=sparse[row]; r_end=sparse[row+1]; int dot=0; for(int elem=r_start; elem<r_end; elem++){ dot += vals[elem]*X[cols[elem]]; //printf(" \n prime result is %d ", dot); } y[row] += dot ; //printf(" \n your multiply is %d ", y[row]); } stopTime(&timer); printf("%f s\n", elapsedTime(timer)); /*for (int s=0 ; s<length ; s++){ printf(" \n your Y value is %d ", y[s]);}*/ return 0; }
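// -----------------------------------------------------------------------------
// Editor's note (not part of the dataset entry above): each record in this dump
// pairs a hipify-generated .hip file with its original .cu file, and the
// translation is essentially a 1:1 renaming of runtime types, allocation/copy
// calls, and the kernel-launch form. The sketch below is a hand-written
// illustration of that correspondence behind a single-source set of macros;
// the gpu* / GPU_LAUNCH names are ours and are not produced or used by hipify.
// -----------------------------------------------------------------------------
#if defined(__HIP_PLATFORM_AMD__) || defined(__HIP_PLATFORM_NVIDIA__)
  #include <hip/hip_runtime.h>
  #define gpuError_t        hipError_t
  #define gpuSuccess        hipSuccess
  #define gpuGetLastError   hipGetLastError
  #define gpuGetErrorString hipGetErrorString
  #define gpuMalloc         hipMalloc
  #define gpuMemcpy         hipMemcpy
  #define gpuMemcpyHtoD     hipMemcpyHostToDevice
  #define gpuDeviceSync     hipDeviceSynchronize
  #define GPU_LAUNCH(k, grid, block, ...) \
      hipLaunchKernelGGL(k, grid, block, 0, 0, __VA_ARGS__)
#else
  #include <cuda_runtime.h>
  #define gpuError_t        cudaError_t
  #define gpuSuccess        cudaSuccess
  #define gpuGetLastError   cudaGetLastError
  #define gpuGetErrorString cudaGetErrorString
  #define gpuMalloc         cudaMalloc
  #define gpuMemcpy         cudaMemcpy
  #define gpuMemcpyHtoD     cudaMemcpyHostToDevice
  #define gpuDeviceSync     cudaDeviceSynchronize
  #define GPU_LAUNCH(k, grid, block, ...) k<<<grid, block>>>(__VA_ARGS__)
#endif

#include <cstdio>

__global__ void scale_kernel(float* data, float s, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= s;
}

int main()
{
    const int n = 8;
    float h[n];
    for (int i = 0; i < n; ++i) h[i] = float(i);

    float* d = nullptr;
    gpuMalloc((void**)&d, n * sizeof(float));
    gpuMemcpy(d, h, n * sizeof(float), gpuMemcpyHtoD);

    GPU_LAUNCH(scale_kernel, dim3(1), dim3(32), d, 2.0f, n);
    gpuDeviceSync();

    gpuError_t err = gpuGetLastError();
    if (err != gpuSuccess) printf("launch failed: %s\n", gpuGetErrorString(err));
    else                   printf("same kernel source launched through either runtime\n");
    return 0;
}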
a075635a582f2fbfedc4e374a2b6ad5de705043a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2019 by Contributors * \file multi_sum_sq.cu * \brief vectorized sums of squares norm over multiple arrays operators * \author Clement Fuji Tsang, Andrei Ivanov, Moises Hernandez */ #include "./multi_sum_sq-inl.h" #include <hipcub/hipcub.hpp> #define ILP 4 #define BLOCK_LIMIT 320 #define ARRAY_LIMIT 110 namespace mxnet { namespace op { // Shamelessly gotten from: // https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_apply.cuh // https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_l2norm_kernel.cu // https://github.com/NVIDIA/apex/blob/master/csrc/type_shim.h const int chunk_size = 32768; template <typename DType> struct MultiSumSqKernelParam { DType* addresses[ARRAY_LIMIT]; int sizes[ARRAY_LIMIT]; unsigned char block_to_tensor[BLOCK_LIMIT]; int block_to_chunk[BLOCK_LIMIT]; int max_chunks_per_tensor = -1; }; template<typename DType> __device__ __forceinline__ DType ReduceBlockIntoLanes(DType* x, DType val) { int tid = threadIdx.x; int block_size = blockDim.x; if (block_size >= 64) { x[tid] = val; __syncthreads(); } #pragma unroll for (int i = (block_size >> 1); i >= 64; i >>= 1) { if (tid < i) x[tid] = x[tid] + x[tid+i]; __syncthreads(); } DType final; if (tid < 32) { if (block_size >= 64) final = x[tid] + x[tid+32]; else final = val; #pragma unroll for (int i = 16; i >= 1; i >>= 1) final = final + __shfl_down_sync(0xffffffff, final, i); } return final; } template<typename DType> __global__ void MultiSumSqKernel(int chunk_size, MultiSumSqKernelParam<DType> param, float* block_reductions, int start_tensor_id) { const int tensor_loc = param.block_to_tensor[blockIdx.x]; const int chunk_len = param.block_to_chunk[blockIdx.x] * chunk_size; const int n = param.sizes[tensor_loc] - chunk_len; const DType* x = param.addresses[tensor_loc] + chunk_len; const auto i_max = n <= chunk_size ? 
n : chunk_size; __shared__ float vals[512]; // Non-divergent exit condition for __syncthreads, not necessary here float val = 0; for (int i_start = 0; i_start < i_max; i_start += blockDim.x * ILP) { int i = i_start + threadIdx.x; #pragma unroll for (int ii = 0; ii < ILP && i < i_max; ++ii, i += blockDim.x) { const auto incoming_val = static_cast<float>(x[i]); val += incoming_val * incoming_val; } } const float final = ReduceBlockIntoLanes(vals, val); if (threadIdx.x == 0) { block_reductions[(start_tensor_id + tensor_loc) * param.max_chunks_per_tensor + param.block_to_chunk[blockIdx.x]] = final; } } template<typename DType> __global__ void GlobalReductionKernel(MultiSumSqKernelParam<DType> param, float* block_reductions, float* output) { __shared__ float vals[512]; float* reductions_this_tensor = block_reductions + blockIdx.x * param.max_chunks_per_tensor; float val = 0; for (int i = threadIdx.x; i < param.max_chunks_per_tensor; i += blockDim.x) val += reductions_this_tensor[i]; float final = ReduceBlockIntoLanes(vals, val); if (threadIdx.x == 0) output[blockIdx.x] = final; } template<> size_t GetRequiredStorageMultiSumSq<gpu>(const std::vector<TBlob> &inputs, int* param_max_chunks_per_tensor) { // find max num of chunks in tensors int max_chunks_per_tensor = -1; for (size_t t = 0; t < inputs.size(); t++) { int chunks_this_tensor = (inputs[t].shape_.Size() + chunk_size - 1) / chunk_size; if (chunks_this_tensor > max_chunks_per_tensor) max_chunks_per_tensor = chunks_this_tensor; } if (param_max_chunks_per_tensor != NULL) *param_max_chunks_per_tensor = max_chunks_per_tensor; return inputs.size() * max_chunks_per_tensor * sizeof(float); } template<> void MultiSumSqRun<gpu>(const std::vector<TBlob> &inputs, int n_inputs, float *out_ptr, const OpContext &ctx) { const int block_size = 512; using namespace mxnet_op; auto s = ctx.get_stream<gpu>(); auto stream = mshadow::Stream<gpu>::GetStream(s); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { MultiSumSqKernelParam<DType> param; size_t workspace_size = GetRequiredStorageMultiSumSq<gpu>(inputs, &param.max_chunks_per_tensor); Tensor<gpu, 1, char> workspace = ctx.requested[multi_sum_sq::kTempSpace].get_space_typed<gpu, 1, char>( Shape1(workspace_size), s); Tensor<gpu, 1, float> block_reductions(reinterpret_cast<float*>(&workspace[0]), Shape1(n_inputs * param.max_chunks_per_tensor), s); CUDA_CALL(hipMemsetAsync(block_reductions.dptr_, 0, n_inputs * param.max_chunks_per_tensor* sizeof(float), stream)); int loc_block_info = 0; // position in param.block_to_tensor and param.block_to_chunck int loc_tensor_info = 0; // position in param.sizes and param.addresses int start_tensor_id = 0; for (int t = 0; t < n_inputs; t++, loc_tensor_info++) { // array index in inputs param.sizes[loc_tensor_info] = inputs[t].shape_.Size(); param.addresses[loc_tensor_info] = inputs[t].FlatTo2D<gpu, DType>(s).dptr_; const int chunks_this_tensor = (inputs[t].shape_.Size() - 1) / chunk_size; for (int chunk = 0; chunk <= chunks_this_tensor; ++chunk) { // array chunk index param.block_to_tensor[loc_block_info] = loc_tensor_info; param.block_to_chunk[loc_block_info] = chunk; loc_block_info++; const bool last_curr_chunk = chunk == chunks_this_tensor; const bool tensors_full = last_curr_chunk && loc_tensor_info == (ARRAY_LIMIT-1); const bool blocks_full = (loc_block_info == BLOCK_LIMIT); const bool last_chunk = last_curr_chunk && t == n_inputs - 1; if (!(tensors_full || blocks_full || last_chunk)) continue; hipLaunchKernelGGL(( MultiSumSqKernel), dim3(loc_block_info), 
dim3(block_size), 0, stream, chunk_size, param, block_reductions.dptr_, start_tensor_id); MSHADOW_CUDA_POST_KERNEL_CHECK(MultiSumSqKernel); loc_block_info = 0; if (last_curr_chunk) { // if you start from a new tensor loc_tensor_info = -1; start_tensor_id = t + 1; } else { // if you start from the same tensor param.sizes[0] = param.sizes[loc_tensor_info]; param.addresses[0] = param.addresses[loc_tensor_info]; loc_tensor_info = 0; start_tensor_id = t; } } } // Global reduction hipLaunchKernelGGL(( GlobalReductionKernel), dim3(n_inputs), dim3(block_size), 0, stream, param, block_reductions.dptr_, out_ptr); }); } NNVM_REGISTER_OP(multi_sum_sq) .set_attr<FCompute>("FCompute<gpu>", MultiSumSq<gpu>); } // namespace op } // namespace mxnet
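// -----------------------------------------------------------------------------
// Editor's note (not part of the MXNet source above): MultiSumSqKernel reduces
// each thread's partial sum of squares with a shared-memory tree down to one
// warp and then finishes with __shfl_down_sync. The sketch below isolates that
// two-phase block reduction in a standalone sum-of-squares kernel; the names
// and the fixed 256-thread block size are ours, and per-block results are
// combined with an atomicAdd instead of the second-pass kernel used in MXNet.
// -----------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>

__global__ void block_sum_sq(const float* __restrict__ x, int n, float* out)
{
    __shared__ float smem[256];
    const int tid = threadIdx.x;

    // Phase 0: grid-stride accumulation of squares into a per-thread partial.
    float val = 0.f;
    for (int i = blockIdx.x * blockDim.x + tid; i < n; i += gridDim.x * blockDim.x)
        val += x[i] * x[i];

    // Phase 1: shared-memory tree reduction down to the first warp.
    smem[tid] = val;
    __syncthreads();
    for (int s = blockDim.x / 2; s >= 32; s >>= 1) {
        if (tid < s) smem[tid] += smem[tid + s];
        __syncthreads();
    }

    // Phase 2: warp shuffles finish the last 32 values without shared memory.
    if (tid < 32) {
        float v = smem[tid];
        for (int offset = 16; offset >= 1; offset >>= 1)
            v += __shfl_down_sync(0xffffffff, v, offset);
        if (tid == 0) atomicAdd(out, v);   // combine the per-block results
    }
}

int main()
{
    const int n = 1 << 16;
    float* h = new float[n];
    for (int i = 0; i < n; ++i) h[i] = 1.0f;   // sum of squares should equal n

    float *d_x, *d_out, h_out = 0.f;
    cudaMalloc((void**)&d_x, n * sizeof(float));
    cudaMalloc((void**)&d_out, sizeof(float));
    cudaMemcpy(d_x, h, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, sizeof(float));

    block_sum_sq<<<64, 256>>>(d_x, n, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum of squares = %.0f (expected %d)\n", h_out, n);

    cudaFree(d_x); cudaFree(d_out); delete[] h;
    return 0;
}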
a075635a582f2fbfedc4e374a2b6ad5de705043a.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2019 by Contributors * \file multi_sum_sq.cu * \brief vectorized sums of squares norm over multiple arrays operators * \author Clement Fuji Tsang, Andrei Ivanov, Moises Hernandez */ #include "./multi_sum_sq-inl.h" #include <cub/cub.cuh> #define ILP 4 #define BLOCK_LIMIT 320 #define ARRAY_LIMIT 110 namespace mxnet { namespace op { // Shamelessly gotten from: // https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_apply.cuh // https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_l2norm_kernel.cu // https://github.com/NVIDIA/apex/blob/master/csrc/type_shim.h const int chunk_size = 32768; template <typename DType> struct MultiSumSqKernelParam { DType* addresses[ARRAY_LIMIT]; int sizes[ARRAY_LIMIT]; unsigned char block_to_tensor[BLOCK_LIMIT]; int block_to_chunk[BLOCK_LIMIT]; int max_chunks_per_tensor = -1; }; template<typename DType> __device__ __forceinline__ DType ReduceBlockIntoLanes(DType* x, DType val) { int tid = threadIdx.x; int block_size = blockDim.x; if (block_size >= 64) { x[tid] = val; __syncthreads(); } #pragma unroll for (int i = (block_size >> 1); i >= 64; i >>= 1) { if (tid < i) x[tid] = x[tid] + x[tid+i]; __syncthreads(); } DType final; if (tid < 32) { if (block_size >= 64) final = x[tid] + x[tid+32]; else final = val; #pragma unroll for (int i = 16; i >= 1; i >>= 1) final = final + __shfl_down_sync(0xffffffff, final, i); } return final; } template<typename DType> __global__ void MultiSumSqKernel(int chunk_size, MultiSumSqKernelParam<DType> param, float* block_reductions, int start_tensor_id) { const int tensor_loc = param.block_to_tensor[blockIdx.x]; const int chunk_len = param.block_to_chunk[blockIdx.x] * chunk_size; const int n = param.sizes[tensor_loc] - chunk_len; const DType* x = param.addresses[tensor_loc] + chunk_len; const auto i_max = n <= chunk_size ? 
n : chunk_size; __shared__ float vals[512]; // Non-divergent exit condition for __syncthreads, not necessary here float val = 0; for (int i_start = 0; i_start < i_max; i_start += blockDim.x * ILP) { int i = i_start + threadIdx.x; #pragma unroll for (int ii = 0; ii < ILP && i < i_max; ++ii, i += blockDim.x) { const auto incoming_val = static_cast<float>(x[i]); val += incoming_val * incoming_val; } } const float final = ReduceBlockIntoLanes(vals, val); if (threadIdx.x == 0) { block_reductions[(start_tensor_id + tensor_loc) * param.max_chunks_per_tensor + param.block_to_chunk[blockIdx.x]] = final; } } template<typename DType> __global__ void GlobalReductionKernel(MultiSumSqKernelParam<DType> param, float* block_reductions, float* output) { __shared__ float vals[512]; float* reductions_this_tensor = block_reductions + blockIdx.x * param.max_chunks_per_tensor; float val = 0; for (int i = threadIdx.x; i < param.max_chunks_per_tensor; i += blockDim.x) val += reductions_this_tensor[i]; float final = ReduceBlockIntoLanes(vals, val); if (threadIdx.x == 0) output[blockIdx.x] = final; } template<> size_t GetRequiredStorageMultiSumSq<gpu>(const std::vector<TBlob> &inputs, int* param_max_chunks_per_tensor) { // find max num of chunks in tensors int max_chunks_per_tensor = -1; for (size_t t = 0; t < inputs.size(); t++) { int chunks_this_tensor = (inputs[t].shape_.Size() + chunk_size - 1) / chunk_size; if (chunks_this_tensor > max_chunks_per_tensor) max_chunks_per_tensor = chunks_this_tensor; } if (param_max_chunks_per_tensor != NULL) *param_max_chunks_per_tensor = max_chunks_per_tensor; return inputs.size() * max_chunks_per_tensor * sizeof(float); } template<> void MultiSumSqRun<gpu>(const std::vector<TBlob> &inputs, int n_inputs, float *out_ptr, const OpContext &ctx) { const int block_size = 512; using namespace mxnet_op; auto s = ctx.get_stream<gpu>(); auto stream = mshadow::Stream<gpu>::GetStream(s); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { MultiSumSqKernelParam<DType> param; size_t workspace_size = GetRequiredStorageMultiSumSq<gpu>(inputs, &param.max_chunks_per_tensor); Tensor<gpu, 1, char> workspace = ctx.requested[multi_sum_sq::kTempSpace].get_space_typed<gpu, 1, char>( Shape1(workspace_size), s); Tensor<gpu, 1, float> block_reductions(reinterpret_cast<float*>(&workspace[0]), Shape1(n_inputs * param.max_chunks_per_tensor), s); CUDA_CALL(cudaMemsetAsync(block_reductions.dptr_, 0, n_inputs * param.max_chunks_per_tensor* sizeof(float), stream)); int loc_block_info = 0; // position in param.block_to_tensor and param.block_to_chunck int loc_tensor_info = 0; // position in param.sizes and param.addresses int start_tensor_id = 0; for (int t = 0; t < n_inputs; t++, loc_tensor_info++) { // array index in inputs param.sizes[loc_tensor_info] = inputs[t].shape_.Size(); param.addresses[loc_tensor_info] = inputs[t].FlatTo2D<gpu, DType>(s).dptr_; const int chunks_this_tensor = (inputs[t].shape_.Size() - 1) / chunk_size; for (int chunk = 0; chunk <= chunks_this_tensor; ++chunk) { // array chunk index param.block_to_tensor[loc_block_info] = loc_tensor_info; param.block_to_chunk[loc_block_info] = chunk; loc_block_info++; const bool last_curr_chunk = chunk == chunks_this_tensor; const bool tensors_full = last_curr_chunk && loc_tensor_info == (ARRAY_LIMIT-1); const bool blocks_full = (loc_block_info == BLOCK_LIMIT); const bool last_chunk = last_curr_chunk && t == n_inputs - 1; if (!(tensors_full || blocks_full || last_chunk)) continue; MultiSumSqKernel<<<loc_block_info, block_size, 0, stream>>> 
(chunk_size, param, block_reductions.dptr_, start_tensor_id); MSHADOW_CUDA_POST_KERNEL_CHECK(MultiSumSqKernel); loc_block_info = 0; if (last_curr_chunk) { // if you start from a new tensor loc_tensor_info = -1; start_tensor_id = t + 1; } else { // if you start from the same tensor param.sizes[0] = param.sizes[loc_tensor_info]; param.addresses[0] = param.addresses[loc_tensor_info]; loc_tensor_info = 0; start_tensor_id = t; } } } // Global reduction GlobalReductionKernel<<<n_inputs, block_size, 0, stream>>> (param, block_reductions.dptr_, out_ptr); }); } NNVM_REGISTER_OP(multi_sum_sq) .set_attr<FCompute>("FCompute<gpu>", MultiSumSq<gpu>); } // namespace op } // namespace mxnet
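// -----------------------------------------------------------------------------
// Editor's note (not part of the MXNet source above): MultiSumSqRun walks the
// input tensors in fixed-size chunks and records, for every block it is about
// to launch, which tensor and which chunk that block should process
// (block_to_tensor / block_to_chunk), flushing a launch whenever a table fills
// up or the last chunk is reached. The host-only sketch below reproduces a
// simplified version of that scheduling loop (only the block-table-full and
// last-chunk flush conditions, no tensor-table limit or carry-over); the names,
// the dummy tensor sizes, and the tiny block limit are ours.
// -----------------------------------------------------------------------------
#include <cstdio>
#include <vector>

int main()
{
    const int chunk_size  = 32768;
    const int block_limit = 4;                              // tiny on purpose (real code: 320)
    const std::vector<int> sizes = {100000, 5000, 70000};   // dummy tensor lengths

    std::vector<int> block_to_tensor, block_to_chunk;
    int launches = 0;

    auto flush = [&](const char* why) {
        printf("launch %d (%s): %zu blocks ->", ++launches, why, block_to_tensor.size());
        for (size_t b = 0; b < block_to_tensor.size(); ++b)
            printf(" (t%d,c%d)", block_to_tensor[b], block_to_chunk[b]);
        printf("\n");
        block_to_tensor.clear();
        block_to_chunk.clear();
    };

    for (size_t t = 0; t < sizes.size(); ++t) {
        const int chunks = (sizes[t] + chunk_size - 1) / chunk_size;
        for (int c = 0; c < chunks; ++c) {
            block_to_tensor.push_back((int)t);   // this block works on tensor t ...
            block_to_chunk.push_back(c);         // ... starting at chunk c
            const bool blocks_full = (int)block_to_tensor.size() == block_limit;
            const bool last_chunk  = (t == sizes.size() - 1) && (c == chunks - 1);
            if (blocks_full || last_chunk)
                flush(blocks_full ? "block table full" : "last chunk");
        }
    }
    return 0;
}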
8e5091285194bb3d5c615d0d793328021e78745f.hip
// !!! This is a file automatically generated by hipify!!! // #include <torch/extension.h> #include <torch/types.h> #include <ATen/ATen.h> using namespace at; #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <iostream> // Cuda tensor accessor definitions // restrict pointer traits piroritize speed over memory consumption #define TensorAcc4R PackedTensorAccessor<scalar_t,4,RestrictPtrTraits,int32_t> #define TensorAcc5R PackedTensorAccessor<scalar_t,5,RestrictPtrTraits,int32_t> #define WITHIN_BOUNDS(x, y, H, W) (x >= 0 && x < H && y >= 0 && y < W) #define THREADS_FORWARD 32 #define THREADS_BACKWARD 5 namespace { template <typename scalar_t> __global__ void correlation_cuda_forward_kernel( const TensorAcc4R rInput1, const TensorAcc4R rInput2, TensorAcc5R output, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW) { const int iH = rInput1.size(1); const int iW = rInput1.size(2); const int C = rInput1.size(3); const int n = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int thread = threadIdx.x; const int start_i = -padH + h * dH; const int start_j = -padW + w * dW; const int patchRadH = dilation_patchH * (patchH - 1) / 2; const int patchRadW = dilation_patchW * (patchW - 1) / 2; __shared__ scalar_t prod_sum[THREADS_FORWARD]; for(int ph = 0; ph < patchH; ++ph){ int ph_dilated = ph * dilation_patchH - patchRadH; for(int pw = 0; pw < patchW; ++pw){ int pw_dilated = pw * dilation_patchW - patchRadW; prod_sum[thread] = 0; for (int i=0; i<kH; ++i){ int i1 = start_i + i; int i2 = i1 + ph_dilated; if WITHIN_BOUNDS(i1, i2, iH, iH){ for (int j=0; j<kW; ++j){ int j1 = start_j + j; int j2 = j1 + pw_dilated; if WITHIN_BOUNDS(j1, j2, iW, iW){ for (int c=thread; c<C; c += THREADS_FORWARD){ scalar_t v1 = rInput1[n][i1][j1][c]; scalar_t v2 = rInput2[n][i2][j2][c]; prod_sum[thread] += v1 * v2; } } } } } // accumulate __syncthreads(); if (thread == 0) { scalar_t reduce_sum = 0; for (int index = 0; index < THREADS_FORWARD; ++index) { reduce_sum += prod_sum[index]; } output[n][ph][pw][h][w] = reduce_sum; } } } } template <typename scalar_t> __global__ void correlation_cuda_backward_kernel_input1( const TensorAcc5R gradOutput, const TensorAcc4R input2, TensorAcc4R gradInput1, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW, int batch) { const int iH = input2.size(2); const int iW = input2.size(3); const int H = gradOutput.size(3); const int W = gradOutput.size(4); const int patchRadH = (patchH - 1) / 2; const int patchRadW = (patchW - 1) / 2; const int n = batch; const int c = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int ph_off = threadIdx.x; const int pw_off = threadIdx.y; const int h_2 = h + padH; const int w_2 = w + padW; const int start_i2 = h_2 / dH; const int start_j2 = w_2 / dW; /*we perform a module but since we have the quotient, we can cheat a bit*/ const int h_off = h_2 - start_i2 * dH; const int w_off = w_2 - start_j2 * dW; __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; prod_sum[ph_off][pw_off] = 0; for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { int i1 = h + dilation_patchH * (ph - patchRadH); for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { int j1 = w + dilation_patchW * (pw - patchRadW); if WITHIN_BOUNDS(i1, j1, iH, iW) { scalar_t val = input2[n][c][i1][j1]; for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) { int i2 = start_i2 - i; 
for(int tmp2 = w_off, j = 0; tmp2 < kW; tmp2 += dW, ++j) { int j2 = start_j2 - j; if WITHIN_BOUNDS(i2, j2, H, W) { prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val; } } } } } } __syncthreads(); if (ph_off == 0 && pw_off == 0){ scalar_t reduce_sum =0; for (int ph = 0; ph < THREADS_BACKWARD; ++ph){ for (int pw = 0; pw < THREADS_BACKWARD; ++pw){ reduce_sum += prod_sum[ph][pw]; } } gradInput1[n][c][h][w] = reduce_sum; } } template <typename scalar_t> __global__ void correlation_cuda_backward_kernel_input2( const TensorAcc5R gradOutput, const TensorAcc4R input1, TensorAcc4R gradInput2, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW, int batch) { const int iH = input1.size(2); const int iW = input1.size(3); const int patchRadH = (patchH - 1) / 2; const int patchRadW = (patchW - 1) / 2; const int H = gradOutput.size(3); const int W = gradOutput.size(4); const int n = batch; const int c = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int ph_off = threadIdx.x; const int pw_off = threadIdx.y; __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; prod_sum[ph_off][pw_off] = 0; for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { int i1 = h - dilation_patchH * (ph - patchRadH); for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { int j1 = w - dilation_patchW * (pw - patchRadW); if WITHIN_BOUNDS(i1, j1, iH, iW) { scalar_t val = input1[n][c][i1][j1]; const int h_2 = i1 + padH; const int w_2 = j1 + padW; const int start_i2 = h_2 / dH; const int start_j2 = w_2 / dW; const int h_off = h_2 - start_i2 * dH; const int w_off = w_2 - start_j2 * dW; for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) { int i2 = start_i2 - i; for(int tmp2 = w_off, j = 0; tmp2 < kW; tmp2 += dW, ++j) { int j2 = start_j2 - j; if WITHIN_BOUNDS(i2, j2, H, W) { prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val; } } } } } } __syncthreads(); if (ph_off == 0 && pw_off == 0){ scalar_t reduce_sum =0; for (int ph = 0; ph < THREADS_BACKWARD; ++ph){ for (int pw = 0; pw < THREADS_BACKWARD; ++pw){ reduce_sum += prod_sum[ph][pw]; } } gradInput2[n][c][h][w] = reduce_sum; } } } torch::Tensor correlation_cuda_forward( torch::Tensor input1, torch::Tensor input2, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW) { const int batch_size = input1.size(0); const int iH = input1.size(2); const int iW = input1.size(3); const auto oH = (iH + 2 * padH - kH) / dH + 1; const auto oW = (iW + 2 * padW - kW) / dW + 1; auto output = torch::zeros({batch_size, patchH, patchW, oH, oW}, input1.options()); auto trInput1 = input1.permute({0, 2, 3, 1}).contiguous(); auto trInput2 = input2.permute({0, 2, 3, 1}).contiguous(); const int threads = THREADS_FORWARD; const dim3 blocks(batch_size, oH, oW); AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_forward_cuda", ([&] { TensorAcc4R trInput1_acc = trInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R trInput2_acc = trInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc5R output_acc = output.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>(); hipLaunchKernelGGL(( correlation_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, trInput1_acc, trInput2_acc, output_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW); })); return output; } std::vector<torch::Tensor> correlation_cuda_backward( torch::Tensor 
input1, torch::Tensor input2, torch::Tensor gradOutput, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW) { auto gradInput1 = torch::zeros_like(input1); auto gradInput2 = torch::zeros_like(input2); const int batch_size = input1.size(0); const int iH = input1.size(2); const int iW = input1.size(3); const int C = input1.size(1); const dim3 blocks(C, iH, iW); const dim3 threads(THREADS_BACKWARD, THREADS_BACKWARD); AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_backward_cuda", ([&] { TensorAcc4R input1_acc = input1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R input2_acc = input2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R gradInput1_acc = gradInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R gradInput2_acc = gradInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc5R gradOutput_acc = gradOutput.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>(); for (int n = 0; n < batch_size; ++n){ hipLaunchKernelGGL(( correlation_cuda_backward_kernel_input1<scalar_t>), dim3(blocks), dim3(threads), 0, 0, gradOutput_acc, input2_acc, gradInput1_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW, n); } for (int n = 0; n < batch_size; ++n){ hipLaunchKernelGGL(( correlation_cuda_backward_kernel_input2<scalar_t>), dim3(blocks), dim3(threads), 0, 0, gradOutput_acc, input1_acc, gradInput2_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW, n); } })); return {gradInput1, gradInput2}; }
8e5091285194bb3d5c615d0d793328021e78745f.cu
// #include <torch/extension.h> #include <torch/types.h> #include <ATen/ATen.h> using namespace at; #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <iostream> // Cuda tensor accessor definitions // restrict pointer traits piroritize speed over memory consumption #define TensorAcc4R PackedTensorAccessor<scalar_t,4,RestrictPtrTraits,int32_t> #define TensorAcc5R PackedTensorAccessor<scalar_t,5,RestrictPtrTraits,int32_t> #define WITHIN_BOUNDS(x, y, H, W) (x >= 0 && x < H && y >= 0 && y < W) #define THREADS_FORWARD 32 #define THREADS_BACKWARD 5 namespace { template <typename scalar_t> __global__ void correlation_cuda_forward_kernel( const TensorAcc4R rInput1, const TensorAcc4R rInput2, TensorAcc5R output, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW) { const int iH = rInput1.size(1); const int iW = rInput1.size(2); const int C = rInput1.size(3); const int n = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int thread = threadIdx.x; const int start_i = -padH + h * dH; const int start_j = -padW + w * dW; const int patchRadH = dilation_patchH * (patchH - 1) / 2; const int patchRadW = dilation_patchW * (patchW - 1) / 2; __shared__ scalar_t prod_sum[THREADS_FORWARD]; for(int ph = 0; ph < patchH; ++ph){ int ph_dilated = ph * dilation_patchH - patchRadH; for(int pw = 0; pw < patchW; ++pw){ int pw_dilated = pw * dilation_patchW - patchRadW; prod_sum[thread] = 0; for (int i=0; i<kH; ++i){ int i1 = start_i + i; int i2 = i1 + ph_dilated; if WITHIN_BOUNDS(i1, i2, iH, iH){ for (int j=0; j<kW; ++j){ int j1 = start_j + j; int j2 = j1 + pw_dilated; if WITHIN_BOUNDS(j1, j2, iW, iW){ for (int c=thread; c<C; c += THREADS_FORWARD){ scalar_t v1 = rInput1[n][i1][j1][c]; scalar_t v2 = rInput2[n][i2][j2][c]; prod_sum[thread] += v1 * v2; } } } } } // accumulate __syncthreads(); if (thread == 0) { scalar_t reduce_sum = 0; for (int index = 0; index < THREADS_FORWARD; ++index) { reduce_sum += prod_sum[index]; } output[n][ph][pw][h][w] = reduce_sum; } } } } template <typename scalar_t> __global__ void correlation_cuda_backward_kernel_input1( const TensorAcc5R gradOutput, const TensorAcc4R input2, TensorAcc4R gradInput1, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW, int batch) { const int iH = input2.size(2); const int iW = input2.size(3); const int H = gradOutput.size(3); const int W = gradOutput.size(4); const int patchRadH = (patchH - 1) / 2; const int patchRadW = (patchW - 1) / 2; const int n = batch; const int c = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int ph_off = threadIdx.x; const int pw_off = threadIdx.y; const int h_2 = h + padH; const int w_2 = w + padW; const int start_i2 = h_2 / dH; const int start_j2 = w_2 / dW; /*we perform a module but since we have the quotient, we can cheat a bit*/ const int h_off = h_2 - start_i2 * dH; const int w_off = w_2 - start_j2 * dW; __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; prod_sum[ph_off][pw_off] = 0; for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { int i1 = h + dilation_patchH * (ph - patchRadH); for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { int j1 = w + dilation_patchW * (pw - patchRadW); if WITHIN_BOUNDS(i1, j1, iH, iW) { scalar_t val = input2[n][c][i1][j1]; for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) { int i2 = start_i2 - i; for(int tmp2 = w_off, j = 0; tmp2 < kW; tmp2 += dW, ++j) { int j2 = 
start_j2 - j; if WITHIN_BOUNDS(i2, j2, H, W) { prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val; } } } } } } __syncthreads(); if (ph_off == 0 && pw_off == 0){ scalar_t reduce_sum =0; for (int ph = 0; ph < THREADS_BACKWARD; ++ph){ for (int pw = 0; pw < THREADS_BACKWARD; ++pw){ reduce_sum += prod_sum[ph][pw]; } } gradInput1[n][c][h][w] = reduce_sum; } } template <typename scalar_t> __global__ void correlation_cuda_backward_kernel_input2( const TensorAcc5R gradOutput, const TensorAcc4R input1, TensorAcc4R gradInput2, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW, int batch) { const int iH = input1.size(2); const int iW = input1.size(3); const int patchRadH = (patchH - 1) / 2; const int patchRadW = (patchW - 1) / 2; const int H = gradOutput.size(3); const int W = gradOutput.size(4); const int n = batch; const int c = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int ph_off = threadIdx.x; const int pw_off = threadIdx.y; __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; prod_sum[ph_off][pw_off] = 0; for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { int i1 = h - dilation_patchH * (ph - patchRadH); for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { int j1 = w - dilation_patchW * (pw - patchRadW); if WITHIN_BOUNDS(i1, j1, iH, iW) { scalar_t val = input1[n][c][i1][j1]; const int h_2 = i1 + padH; const int w_2 = j1 + padW; const int start_i2 = h_2 / dH; const int start_j2 = w_2 / dW; const int h_off = h_2 - start_i2 * dH; const int w_off = w_2 - start_j2 * dW; for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) { int i2 = start_i2 - i; for(int tmp2 = w_off, j = 0; tmp2 < kW; tmp2 += dW, ++j) { int j2 = start_j2 - j; if WITHIN_BOUNDS(i2, j2, H, W) { prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val; } } } } } } __syncthreads(); if (ph_off == 0 && pw_off == 0){ scalar_t reduce_sum =0; for (int ph = 0; ph < THREADS_BACKWARD; ++ph){ for (int pw = 0; pw < THREADS_BACKWARD; ++pw){ reduce_sum += prod_sum[ph][pw]; } } gradInput2[n][c][h][w] = reduce_sum; } } } torch::Tensor correlation_cuda_forward( torch::Tensor input1, torch::Tensor input2, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilation_patchH, int dilation_patchW, int dH, int dW) { const int batch_size = input1.size(0); const int iH = input1.size(2); const int iW = input1.size(3); const auto oH = (iH + 2 * padH - kH) / dH + 1; const auto oW = (iW + 2 * padW - kW) / dW + 1; auto output = torch::zeros({batch_size, patchH, patchW, oH, oW}, input1.options()); auto trInput1 = input1.permute({0, 2, 3, 1}).contiguous(); auto trInput2 = input2.permute({0, 2, 3, 1}).contiguous(); const int threads = THREADS_FORWARD; const dim3 blocks(batch_size, oH, oW); AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_forward_cuda", ([&] { TensorAcc4R trInput1_acc = trInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R trInput2_acc = trInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc5R output_acc = output.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>(); correlation_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( trInput1_acc, trInput2_acc, output_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW); })); return output; } std::vector<torch::Tensor> correlation_cuda_backward( torch::Tensor input1, torch::Tensor input2, torch::Tensor gradOutput, int kH, int kW, int patchH, int patchW, int padH, int 
padW, int dilation_patchH, int dilation_patchW, int dH, int dW) { auto gradInput1 = torch::zeros_like(input1); auto gradInput2 = torch::zeros_like(input2); const int batch_size = input1.size(0); const int iH = input1.size(2); const int iW = input1.size(3); const int C = input1.size(1); const dim3 blocks(C, iH, iW); const dim3 threads(THREADS_BACKWARD, THREADS_BACKWARD); AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_backward_cuda", ([&] { TensorAcc4R input1_acc = input1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R input2_acc = input2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R gradInput1_acc = gradInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc4R gradInput2_acc = gradInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>(); TensorAcc5R gradOutput_acc = gradOutput.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>(); for (int n = 0; n < batch_size; ++n){ correlation_cuda_backward_kernel_input1<scalar_t><<<blocks, threads>>>( gradOutput_acc, input2_acc, gradInput1_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW, n); } for (int n = 0; n < batch_size; ++n){ correlation_cuda_backward_kernel_input2<scalar_t><<<blocks, threads>>>( gradOutput_acc, input1_acc, gradInput2_acc, kH, kW, patchH, patchW, padH, padW, dilation_patchH, dilation_patchW, dH, dW, n); } })); return {gradInput1, gradInput2}; }
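Comparing the .hip and .cu versions of this extension, the kernels themselves are untouched; hipify only swaps the headers (hip/hip_runtime.h in place of cuda.h and cuda_runtime.h) and rewrites each triple-chevron launch into hipLaunchKernelGGL. A minimal illustration of that mapping on a toy kernel, written here as plain CUDA with the hipified form kept in comments; the kernel name scale is made up for the example.

#include <cuda_runtime.h>

__global__ void scale(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);

  // Native CUDA launch, as in the .cu file above:
  scale<<<grid, block, 0, 0>>>(d_x, a, n);

  // After hipify, as in the .hip file above, the same launch becomes:
  //   hipLaunchKernelGGL(scale, grid, block, 0, 0, d_x, a, n);
  // i.e. kernel, grid, block, dynamic shared memory, stream, then the arguments.
}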
80a705136fda7b823d9b6cbd39eec8d10ac37a6f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
 *cr
 *cr            (C) Copyright 2010 The Board of Trustees of the
 *cr                        University of Illinois
 *cr                         All Rights Reserved
 *cr
 ******************************************************************************/

__constant__ float M_c[FILTER_SIZE][FILTER_SIZE];

__global__ void convolution(Matrix N, Matrix P) {
    /********************************************************************
    Determine input and output indexes of each thread
    Load a tile of the input image to shared memory
    Apply the filter on the input image tile
    Write the compute values to the output image at the correct indexes
    ********************************************************************/

    //INSERT KERNEL CODE HERE
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row_o = blockIdx.y * TILE_SIZE + ty;
    int col_o = blockIdx.x * TILE_SIZE + tx;
    int row_i = row_o - 2;
    int col_i = col_o - 2;
    float output = 0.0f;

    __shared__ float Ns[BLOCK_SIZE][BLOCK_SIZE];

    if ((row_i >= 0) && (row_i < N.height) && (col_i >= 0) && (col_i < N.width))
        Ns[ty][tx] = N.elements[row_i*N.width + col_i];
    else
        Ns[ty][tx] = 0.0f;

    __syncthreads();

    if (ty < TILE_SIZE && tx < TILE_SIZE) {
        for (int i = 0; i < FILTER_SIZE; i++) {
            for (int j = 0; j < FILTER_SIZE; j++) {
                output += M_c[i][j] * Ns[i+ty][j+tx];
            }
        }
        if (row_o < P.height && col_o < P.width)
            P.elements[row_o * P.width + col_o] = output;
    }
}
80a705136fda7b823d9b6cbd39eec8d10ac37a6f.cu
/******************************************************************************
 *cr
 *cr            (C) Copyright 2010 The Board of Trustees of the
 *cr                        University of Illinois
 *cr                         All Rights Reserved
 *cr
 ******************************************************************************/

__constant__ float M_c[FILTER_SIZE][FILTER_SIZE];

__global__ void convolution(Matrix N, Matrix P) {
    /********************************************************************
    Determine input and output indexes of each thread
    Load a tile of the input image to shared memory
    Apply the filter on the input image tile
    Write the compute values to the output image at the correct indexes
    ********************************************************************/

    //INSERT KERNEL CODE HERE
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row_o = blockIdx.y * TILE_SIZE + ty;
    int col_o = blockIdx.x * TILE_SIZE + tx;
    int row_i = row_o - 2;
    int col_i = col_o - 2;
    float output = 0.0f;

    __shared__ float Ns[BLOCK_SIZE][BLOCK_SIZE];

    if ((row_i >= 0) && (row_i < N.height) && (col_i >= 0) && (col_i < N.width))
        Ns[ty][tx] = N.elements[row_i*N.width + col_i];
    else
        Ns[ty][tx] = 0.0f;

    __syncthreads();

    if (ty < TILE_SIZE && tx < TILE_SIZE) {
        for (int i = 0; i < FILTER_SIZE; i++) {
            for (int j = 0; j < FILTER_SIZE; j++) {
                output += M_c[i][j] * Ns[i+ty][j+tx];
            }
        }
        if (row_o < P.height && col_o < P.width)
            P.elements[row_o * P.width + col_o] = output;
    }
}
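The convolution kernel in this pair expects its tile and filter sizes, and the Matrix type, to come from the assignment's support files, which are not part of this excerpt. A host-side launch sketch under assumed values (FILTER_SIZE of 5 matches the hard-coded halo of 2; TILE_SIZE of 16 and the Matrix layout are guesses) might look as follows; convolution itself is the kernel defined above, and in a real build the host code sits in the same translation unit as that kernel so M_c refers to the one definition.

#include <cuda_runtime.h>

// Assumed support definitions -- not shown in this excerpt.
#define FILTER_SIZE 5                              // consistent with the -2 halo above
#define TILE_SIZE   16                             // assumed
#define BLOCK_SIZE  (TILE_SIZE + FILTER_SIZE - 1)  // input tile including the halo

typedef struct {
  int width;
  int height;
  float* elements;   // device pointer, row-major
} Matrix;

__constant__ float M_c[FILTER_SIZE][FILTER_SIZE];  // in the real build this is the definition above
__global__ void convolution(Matrix N, Matrix P);   // defined in the file above

// Copy the filter into constant memory and launch one BLOCK_SIZE x BLOCK_SIZE
// thread block per TILE_SIZE x TILE_SIZE output tile.
void convolve(const float h_filter[FILTER_SIZE][FILTER_SIZE], Matrix N, Matrix P) {
  cudaMemcpyToSymbol(M_c, h_filter, FILTER_SIZE * FILTER_SIZE * sizeof(float));

  dim3 block(BLOCK_SIZE, BLOCK_SIZE);
  dim3 grid((P.width  + TILE_SIZE - 1) / TILE_SIZE,
            (P.height + TILE_SIZE - 1) / TILE_SIZE);
  convolution<<<grid, block>>>(N, P);
  cudaDeviceSynchronize();
}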
79b8df396d6f49d037334e195192fc37e570585e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 Microsoft * Licensed under The Apache-2.0 License [see LICENSE for details] * \file deformable_psroi_pooling.cu * \brief * \author Yi Li, Guodong Zhang, Jifeng Dai */ #include "./deformable_psroi_pooling_v2-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #include "../../common/cuda_utils.h" #include "../mxnet_op.h" #define DeformablePSROIPOOLING_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { template <typename DType> __device__ DType bilinear_interp_v2( const DType* data, const DType x, const DType y, const int width, const int height) { int x1 = floor(x); int x2 = ceil(x); int y1 = floor(y); int y2 = ceil(y); DType dist_x = static_cast<DType>(x - x1); DType dist_y = static_cast<DType>(y - y1); DType value11 = data[y1*width + x1]; DType value12 = data[y2*width + x1]; DType value21 = data[y1*width + x2]; DType value22 = data[y2*width + x2]; DType value = (1 - dist_x)*(1 - dist_y)*value11 + (1 - dist_x)*dist_y*value12 + dist_x*(1 - dist_y)*value21 + dist_x*dist_y*value22; return value; } template <typename DType> __global__ void DeformablePSROIPoolv2ForwardKernel( const int count, const DType** stage_bottom_data, const DType* stage_spatial_scale, const int channels, const int* stage_height, const int* stage_width, const int pooled_height, const int pooled_width, const DType* bottom_rois, const DType* bottom_trans, const bool no_trans, const DType trans_std, const int sample_per_part, const int output_dim, const int group_size, const int part_size, const int num_classes, const int channels_each_class, DType* top_data, DType* top_count) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 6; int roi_stage_ind = offset_bottom_rois[0]; const DType* bottom_data = stage_bottom_data[roi_stage_ind]; const int height = stage_height[roi_stage_ind]; const int width = stage_width[roi_stage_ind]; const DType spatial_scale = stage_spatial_scale[roi_stage_ind]; int roi_batch_ind = offset_bottom_rois[1]; DType roi_start_w = 
static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[3])) * spatial_scale - 0.5; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[5]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part); DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part); int part_h = floor(static_cast<DType>(ph) / pooled_height*part_size); int part_w = floor(static_cast<DType>(pw) / pooled_width*part_size); int class_id = ctop / channels_each_class; DType trans_x = no_trans ? static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; DType trans_y = no_trans ? static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; DType wstart = static_cast<DType>(pw)* bin_size_w + roi_start_w; wstart += trans_x * roi_width; DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h; hstart += trans_y * roi_height; DType sum = 0; int count = 0; int gw = floor(static_cast<DType>(pw) * group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { DType w = wstart + iw*sub_bin_size_w; DType h = hstart + ih*sub_bin_size_h; // bilinear interpolation if (w<-0.5 || w>width - 0.5 || h<-0.5 || h>height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop*group_size + gh)*group_size + gw; DType val = bilinear_interp_v2(offset_bottom_data + c*height*width, w, h, width, height); sum += val; count++; } } top_data[index] = count == 0 ? static_cast<DType>(0) : sum / count; top_count[index] = count; } } template<typename DType> inline void DeformablePSROIPoolv2Forward(const Tensor<gpu, 4, DType> &out, const std::vector<Tensor<gpu, 4, DType>> &datas, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &trans, const Tensor<gpu, 4, DType> &top_count, const bool no_trans, const nnvm::Tuple<float> spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { // LOG(INFO) << "DeformablePSROIPoolForward"; std::vector<const DType*> _stage_bottom_data; for (int i = 0; i < datas.size(); i++) _stage_bottom_data.push_back(datas[i].dptr_); const DType **stage_bottom_data; DeformablePSROIPOOLING_CUDA_CHECK(hipMalloc(&stage_bottom_data, sizeof(const DType*) * _stage_bottom_data.size())); DeformablePSROIPOOLING_CUDA_CHECK(hipMemcpy(stage_bottom_data, &_stage_bottom_data[0], sizeof(const DType*) * _stage_bottom_data.size(), hipMemcpyHostToDevice)); const DType *bottom_rois = bbox.dptr_; const DType *bottom_trans = no_trans ? 
NULL : trans.dptr_; DType *top_data = out.dptr_; DType *top_count_data = top_count.dptr_; const int count = out.shape_.Size(); const int channels = datas[0].size(1); std::vector<int> _stage_height; for (int i = 0; i < datas.size(); i++) _stage_height.push_back(datas[i].size(2)); int *stage_height; DeformablePSROIPOOLING_CUDA_CHECK(hipMalloc(&stage_height, sizeof(int) * _stage_height.size())); DeformablePSROIPOOLING_CUDA_CHECK(hipMemcpy(stage_height, &_stage_height[0], sizeof(int) * _stage_height.size(), hipMemcpyHostToDevice)); std::vector<int> _stage_width; for (int i = 0; i < datas.size(); i++) _stage_width.push_back(datas[i].size(3)); int *stage_width; DeformablePSROIPOOLING_CUDA_CHECK(hipMalloc(&stage_width, sizeof(int) * _stage_width.size())); DeformablePSROIPOOLING_CUDA_CHECK(hipMemcpy(stage_width, &_stage_width[0], sizeof(int) * _stage_width.size(), hipMemcpyHostToDevice)); const int pooled_height = pooled_size; const int pooled_width = pooled_size; const int num_classes = no_trans ? 1 : trans.size(1) / 2; const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; std::vector<DType> _stage_spatial_scale; for (int i = 0; i < spatial_scale.ndim(); i++) _stage_spatial_scale.push_back(spatial_scale[i]); DType *stage_spatial_scale; DeformablePSROIPOOLING_CUDA_CHECK(hipMalloc(&stage_spatial_scale, sizeof(DType) * _stage_spatial_scale.size())); DeformablePSROIPOOLING_CUDA_CHECK(hipMemcpy(stage_spatial_scale, &_stage_spatial_scale[0], sizeof(DType) * _stage_spatial_scale.size(), hipMemcpyHostToDevice)); hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipLaunchKernelGGL(( DeformablePSROIPoolv2ForwardKernel<DType>), dim3(mxnet::op::mxnet_op::cuda_get_num_blocks(count)), dim3(kBaseThreadNum), 0, stream, count, stage_bottom_data, stage_spatial_scale, channels, stage_height, stage_width, pooled_height, pooled_width, bottom_rois, bottom_trans, no_trans, trans_std, sample_per_part, output_dim, group_size, part_size, num_classes, channels_each_class, top_data, top_count_data); DeformablePSROIPOOLING_CUDA_CHECK(hipPeekAtLastError()); DeformablePSROIPOOLING_CUDA_CHECK(hipFree(stage_bottom_data)); DeformablePSROIPOOLING_CUDA_CHECK(hipFree(stage_height)); DeformablePSROIPOOLING_CUDA_CHECK(hipFree(stage_width)); DeformablePSROIPOOLING_CUDA_CHECK(hipFree(stage_spatial_scale)); } template <typename DType> __global__ void DeformablePSROIPoolv2BackwardAccKernel( const int count, const DType* top_diff, const DType* top_count, const int num_rois, const DType* stage_spatial_scale, const int channels, const int* stage_height, const int* stage_width, const int pooled_height, const int pooled_width, const int output_dim, DType** stage_bottom_data_diff, DType* bottom_trans_diff, const DType** stage_bottom_data, const DType* bottom_rois, const DType* bottom_trans, const bool no_trans, const DType trans_std, const int sample_per_part, const int group_size, const int part_size, const int num_classes, const int channels_each_class) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 6; int roi_stage_ind = offset_bottom_rois[0]; DType* bottom_data_diff = stage_bottom_data_diff[roi_stage_ind]; const DType* bottom_data = stage_bottom_data[roi_stage_ind]; const int 
height = stage_height[roi_stage_ind]; const int width = stage_width[roi_stage_ind]; const DType spatial_scale = stage_spatial_scale[roi_stage_ind]; int roi_batch_ind = offset_bottom_rois[1]; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[3])) * spatial_scale - 0.5; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[5]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part); DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part); int part_h = floor(static_cast<DType>(ph) / pooled_height*part_size); int part_w = floor(static_cast<DType>(pw) / pooled_width*part_size); int class_id = ctop / channels_each_class; DType trans_x = no_trans ? static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; DType trans_y = no_trans ? static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; DType wstart = static_cast<DType>(pw)* bin_size_w + roi_start_w; wstart += trans_x * roi_width; DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h; hstart += trans_y * roi_height; if (top_count[index] <= 0) { continue; } DType diff_val = top_diff[index] / top_count[index]; const DType* offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; DType* offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; int gw = floor(static_cast<DType>(pw)* group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { DType w = wstart + iw*sub_bin_size_w; DType h = hstart + ih*sub_bin_size_h; // bilinear interpolation if (w<-0.5 || w>width - 0.5 || h<-0.5 || h>height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop*group_size + gh)*group_size + gw; // backward on feature int x0 = floor(w); int x1 = ceil(w); int y0 = floor(h); int y1 = ceil(h); DType dist_x = w - x0, dist_y = h - y0; DType q00 = (1 - dist_x)*(1 - dist_y); DType q01 = (1 - dist_x)*dist_y; DType q10 = dist_x*(1 - dist_y); DType q11 = dist_x*dist_y; int bottom_index_base = c * height *width; atomicAdd(offset_bottom_data_diff + bottom_index_base + y0*width + x0, q00*diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1*width + x0, q01*diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y0*width + x1, q10*diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1*width + x1, q11*diff_val); if (no_trans) { continue; } DType U00 = offset_bottom_data[bottom_index_base + y0*width + x0]; DType U01 = offset_bottom_data[bottom_index_base + y1*width + x0]; DType U10 = offset_bottom_data[bottom_index_base + y0*width + x1]; DType U11 = offset_bottom_data[bottom_index_base + y1*width + x1]; DType 
diff_x = (U11*dist_y + U10*(1 - dist_y) - U01*dist_y - U00*(1 - dist_y)) *trans_std*diff_val; diff_x *= roi_width; DType diff_y = (U11*dist_x + U01*(1 - dist_x) - U10*dist_x - U00*(1 - dist_x)) *trans_std*diff_val; diff_y *= roi_height; atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x); atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); } } } } template<typename DType> inline void DeformablePSROIPoolv2BackwardAcc(const std::vector<Tensor<gpu, 4, DType>> &in_grads, const Tensor<gpu, 4, DType> &trans_grad, const Tensor<gpu, 4, DType> &out_grad, const std::vector<Tensor<gpu, 4, DType>> &datas, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &trans, const Tensor<gpu, 4, DType> &top_count, const bool no_trans, const nnvm::Tuple<float> spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { // LOG(INFO) << "DeformablePSROIPoolBackward"; const DType *top_diff = out_grad.dptr_; std::vector<const DType*> _stage_bottom_data; for (int i = 0; i < datas.size(); i++) _stage_bottom_data.push_back(datas[i].dptr_); const DType **stage_bottom_data; DeformablePSROIPOOLING_CUDA_CHECK(hipMalloc(&stage_bottom_data, sizeof(const DType*) * _stage_bottom_data.size())); DeformablePSROIPOOLING_CUDA_CHECK(hipMemcpy(stage_bottom_data, &_stage_bottom_data[0], sizeof(const DType*) * _stage_bottom_data.size(), hipMemcpyHostToDevice)); const DType *bottom_rois = bbox.dptr_; const DType *bottom_trans = no_trans ? NULL : trans.dptr_; std::vector<DType*> _stage_bottom_data_diff; for (int i = 0; i < in_grads.size(); i++) _stage_bottom_data_diff.push_back(in_grads[i].dptr_); DType **stage_bottom_data_diff; DeformablePSROIPOOLING_CUDA_CHECK(hipMalloc(&stage_bottom_data_diff, sizeof(DType*) * _stage_bottom_data_diff.size())); DeformablePSROIPOOLING_CUDA_CHECK(hipMemcpy(stage_bottom_data_diff, &_stage_bottom_data_diff[0], sizeof(DType*) * _stage_bottom_data_diff.size(), hipMemcpyHostToDevice)); DType *bottom_trans_diff = no_trans ? NULL : trans_grad.dptr_; const DType *top_count_data = top_count.dptr_; const int count = out_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grads[0].size(1); std::vector<int> _stage_height; for (int i = 0; i < in_grads.size(); i++) _stage_height.push_back(in_grads[i].size(2)); int *stage_height; DeformablePSROIPOOLING_CUDA_CHECK(hipMalloc(&stage_height, sizeof(int) * _stage_height.size())); DeformablePSROIPOOLING_CUDA_CHECK(hipMemcpy(stage_height, &_stage_height[0], sizeof(int) * _stage_height.size(), hipMemcpyHostToDevice)); std::vector<int> _stage_width; for (int i = 0; i < in_grads.size(); i++) _stage_width.push_back(in_grads[i].size(3)); int *stage_width; DeformablePSROIPOOLING_CUDA_CHECK(hipMalloc(&stage_width, sizeof(int) * _stage_width.size())); DeformablePSROIPOOLING_CUDA_CHECK(hipMemcpy(stage_width, &_stage_width[0], sizeof(int) * _stage_width.size(), hipMemcpyHostToDevice)); const int pooled_height = pooled_size; const int pooled_width = pooled_size; const int num_classes = no_trans ? 1 : trans_grad.size(1) / 2; const int channels_each_class = no_trans ? 
output_dim : output_dim / num_classes; std::vector<DType> _stage_spatial_scale; for (int i = 0; i < spatial_scale.ndim(); i++) _stage_spatial_scale.push_back(spatial_scale[i]); DType *stage_spatial_scale; DeformablePSROIPOOLING_CUDA_CHECK(hipMalloc(&stage_spatial_scale, sizeof(DType) * _stage_spatial_scale.size())); DeformablePSROIPOOLING_CUDA_CHECK(hipMemcpy(stage_spatial_scale, &_stage_spatial_scale[0], sizeof(DType) * _stage_spatial_scale.size(), hipMemcpyHostToDevice)); hipStream_t stream = Stream<gpu>::GetStream(in_grads[0].stream_); DeformablePSROIPoolv2BackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream >> >( count, top_diff, top_count_data, num_rois, stage_spatial_scale, channels, stage_height, stage_width, pooled_height, pooled_width, output_dim, stage_bottom_data_diff, bottom_trans_diff, stage_bottom_data, bottom_rois, bottom_trans, no_trans, trans_std, sample_per_part, group_size, part_size, num_classes, channels_each_class); DeformablePSROIPOOLING_CUDA_CHECK(hipPeekAtLastError()); DeformablePSROIPOOLING_CUDA_CHECK(hipFree(stage_bottom_data)); DeformablePSROIPOOLING_CUDA_CHECK(hipFree(stage_bottom_data_diff)); DeformablePSROIPOOLING_CUDA_CHECK(hipFree(stage_height)); DeformablePSROIPOOLING_CUDA_CHECK(hipFree(stage_width)); DeformablePSROIPOOLING_CUDA_CHECK(hipFree(stage_spatial_scale)); } } // namespace cuda template<typename DType> inline void DeformablePSROIPoolv2Forward(const Tensor<gpu, 4, DType> &out, const std::vector<Tensor<gpu, 4, DType>> &datas, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &trans, const Tensor<gpu, 4, DType> &top_count, const bool no_trans, const nnvm::Tuple<float> spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { cuda::DeformablePSROIPoolv2Forward(out, datas, bbox, trans, top_count, no_trans, spatial_scale, output_dim, group_size, pooled_size, part_size, sample_per_part, trans_std); } template<typename DType> inline void DeformablePSROIPoolv2BackwardAcc(const std::vector<Tensor<gpu, 4, DType>> &in_grads, const Tensor<gpu, 4, DType> &trans_grad, const Tensor<gpu, 4, DType> &out_grad, const std::vector<Tensor<gpu, 4, DType>> &datas, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &trans, const Tensor<gpu, 4, DType> &top_count, const bool no_trans, const nnvm::Tuple<float> spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { cuda::DeformablePSROIPoolv2BackwardAcc(in_grads, trans_grad, out_grad, datas, bbox, trans, top_count, no_trans, spatial_scale, output_dim, group_size, pooled_size, part_size, sample_per_part, trans_std); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(DeformablePSROIPoolingv2Param param, int dtype) { Operator* op = nullptr; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new DeformablePSROIPoolingv2Op<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
79b8df396d6f49d037334e195192fc37e570585e.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 Microsoft * Licensed under The Apache-2.0 License [see LICENSE for details] * \file deformable_psroi_pooling.cu * \brief * \author Yi Li, Guodong Zhang, Jifeng Dai */ #include "./deformable_psroi_pooling_v2-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #include "../../common/cuda_utils.h" #include "../mxnet_op.h" #define DeformablePSROIPOOLING_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { template <typename DType> __device__ DType bilinear_interp_v2( const DType* data, const DType x, const DType y, const int width, const int height) { int x1 = floor(x); int x2 = ceil(x); int y1 = floor(y); int y2 = ceil(y); DType dist_x = static_cast<DType>(x - x1); DType dist_y = static_cast<DType>(y - y1); DType value11 = data[y1*width + x1]; DType value12 = data[y2*width + x1]; DType value21 = data[y1*width + x2]; DType value22 = data[y2*width + x2]; DType value = (1 - dist_x)*(1 - dist_y)*value11 + (1 - dist_x)*dist_y*value12 + dist_x*(1 - dist_y)*value21 + dist_x*dist_y*value22; return value; } template <typename DType> __global__ void DeformablePSROIPoolv2ForwardKernel( const int count, const DType** stage_bottom_data, const DType* stage_spatial_scale, const int channels, const int* stage_height, const int* stage_width, const int pooled_height, const int pooled_width, const DType* bottom_rois, const DType* bottom_trans, const bool no_trans, const DType trans_std, const int sample_per_part, const int output_dim, const int group_size, const int part_size, const int num_classes, const int channels_each_class, DType* top_data, DType* top_count) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 6; int roi_stage_ind = offset_bottom_rois[0]; const DType* bottom_data = stage_bottom_data[roi_stage_ind]; const int height = stage_height[roi_stage_ind]; const int width = stage_width[roi_stage_ind]; const DType spatial_scale = stage_spatial_scale[roi_stage_ind]; int roi_batch_ind = offset_bottom_rois[1]; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5; DType roi_start_h = 
static_cast<DType>(round(offset_bottom_rois[3])) * spatial_scale - 0.5; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[5]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part); DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part); int part_h = floor(static_cast<DType>(ph) / pooled_height*part_size); int part_w = floor(static_cast<DType>(pw) / pooled_width*part_size); int class_id = ctop / channels_each_class; DType trans_x = no_trans ? static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; DType trans_y = no_trans ? static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; DType wstart = static_cast<DType>(pw)* bin_size_w + roi_start_w; wstart += trans_x * roi_width; DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h; hstart += trans_y * roi_height; DType sum = 0; int count = 0; int gw = floor(static_cast<DType>(pw) * group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { DType w = wstart + iw*sub_bin_size_w; DType h = hstart + ih*sub_bin_size_h; // bilinear interpolation if (w<-0.5 || w>width - 0.5 || h<-0.5 || h>height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop*group_size + gh)*group_size + gw; DType val = bilinear_interp_v2(offset_bottom_data + c*height*width, w, h, width, height); sum += val; count++; } } top_data[index] = count == 0 ? static_cast<DType>(0) : sum / count; top_count[index] = count; } } template<typename DType> inline void DeformablePSROIPoolv2Forward(const Tensor<gpu, 4, DType> &out, const std::vector<Tensor<gpu, 4, DType>> &datas, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &trans, const Tensor<gpu, 4, DType> &top_count, const bool no_trans, const nnvm::Tuple<float> spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { // LOG(INFO) << "DeformablePSROIPoolForward"; std::vector<const DType*> _stage_bottom_data; for (int i = 0; i < datas.size(); i++) _stage_bottom_data.push_back(datas[i].dptr_); const DType **stage_bottom_data; DeformablePSROIPOOLING_CUDA_CHECK(cudaMalloc(&stage_bottom_data, sizeof(const DType*) * _stage_bottom_data.size())); DeformablePSROIPOOLING_CUDA_CHECK(cudaMemcpy(stage_bottom_data, &_stage_bottom_data[0], sizeof(const DType*) * _stage_bottom_data.size(), cudaMemcpyHostToDevice)); const DType *bottom_rois = bbox.dptr_; const DType *bottom_trans = no_trans ? 
NULL : trans.dptr_; DType *top_data = out.dptr_; DType *top_count_data = top_count.dptr_; const int count = out.shape_.Size(); const int channels = datas[0].size(1); std::vector<int> _stage_height; for (int i = 0; i < datas.size(); i++) _stage_height.push_back(datas[i].size(2)); int *stage_height; DeformablePSROIPOOLING_CUDA_CHECK(cudaMalloc(&stage_height, sizeof(int) * _stage_height.size())); DeformablePSROIPOOLING_CUDA_CHECK(cudaMemcpy(stage_height, &_stage_height[0], sizeof(int) * _stage_height.size(), cudaMemcpyHostToDevice)); std::vector<int> _stage_width; for (int i = 0; i < datas.size(); i++) _stage_width.push_back(datas[i].size(3)); int *stage_width; DeformablePSROIPOOLING_CUDA_CHECK(cudaMalloc(&stage_width, sizeof(int) * _stage_width.size())); DeformablePSROIPOOLING_CUDA_CHECK(cudaMemcpy(stage_width, &_stage_width[0], sizeof(int) * _stage_width.size(), cudaMemcpyHostToDevice)); const int pooled_height = pooled_size; const int pooled_width = pooled_size; const int num_classes = no_trans ? 1 : trans.size(1) / 2; const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; std::vector<DType> _stage_spatial_scale; for (int i = 0; i < spatial_scale.ndim(); i++) _stage_spatial_scale.push_back(spatial_scale[i]); DType *stage_spatial_scale; DeformablePSROIPOOLING_CUDA_CHECK(cudaMalloc(&stage_spatial_scale, sizeof(DType) * _stage_spatial_scale.size())); DeformablePSROIPOOLING_CUDA_CHECK(cudaMemcpy(stage_spatial_scale, &_stage_spatial_scale[0], sizeof(DType) * _stage_spatial_scale.size(), cudaMemcpyHostToDevice)); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); DeformablePSROIPoolv2ForwardKernel<DType><<<mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream>>>( count, stage_bottom_data, stage_spatial_scale, channels, stage_height, stage_width, pooled_height, pooled_width, bottom_rois, bottom_trans, no_trans, trans_std, sample_per_part, output_dim, group_size, part_size, num_classes, channels_each_class, top_data, top_count_data); DeformablePSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError()); DeformablePSROIPOOLING_CUDA_CHECK(cudaFree(stage_bottom_data)); DeformablePSROIPOOLING_CUDA_CHECK(cudaFree(stage_height)); DeformablePSROIPOOLING_CUDA_CHECK(cudaFree(stage_width)); DeformablePSROIPOOLING_CUDA_CHECK(cudaFree(stage_spatial_scale)); } template <typename DType> __global__ void DeformablePSROIPoolv2BackwardAccKernel( const int count, const DType* top_diff, const DType* top_count, const int num_rois, const DType* stage_spatial_scale, const int channels, const int* stage_height, const int* stage_width, const int pooled_height, const int pooled_width, const int output_dim, DType** stage_bottom_data_diff, DType* bottom_trans_diff, const DType** stage_bottom_data, const DType* bottom_rois, const DType* bottom_trans, const bool no_trans, const DType trans_std, const int sample_per_part, const int group_size, const int part_size, const int num_classes, const int channels_each_class) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 6; int roi_stage_ind = offset_bottom_rois[0]; DType* bottom_data_diff = stage_bottom_data_diff[roi_stage_ind]; const DType* bottom_data = stage_bottom_data[roi_stage_ind]; const int height = 
stage_height[roi_stage_ind]; const int width = stage_width[roi_stage_ind]; const DType spatial_scale = stage_spatial_scale[roi_stage_ind]; int roi_batch_ind = offset_bottom_rois[1]; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[3])) * spatial_scale - 0.5; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[5]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part); DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part); int part_h = floor(static_cast<DType>(ph) / pooled_height*part_size); int part_w = floor(static_cast<DType>(pw) / pooled_width*part_size); int class_id = ctop / channels_each_class; DType trans_x = no_trans ? static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; DType trans_y = no_trans ? static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; DType wstart = static_cast<DType>(pw)* bin_size_w + roi_start_w; wstart += trans_x * roi_width; DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h; hstart += trans_y * roi_height; if (top_count[index] <= 0) { continue; } DType diff_val = top_diff[index] / top_count[index]; const DType* offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; DType* offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; int gw = floor(static_cast<DType>(pw)* group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { DType w = wstart + iw*sub_bin_size_w; DType h = hstart + ih*sub_bin_size_h; // bilinear interpolation if (w<-0.5 || w>width - 0.5 || h<-0.5 || h>height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop*group_size + gh)*group_size + gw; // backward on feature int x0 = floor(w); int x1 = ceil(w); int y0 = floor(h); int y1 = ceil(h); DType dist_x = w - x0, dist_y = h - y0; DType q00 = (1 - dist_x)*(1 - dist_y); DType q01 = (1 - dist_x)*dist_y; DType q10 = dist_x*(1 - dist_y); DType q11 = dist_x*dist_y; int bottom_index_base = c * height *width; atomicAdd(offset_bottom_data_diff + bottom_index_base + y0*width + x0, q00*diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1*width + x0, q01*diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y0*width + x1, q10*diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1*width + x1, q11*diff_val); if (no_trans) { continue; } DType U00 = offset_bottom_data[bottom_index_base + y0*width + x0]; DType U01 = offset_bottom_data[bottom_index_base + y1*width + x0]; DType U10 = offset_bottom_data[bottom_index_base + y0*width + x1]; DType U11 = offset_bottom_data[bottom_index_base + y1*width + x1]; DType diff_x = 
(U11*dist_y + U10*(1 - dist_y) - U01*dist_y - U00*(1 - dist_y)) *trans_std*diff_val; diff_x *= roi_width; DType diff_y = (U11*dist_x + U01*(1 - dist_x) - U10*dist_x - U00*(1 - dist_x)) *trans_std*diff_val; diff_y *= roi_height; atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x); atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); } } } } template<typename DType> inline void DeformablePSROIPoolv2BackwardAcc(const std::vector<Tensor<gpu, 4, DType>> &in_grads, const Tensor<gpu, 4, DType> &trans_grad, const Tensor<gpu, 4, DType> &out_grad, const std::vector<Tensor<gpu, 4, DType>> &datas, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &trans, const Tensor<gpu, 4, DType> &top_count, const bool no_trans, const nnvm::Tuple<float> spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { // LOG(INFO) << "DeformablePSROIPoolBackward"; const DType *top_diff = out_grad.dptr_; std::vector<const DType*> _stage_bottom_data; for (int i = 0; i < datas.size(); i++) _stage_bottom_data.push_back(datas[i].dptr_); const DType **stage_bottom_data; DeformablePSROIPOOLING_CUDA_CHECK(cudaMalloc(&stage_bottom_data, sizeof(const DType*) * _stage_bottom_data.size())); DeformablePSROIPOOLING_CUDA_CHECK(cudaMemcpy(stage_bottom_data, &_stage_bottom_data[0], sizeof(const DType*) * _stage_bottom_data.size(), cudaMemcpyHostToDevice)); const DType *bottom_rois = bbox.dptr_; const DType *bottom_trans = no_trans ? NULL : trans.dptr_; std::vector<DType*> _stage_bottom_data_diff; for (int i = 0; i < in_grads.size(); i++) _stage_bottom_data_diff.push_back(in_grads[i].dptr_); DType **stage_bottom_data_diff; DeformablePSROIPOOLING_CUDA_CHECK(cudaMalloc(&stage_bottom_data_diff, sizeof(DType*) * _stage_bottom_data_diff.size())); DeformablePSROIPOOLING_CUDA_CHECK(cudaMemcpy(stage_bottom_data_diff, &_stage_bottom_data_diff[0], sizeof(DType*) * _stage_bottom_data_diff.size(), cudaMemcpyHostToDevice)); DType *bottom_trans_diff = no_trans ? NULL : trans_grad.dptr_; const DType *top_count_data = top_count.dptr_; const int count = out_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grads[0].size(1); std::vector<int> _stage_height; for (int i = 0; i < in_grads.size(); i++) _stage_height.push_back(in_grads[i].size(2)); int *stage_height; DeformablePSROIPOOLING_CUDA_CHECK(cudaMalloc(&stage_height, sizeof(int) * _stage_height.size())); DeformablePSROIPOOLING_CUDA_CHECK(cudaMemcpy(stage_height, &_stage_height[0], sizeof(int) * _stage_height.size(), cudaMemcpyHostToDevice)); std::vector<int> _stage_width; for (int i = 0; i < in_grads.size(); i++) _stage_width.push_back(in_grads[i].size(3)); int *stage_width; DeformablePSROIPOOLING_CUDA_CHECK(cudaMalloc(&stage_width, sizeof(int) * _stage_width.size())); DeformablePSROIPOOLING_CUDA_CHECK(cudaMemcpy(stage_width, &_stage_width[0], sizeof(int) * _stage_width.size(), cudaMemcpyHostToDevice)); const int pooled_height = pooled_size; const int pooled_width = pooled_size; const int num_classes = no_trans ? 1 : trans_grad.size(1) / 2; const int channels_each_class = no_trans ? 
output_dim : output_dim / num_classes; std::vector<DType> _stage_spatial_scale; for (int i = 0; i < spatial_scale.ndim(); i++) _stage_spatial_scale.push_back(spatial_scale[i]); DType *stage_spatial_scale; DeformablePSROIPOOLING_CUDA_CHECK(cudaMalloc(&stage_spatial_scale, sizeof(DType) * _stage_spatial_scale.size())); DeformablePSROIPOOLING_CUDA_CHECK(cudaMemcpy(stage_spatial_scale, &_stage_spatial_scale[0], sizeof(DType) * _stage_spatial_scale.size(), cudaMemcpyHostToDevice)); cudaStream_t stream = Stream<gpu>::GetStream(in_grads[0].stream_); DeformablePSROIPoolv2BackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream >> >( count, top_diff, top_count_data, num_rois, stage_spatial_scale, channels, stage_height, stage_width, pooled_height, pooled_width, output_dim, stage_bottom_data_diff, bottom_trans_diff, stage_bottom_data, bottom_rois, bottom_trans, no_trans, trans_std, sample_per_part, group_size, part_size, num_classes, channels_each_class); DeformablePSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError()); DeformablePSROIPOOLING_CUDA_CHECK(cudaFree(stage_bottom_data)); DeformablePSROIPOOLING_CUDA_CHECK(cudaFree(stage_bottom_data_diff)); DeformablePSROIPOOLING_CUDA_CHECK(cudaFree(stage_height)); DeformablePSROIPOOLING_CUDA_CHECK(cudaFree(stage_width)); DeformablePSROIPOOLING_CUDA_CHECK(cudaFree(stage_spatial_scale)); } } // namespace cuda template<typename DType> inline void DeformablePSROIPoolv2Forward(const Tensor<gpu, 4, DType> &out, const std::vector<Tensor<gpu, 4, DType>> &datas, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &trans, const Tensor<gpu, 4, DType> &top_count, const bool no_trans, const nnvm::Tuple<float> spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { cuda::DeformablePSROIPoolv2Forward(out, datas, bbox, trans, top_count, no_trans, spatial_scale, output_dim, group_size, pooled_size, part_size, sample_per_part, trans_std); } template<typename DType> inline void DeformablePSROIPoolv2BackwardAcc(const std::vector<Tensor<gpu, 4, DType>> &in_grads, const Tensor<gpu, 4, DType> &trans_grad, const Tensor<gpu, 4, DType> &out_grad, const std::vector<Tensor<gpu, 4, DType>> &datas, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &trans, const Tensor<gpu, 4, DType> &top_count, const bool no_trans, const nnvm::Tuple<float> spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { cuda::DeformablePSROIPoolv2BackwardAcc(in_grads, trans_grad, out_grad, datas, bbox, trans, top_count, no_trans, spatial_scale, output_dim, group_size, pooled_size, part_size, sample_per_part, trans_std); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(DeformablePSROIPoolingv2Param param, int dtype) { Operator* op = nullptr; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new DeformablePSROIPoolingv2Op<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
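A minimal sketch (not part of the operator above; kernel and array names are hypothetical) isolating the pattern the backward kernel relies on: scattering an incoming gradient into the four pixels around a fractional sample point with the same bilinear weights q00..q11, using atomicAdd because many samples can land on the same pixel.

#include <cuda_runtime.h>

// Scatter diff[i] into the 4 pixels of a single-channel height x width map
// that surround the fractional location (sample_h[i], sample_w[i]).
__global__ void bilinear_scatter(float* grad_map, int height, int width,
                                 const float* sample_h, const float* sample_w,
                                 const float* diff, int n_samples)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n_samples) return;

  float h = sample_h[i], w = sample_w[i];
  // Same bounds test and clamp as in the kernel above.
  if (w < -0.5f || w > width - 0.5f || h < -0.5f || h > height - 0.5f) return;
  w = fminf(fmaxf(w, 0.f), width - 1.f);
  h = fminf(fmaxf(h, 0.f), height - 1.f);

  int x0 = (int)floorf(w), x1 = (int)ceilf(w);
  int y0 = (int)floorf(h), y1 = (int)ceilf(h);
  float dx = w - x0, dy = h - y0;

  // The four weights correspond to q00, q01, q10, q11 above; atomicAdd avoids
  // lost updates when several sampling points share a destination pixel.
  atomicAdd(grad_map + y0 * width + x0, (1.f - dx) * (1.f - dy) * diff[i]);
  atomicAdd(grad_map + y1 * width + x0, (1.f - dx) * dy         * diff[i]);
  atomicAdd(grad_map + y0 * width + x1, dx         * (1.f - dy) * diff[i]);
  atomicAdd(grad_map + y1 * width + x1, dx         * dy         * diff[i]);
}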
e45e37a65f6475e217f711ec200b5bf4d7b9ea9a.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <thrust/device_vector.h> #include "cm.h" unsigned int filter(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, CudaSet* b, unsigned int segment, thrust::device_vector<unsigned int>& dev_p) { stack<string> exe_type; stack<string> exe_value; stack<int_type*> exe_vectors; stack<float_type*> exe_vectors_f; stack<int_type> exe_nums; stack<bool*> bool_vectors; stack<float_type> exe_nums_f; string s1, s2, s1_val, s2_val; int_type n1, n2, res; float_type n1_f, n2_f, res_f; for(int i=0; !op_type.empty(); ++i, op_type.pop()) { string ss = op_type.front(); if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("VECTOR") == 0 || ss.compare("FLOAT") == 0 || ss.compare("STRING") == 0) { exe_type.push(ss); if (ss.compare("NUMBER") == 0) { exe_nums.push(op_nums.front()); op_nums.pop(); } else if (ss.compare("NAME") == 0 || ss.compare("STRING") == 0) { exe_value.push(op_value.front()); op_value.pop(); } if (ss.compare("FLOAT") == 0) { exe_nums_f.push(op_nums_f.front()); op_nums_f.pop(); } } else { if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) { // get 2 values from the stack s1 = exe_type.top(); exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); if (ss.compare("ADD") == 0 ) res = n1+n2; else if (ss.compare("MUL") == 0 ) res = n1*n2; else if (ss.compare("DIV") == 0 ) res = n1/n2; else res = n1-n2; thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount); thrust::sequence(p, p+(a->mRecCount),res,(int_type)0); exe_type.push("VECTOR"); exe_vectors.push(thrust::raw_pointer_cast(p)); } else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2_f = exe_nums_f.top(); exe_nums_f.pop(); if (ss.compare("ADD") == 0 ) res_f = n1_f+n2_f; else if (ss.compare("MUL") == 0 ) res_f = n1_f*n2_f; else if (ss.compare("DIV") == 0 ) res_f = n1_f/n2_f; else res_f = n1_f-n2_f; thrust::device_ptr<float_type> p = thrust::device_malloc<float_type>(a->mRecCount); thrust::sequence(p, p+(a->mRecCount),res_f,(float_type)0); exe_type.push("VECTOR F"); exe_vectors_f.push(thrust::raw_pointer_cast(p)); } else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) { s1_val = exe_value.top(); exe_value.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR F"); if (a->type[(a->columnNames)[s1_val]] == 1) { float_type* t = a->get_float_type_by_name(s1_val); exe_vectors_f.push(a->op(t,n1_f,ss,1)); } else { int_type* t = a->get_int_by_name(s1_val); exe_vectors_f.push(a->op(t,n1_f,ss,1)); }; } else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR F"); if 
(a->type[(a->columnNames)[s2_val]] == 1) { float_type* t = a->get_float_type_by_name(s2_val); exe_vectors_f.push(a->op(t,n1_f,ss,0)); } else { int_type* t = a->get_int_by_name(s2_val); exe_vectors_f.push(a->op(t,n1_f,ss,0)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { s1_val = exe_value.top(); exe_value.pop(); n1 = exe_nums.top(); exe_nums.pop(); if (a->type[(a->columnNames)[s1_val]] == 1) { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,(float_type)n1,ss,1)); } else { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,n1,ss,1)); }; } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 1) { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,(float_type)n1,ss,0)); } else { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,n1,ss,0)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t1 = a->get_int_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,t1,ss,0)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t1,t,ss,0)); }; } else { float_type* t = a->get_float_type_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t1,t,ss,0)); } else { float_type* t1 = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,t1,ss,0)); }; } } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) { s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,s3,ss,0)); //free s3 hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,0)); hipFree(s3); } } else { float_type* t = a->get_float_type_by_name(s2_val); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,t, ss,0)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,0)); hipFree(s3); } }; } else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,s3,ss,1)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,1)); hipFree(s3); } } else { float_type* t = a->get_float_type_by_name(s1_val); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = 
exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,t,ss,1)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,1)); hipFree(s3); } }; } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3,n1, ss,1)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1, ss,1)); hipFree(s3); } } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") || s2.compare("VECTOR F") == 0) { n1 = exe_nums.top(); exe_nums.pop(); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3,n1, ss,0)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1, ss,0)); hipFree(s3); } } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,1)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,1)); hipFree(s3); } } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) { n1_f = exe_nums.top(); exe_nums.pop(); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,0)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,0)); hipFree(s3); } } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); int_type* s4 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3, s4,ss,1)); hipFree(s3); hipFree(s4); } else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,1)); hipFree(s3); hipFree(s4); } else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,0)); hipFree(s3); hipFree(s4); } else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,1)); hipFree(s3); hipFree(s4); } } else if (ss.compare("CMP") == 0) { int_type cmp_type = op_nums.front(); op_nums.pop(); s1 = exe_type.top(); exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1,n2,cmp_type)); } else if 
(s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,n2_f,cmp_type)); } else if (s1.compare("FLOAT") == 0 && s2.compare("NUMBER") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,float_type(n2),cmp_type)); } else if (s1.compare("NUMBER") == 0 && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,float_type(n2),cmp_type)); } else if (s1.compare("STRING") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); unsigned int colIndex1 = (a->columnNames).find(s2_val)->second; CudaChar* cc = (a->h_columns_cuda_char)[a->type_index[colIndex1]]; exe_type.push("VECTOR"); bool_vectors.push(cc->cmpStr(s1_val)); } else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); unsigned int colIndex1 = (a->columnNames).find(s1_val)->second; CudaChar* cc = (a->h_columns_cuda_char)[a->type_index[colIndex1]]; exe_type.push("VECTOR"); bool_vectors.push(cc->cmpStr(s2_val)); } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(float_type)n1,cmp_type)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(float_type)n1,cmp_type)); }; } else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); }; } else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); n1_f = exe_nums_f.top(); exe_nums_f.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); }; } else if (s1.compare("VECTOR F") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); hipFree(s3); } else if 
(s1.compare("VECTOR") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); hipFree(s3); } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); hipFree(s3); } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); hipFree(s3); } else if (s1.compare("VECTOR F") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); hipFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); hipFree(s3); } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); hipFree(s3); } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); hipFree(s3); } else if (s1.compare("VECTOR F") == 0 && s2.compare("NAME") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(s3,t,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; hipFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("NAME") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; hipFree(s3); } else if (s1.compare("NAME") == 0 && s2.compare("VECTOR F") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(s3,t,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; hipFree(s3); } else if (s1.compare("NAME") == 0 && s2.compare("VECTOR") == 0) { cmp_type = reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); exe_vectors.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); 
bool_vectors.push(a->compare(t,s3,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; hipFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s2,s3,cmp_type)); hipFree(s3); hipFree(s2); } else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); float_type* s2 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s2,s3,cmp_type)); hipFree(s3); hipFree(s2); } else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,s2,cmp_type)); hipFree(s3); hipFree(s2); } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,s2,cmp_type)); hipFree(s3); hipFree(s2); } else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t1,t,cmp_type)); } else { float_type* t1 = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t1,t,cmp_type)); }; } else { cmp_type = reverse_op(cmp_type); float_type* t = a->get_float_type_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t,t1,cmp_type)); } else { float_type* t1 = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,t1,cmp_type)); }; } } } else if (ss.compare("AND") == 0) { bool* s3 = bool_vectors.top(); bool_vectors.pop(); bool* s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->logical_and(s2,s3)); hipFree(s3); } else if (ss.compare("OR") == 0) { bool* s3 = bool_vectors.top(); bool_vectors.pop(); bool* s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->logical_or(s2,s3)); hipFree(s3); } else { cout << "found nothing " << endl; } }; }; thrust::device_ptr<bool> bp((bool*)bool_vectors.top()); unsigned int count = thrust::count(bp, bp + a->mRecCount, 1); b->mRecCount = b->mRecCount + count; b->prm.push_back(new unsigned int[count]); b->prm_count.push_back(count); b->prm_index.push_back('R'); thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator(a->mRecCount), bp, dev_p.begin(), nz<bool>()); hipMemcpy((void**)b->prm[segment], (void**)(thrust::raw_pointer_cast(dev_p.data())), 4*count, hipMemcpyDeviceToHost); b->type_index = a->type_index; hipFree(bool_vectors.top()); return b->mRecCount; }
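The tail of filter() above turns the final boolean mask into a compacted list of surviving row indices: thrust::count sizes the result, thrust::copy_if with the mask as stencil gathers the indices, and a device-to-host copy writes them into b->prm[segment]. A self-contained sketch of that step, with keep_if_true standing in for the nz<bool> functor defined in cm.h and compact_indices as a hypothetical helper name:

#include <thrust/device_vector.h>
#include <thrust/count.h>
#include <thrust/copy.h>
#include <thrust/iterator/counting_iterator.h>

// Stand-in for nz<bool>: keep an index when its mask element is true.
struct keep_if_true {
  __host__ __device__ bool operator()(bool m) const { return m; }
};

// Return the indices of the rows whose mask entry is true; the size of the
// result is what the code above adds to b->mRecCount and b->prm_count.
thrust::device_vector<unsigned int>
compact_indices(const thrust::device_vector<bool>& mask)
{
  unsigned int count = thrust::count(mask.begin(), mask.end(), true);
  thrust::device_vector<unsigned int> idx(count);
  thrust::copy_if(thrust::make_counting_iterator(0u),
                  thrust::make_counting_iterator((unsigned int)mask.size()),
                  mask.begin(),   // stencil: the filter result
                  idx.begin(),
                  keep_if_true());
  return idx;
}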
e45e37a65f6475e217f711ec200b5bf4d7b9ea9a.cu
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <thrust/device_vector.h> #include "cm.h" unsigned int filter(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, CudaSet* b, unsigned int segment, thrust::device_vector<unsigned int>& dev_p) { stack<string> exe_type; stack<string> exe_value; stack<int_type*> exe_vectors; stack<float_type*> exe_vectors_f; stack<int_type> exe_nums; stack<bool*> bool_vectors; stack<float_type> exe_nums_f; string s1, s2, s1_val, s2_val; int_type n1, n2, res; float_type n1_f, n2_f, res_f; for(int i=0; !op_type.empty(); ++i, op_type.pop()) { string ss = op_type.front(); if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("VECTOR") == 0 || ss.compare("FLOAT") == 0 || ss.compare("STRING") == 0) { exe_type.push(ss); if (ss.compare("NUMBER") == 0) { exe_nums.push(op_nums.front()); op_nums.pop(); } else if (ss.compare("NAME") == 0 || ss.compare("STRING") == 0) { exe_value.push(op_value.front()); op_value.pop(); } if (ss.compare("FLOAT") == 0) { exe_nums_f.push(op_nums_f.front()); op_nums_f.pop(); } } else { if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) { // get 2 values from the stack s1 = exe_type.top(); exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); if (ss.compare("ADD") == 0 ) res = n1+n2; else if (ss.compare("MUL") == 0 ) res = n1*n2; else if (ss.compare("DIV") == 0 ) res = n1/n2; else res = n1-n2; thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount); thrust::sequence(p, p+(a->mRecCount),res,(int_type)0); exe_type.push("VECTOR"); exe_vectors.push(thrust::raw_pointer_cast(p)); } else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2_f = exe_nums_f.top(); exe_nums_f.pop(); if (ss.compare("ADD") == 0 ) res_f = n1_f+n2_f; else if (ss.compare("MUL") == 0 ) res_f = n1_f*n2_f; else if (ss.compare("DIV") == 0 ) res_f = n1_f/n2_f; else res_f = n1_f-n2_f; thrust::device_ptr<float_type> p = thrust::device_malloc<float_type>(a->mRecCount); thrust::sequence(p, p+(a->mRecCount),res_f,(float_type)0); exe_type.push("VECTOR F"); exe_vectors_f.push(thrust::raw_pointer_cast(p)); } else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) { s1_val = exe_value.top(); exe_value.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR F"); if (a->type[(a->columnNames)[s1_val]] == 1) { float_type* t = a->get_float_type_by_name(s1_val); exe_vectors_f.push(a->op(t,n1_f,ss,1)); } else { int_type* t = a->get_int_by_name(s1_val); exe_vectors_f.push(a->op(t,n1_f,ss,1)); }; } else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR F"); if (a->type[(a->columnNames)[s2_val]] == 1) { float_type* t = 
a->get_float_type_by_name(s2_val); exe_vectors_f.push(a->op(t,n1_f,ss,0)); } else { int_type* t = a->get_int_by_name(s2_val); exe_vectors_f.push(a->op(t,n1_f,ss,0)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { s1_val = exe_value.top(); exe_value.pop(); n1 = exe_nums.top(); exe_nums.pop(); if (a->type[(a->columnNames)[s1_val]] == 1) { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,(float_type)n1,ss,1)); } else { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,n1,ss,1)); }; } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 1) { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,(float_type)n1,ss,0)); } else { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,n1,ss,0)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t1 = a->get_int_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,t1,ss,0)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t1,t,ss,0)); }; } else { float_type* t = a->get_float_type_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t1,t,ss,0)); } else { float_type* t1 = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,t1,ss,0)); }; } } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) { s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,s3,ss,0)); //free s3 cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,0)); cudaFree(s3); } } else { float_type* t = a->get_float_type_by_name(s2_val); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,t, ss,0)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,0)); cudaFree(s3); } }; } else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,s3,ss,1)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,1)); cudaFree(s3); } } else { float_type* t = a->get_float_type_by_name(s1_val); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); 
exe_vectors_f.push(a->op(s3,t,ss,1)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,1)); cudaFree(s3); } }; } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3,n1, ss,1)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1, ss,1)); cudaFree(s3); } } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") || s2.compare("VECTOR F") == 0) { n1 = exe_nums.top(); exe_nums.pop(); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3,n1, ss,0)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1, ss,0)); cudaFree(s3); } } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,1)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,1)); cudaFree(s3); } } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) { n1_f = exe_nums.top(); exe_nums.pop(); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,0)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,0)); cudaFree(s3); } } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); int_type* s4 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3, s4,ss,1)); cudaFree(s3); cudaFree(s4); } else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,1)); cudaFree(s3); cudaFree(s4); } else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,0)); cudaFree(s3); cudaFree(s4); } else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,1)); cudaFree(s3); cudaFree(s4); } } else if (ss.compare("CMP") == 0) { int_type cmp_type = op_nums.front(); op_nums.pop(); s1 = exe_type.top(); exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1,n2,cmp_type)); } else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) { n1_f = 
exe_nums_f.top(); exe_nums_f.pop(); n2_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,n2_f,cmp_type)); } else if (s1.compare("FLOAT") == 0 && s2.compare("NUMBER") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,float_type(n2),cmp_type)); } else if (s1.compare("NUMBER") == 0 && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,float_type(n2),cmp_type)); } else if (s1.compare("STRING") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); unsigned int colIndex1 = (a->columnNames).find(s2_val)->second; CudaChar* cc = (a->h_columns_cuda_char)[a->type_index[colIndex1]]; exe_type.push("VECTOR"); bool_vectors.push(cc->cmpStr(s1_val)); } else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); unsigned int colIndex1 = (a->columnNames).find(s1_val)->second; CudaChar* cc = (a->h_columns_cuda_char)[a->type_index[colIndex1]]; exe_type.push("VECTOR"); bool_vectors.push(cc->cmpStr(s2_val)); } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(float_type)n1,cmp_type)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(float_type)n1,cmp_type)); }; } else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); }; } else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); n1_f = exe_nums_f.top(); exe_nums_f.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); }; } else if (s1.compare("VECTOR F") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); cudaFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("NUMBER") == 0) { cmp_type = 
reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); cudaFree(s3); } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); cudaFree(s3); } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); cudaFree(s3); } else if (s1.compare("VECTOR F") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); cudaFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); cudaFree(s3); } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); cudaFree(s3); } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); cudaFree(s3); } else if (s1.compare("VECTOR F") == 0 && s2.compare("NAME") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(s3,t,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; cudaFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("NAME") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; cudaFree(s3); } else if (s1.compare("NAME") == 0 && s2.compare("VECTOR F") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(s3,t,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; cudaFree(s3); } else if (s1.compare("NAME") == 0 && s2.compare("VECTOR") == 0) { cmp_type = reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); exe_vectors.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); } else { float_type* t = 
a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; cudaFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s2,s3,cmp_type)); cudaFree(s3); cudaFree(s2); } else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); float_type* s2 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s2,s3,cmp_type)); cudaFree(s3); cudaFree(s2); } else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,s2,cmp_type)); cudaFree(s3); cudaFree(s2); } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,s2,cmp_type)); cudaFree(s3); cudaFree(s2); } else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t1,t,cmp_type)); } else { float_type* t1 = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t1,t,cmp_type)); }; } else { cmp_type = reverse_op(cmp_type); float_type* t = a->get_float_type_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t,t1,cmp_type)); } else { float_type* t1 = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,t1,cmp_type)); }; } } } else if (ss.compare("AND") == 0) { bool* s3 = bool_vectors.top(); bool_vectors.pop(); bool* s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->logical_and(s2,s3)); cudaFree(s3); } else if (ss.compare("OR") == 0) { bool* s3 = bool_vectors.top(); bool_vectors.pop(); bool* s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->logical_or(s2,s3)); cudaFree(s3); } else { cout << "found nothing " << endl; } }; }; thrust::device_ptr<bool> bp((bool*)bool_vectors.top()); unsigned int count = thrust::count(bp, bp + a->mRecCount, 1); b->mRecCount = b->mRecCount + count; b->prm.push_back(new unsigned int[count]); b->prm_count.push_back(count); b->prm_index.push_back('R'); thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator(a->mRecCount), bp, dev_p.begin(), nz<bool>()); cudaMemcpy((void**)b->prm[segment], (void**)(thrust::raw_pointer_cast(dev_p.data())), 4*count, cudaMemcpyDeviceToHost); b->type_index = a->type_index; cudaFree(bool_vectors.top()); return b->mRecCount; }
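The two versions of filter() above differ only in runtime API names (cudaFree/hipFree, cudaMemcpy/hipMemcpy, cudaMemcpyDeviceToHost/hipMemcpyDeviceToHost) plus the extra hip_runtime.h include; the Thrust code is identical. A hypothetical portability shim (an illustration, not hipify output) that would let the tail of the function be written once:

#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#define gpuMemcpy             hipMemcpy
#define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost
#define gpuFree               hipFree
#else
#include <cuda_runtime.h>
#define gpuMemcpy             cudaMemcpy
#define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost
#define gpuFree               cudaFree
#endif

// The device-to-host copy of the compacted indices then reads the same in
// both builds (with sizeof(unsigned int) spelled out instead of the literal 4):
//   gpuMemcpy(b->prm[segment], thrust::raw_pointer_cast(dev_p.data()),
//             sizeof(unsigned int) * count, gpuMemcpyDeviceToHost);
//   gpuFree(bool_vectors.top());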
4a1f7c04109e5db66beb9d23810c1cd73e6c8d2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// LSU EE 7700-2 (Sp 2011), GPU Microarchitecture // /// Homework 2 -- PARTIAL SOLUTION /// Homework 3 -- Assignment (Edit this file.) // This file has a solution to Homework 2 Problem 3(a). // For Homework 3, edit routine stencil_shared_3. #include "sol.cuh" // Constants holding array sizes and pointers and coefficients. // // Values are set by cuda calls, they don't automatically take values // of variables in the C program with the same name. // __constant__ float v0, v1, v2; __constant__ int array_size; __constant__ int row_stride, dim_size_lg, dim_block_lg; __constant__ float* a; __constant__ float* b; __constant__ int homework_R; extern __shared__ float s[]; // Shared memory for buffering a elements. __global__ void stencil(); __global__ void stencil_iter(); __global__ void stencil_shared(); __global__ void stencil_shared_2(); __global__ void stencil_shared_3(); static __host__ int kernels_get_attr_(pCUDA_Func_Attributes *attr) { int count = 0; #define GETATTR(func,version) \ count++; \ if ( attr ) { \ attr->err = hipFuncGetAttributes(&attr->attr,func); \ attr->name = #func; \ attr->abbrev = version; \ attr++; \ } GETATTR(stencil,'1'); GETATTR(stencil_iter,'i'); GETATTR(stencil_shared,'s'); GETATTR(stencil_shared_2,'2'); GETATTR(stencil_shared_3,'3'); return count; #undef GETATTR } __host__ int kernels_get_attr(pCUDA_Func_Attributes **attr) { int count = kernels_get_attr_(NULL); *attr = (pCUDA_Func_Attributes*) calloc(count,sizeof(**attr)); return kernels_get_attr_(*attr); } // This routine executes on the CPU. // __host__ void stencil_launch(dim3 dg, dim3 db, int shared_bytes, char version) { // Launch the kernel, using the provided configuration (block size, etc). // switch ( version ) { case 'i':hipLaunchKernelGGL(( stencil_iter), dim3(dg),dim3(db), 0, 0, ); break; case 's':hipLaunchKernelGGL(( stencil_shared), dim3(dg),dim3(db),shared_bytes, 0, ); break; case '2':hipLaunchKernelGGL(( stencil_shared_2), dim3(dg),dim3(db),shared_bytes, 0, ); break; case '3':hipLaunchKernelGGL(( stencil_shared_3), dim3(dg),dim3(db),shared_bytes, 0, ); break; } } __global__ void stencil() { // Compute a unique index (number) for this thread. // This will be used as an array index. // int idx = threadIdx.x + blockIdx.x * blockDim.x; int row_mask = row_stride - 1; int col = idx & row_mask; int row = idx >> dim_size_lg; if ( row == 0 || row >= row_mask || col == 0 || col == row_mask ) return; int iu = idx - row_stride; int id = idx + row_stride; b[idx] = v0 * a[idx] + v1 * ( a[idx-1] + a[idx+1] + a[iu] + a[id] ) + v2 * ( a[iu-1] + a[iu+1] + a[id-1] + a[id+1] ); } __global__ void stencil_iter() { // Compute a unique index (number) for this thread. // This will be used as an array index. // int tid = threadIdx.x + blockIdx.x * blockDim.x; int row_mask = row_stride - 1; int thread_count = blockDim.x * gridDim.x; for ( int idx = tid; idx < array_size; idx += thread_count ) { int col = idx & row_mask; int row = idx >> dim_size_lg; if ( row == 0 || row >= row_mask || col == 0 || col == row_mask ) continue; int iu = idx - row_stride; int id = idx + row_stride; #ifdef DEBUG_STENCIL b[idx] = v0 * a[idx]; #else b[idx] = v0 * a[idx] + v1 * ( a[idx-1] + a[idx+1] + a[iu] + a[id] ) + v2 * ( a[iu-1] + a[iu+1] + a[id-1] + a[id+1] ); #endif } } __global__ void stencil_shared() { // This code operates on a square array of pixels. 
// Compute the array_row_stride, which for this code is also equal // to the number of rows and the number of columns. // int array_row_stride = 1 << dim_size_lg; int array_row_mask = array_row_stride - 1; // Determine how many pixels each block will compute. The number is // based on the fact that the first and last thread of each block // never compute a pixel, the other threads compute at most one // pixel each. // int block_compute_width = blockDim.x - 2; // Determine how many blocks are needed to compute all of the // pixels in one row. The computation is based on the fact that // the first and last pixels in a row should be left untouched. // int blocks_per_row = ceilf( float(array_row_stride-2) / block_compute_width ); // Determine how many rows each block computes. // int rows_per_block = ceilf( float(blocks_per_row) * array_row_stride / gridDim.x ); int cols_per_block = block_compute_width; // If this thread is past the last column that a block is supposed // to compute or load, then return. // if ( threadIdx.x >= cols_per_block + 2 ) return; // Compute a "large" row number for this block. These large // row numbers can exceed the number of rows in the array. // The actual row number to use is row_0_large mod blockDim.x, // the column number is // ( row_0_large / blockDim.x ) * cols_per_block + threadIdx.x. // int row_0_large = rows_per_block * blockIdx.x; // Starting row. int row_9_large = row_0_large + rows_per_block + 2; // Ending row. // A column group is a set of columns handled by one block. If // blocks_per_row < gridDim.x (the number of blocks) then a block // will have to cover more than one sets of columns (put another // way, a thread will have to cover more than one column). // int col_group_0 = row_0_large >> dim_size_lg; // Starting column. int col_group_9 = row_9_large >> dim_size_lg; // Ending column. // Compute indices into shared memory. At any one time shared memory // will hold three rows of pixels. Index siu holds the upper row, // sidx holds the middle row (corresponding to the pixel to be written), // and sid is the lower (down) row. A single shared memory array // is used to hold all three rows. // int siu = threadIdx.x; int sidx = siu + blockDim.x; int sid = sidx + blockDim.x; for ( int col_group = col_group_0; col_group <= col_group_9; col_group++ ) { // The starting row will be zero if this isn't the first column // group (because computation reached the bottom of one column // and is now wrapping around to the top [row 0] of another column). // int row_0 = col_group == col_group_0 ? row_0_large & array_row_mask : 0; int row_9 = col_group == col_group_9 ? ( row_9_large & array_row_mask ) : array_row_mask; // Compute the first column number for the block. // int col_0 = col_group * cols_per_block; // Compute the column number for this thread. // int col = col_0 + threadIdx.x; if ( col >= array_row_stride ) return; // Shift the row numbers over so that they can easily be used // to compute the array idx. // int row_0s = row_0 << dim_size_lg; int row_9s = row_9 << dim_size_lg; // Check whether we should just load a value to shared memory. // If load_only is false then we both load the value and compute // a pixel. // bool load_only = threadIdx.x == 0 || threadIdx.x == cols_per_block + 1 || col == array_row_stride - 1; // Compute the array index for the "up" row. This will be loaded // to shared memory but nothing will be computed for it. // int idx = row_0s | col; s[siu] = a[idx]; // Increment by the row stride to obtain the address of the middle // row. 
We will compute a value for this in the first iteration // of the while loop below. // idx += array_row_stride; s[sidx] = a[idx]; // Compute the array index at which we should stop. // int idx_stop = row_9s | col; // Compute pixels for column col, starting from row_0s + // array_row_stride and ending at row_9s. // while ( idx < idx_stop ) { // Compute the address of the pixel in the row below us, // idx_next, and load it into shared memory. // int idx_next = idx + array_row_stride; s[sid] = a[idx_next]; // Wait for other threads in this block to finish writing // shared memory. // __syncthreads(); if ( !load_only ) { // Compute the pixel value and write it to b. #ifdef DEBUG_STENCIL b[idx] = v0 * s[sidx]; #else b[idx] = v0 * s[sidx] + v1 * ( s[sidx-1] + s[sidx+1] + s[siu] + s[sid] ) + v2 * ( s[siu-1] + s[siu+1] + s[sid-1] + s[sid+1] ); #endif } // Wait for other threads in this block to finish reading // shared memory. // __syncthreads(); // Rotate indices so that what is currently the middle row, // sidx, becomes the up row, and what is currently the down // row, sid, becomes the current row, and siu will be the sid. // int sid_new = siu; siu = sidx; sidx = sid; sid = sid_new; // Move the idx down one row. // idx = idx_next; } } } __global__ void stencil_shared_2() { /// SOLUTION - Homework 2 /// DO NOT edit this routine for Homework 3, instead modify stencil_shared_3. /// NOTE: This solution is inefficient. // // The code here will run more slowly than stencil_shared, at least // on CC 1.x devices, due to inefficient global and shared memory // access patterns. // // This code does not include unrolling. That will be added // later to another routine. // // For the solution to Problem 3a the code here is adjusted so that // each thread can operate on R pixels. Look for the "Times R" // comments. // int array_row_stride = 1 << dim_size_lg; int array_row_mask = array_row_stride - 1; // Adjust the number of columns that each block computes. // int cols_per_block = blockDim.x * homework_R - 2; // Times R int blocks_per_row = ceilf( float(array_row_stride-2) / cols_per_block ); int rows_per_block = ceilf( float(blocks_per_row) * array_row_stride / gridDim.x ); int row_0_large = rows_per_block * blockIdx.x; int row_9_large = row_0_large + rows_per_block + 2; int col_group_0 = row_0_large >> dim_size_lg; int col_group_9 = row_9_large >> dim_size_lg; // Adjust the indices into shared memory. // int siu = threadIdx.x * homework_R; // Times R int sidx = siu + blockDim.x * homework_R; // Times R int sid = sidx + blockDim.x * homework_R; // Times R for ( int col_group = col_group_0; col_group <= col_group_9; col_group++ ) { int row_0 = col_group == col_group_0 ? row_0_large & array_row_mask : 0; int row_9 = col_group == col_group_9 ? ( row_9_large & array_row_mask ) : array_row_mask; int col_0 = col_group * cols_per_block; int col_9 = min( col_0 + cols_per_block, array_row_stride - 2 ); // Since each thread handles R pixels need to multiply by R to // find the starting column number for a thread. // int col = col_0 + threadIdx.x * homework_R; // Times R if ( col >= array_row_stride ) return; int row_s = row_0 << dim_size_lg; int row_9s = row_9 << dim_size_lg; // Use loops to load data. // for ( int i=0; i<homework_R; i++ ) { // Compute column number for this element. // int coli = col + i; // If column out of range skip this element. // if ( coli >= array_row_stride ) continue; // Compute array index, and cache elements. 
// int idx = row_s + coli; s[siu+i] = a[ idx ]; s[sidx+i] = a[ idx + array_row_stride ]; } row_s += array_row_stride; while ( row_s < row_9s ) { int row_next = row_s + array_row_stride; for ( int i=0; i<homework_R; i++ ) { // Cache the next row of elements. // int coli = col + i; if ( coli < array_row_stride ) s[sid + i] = a[ row_next + coli ]; } __syncthreads(); // Use a loop for computation. // for ( int i=0; i<homework_R; i++ ) { // Compute column and if out of range, skip this element. // int coli = col + i; if ( coli == col_0 || coli > col_9 ) continue; int idx = row_s + coli; #ifdef DEBUG_STENCIL b[idx] = v0 * s[sidx+i]; #else b[idx] = v0 * s[sidx+i] + v1 * ( s[sidx+i-1] + s[sidx+i+1] + s[siu+i] + s[sid+i] ) + v2 * ( s[siu+i-1] + s[siu+i+1] + s[sid+i-1] + s[sid+i+1] ); #endif } __syncthreads(); int sid_new = siu; siu = sidx; sidx = sid; sid = sid_new; row_s = row_next; } } } __global__ void stencil_shared_3() { /// SOLVE HOMEWORK 3 HERE /// NOTE: This code is inefficient. // // The code here will run more slowly than stencil_shared, at least // on CC 1.x devices, due to inefficient global and shared memory // access patterns. // // Fix it. int array_row_stride = 1 << dim_size_lg; int array_row_mask = array_row_stride - 1; // Adjust the number of columns that each block computes. // int cols_per_block = blockDim.x * homework_R - 2; // Times R int blocks_per_row = ceilf( float(array_row_stride-2) / cols_per_block ); int rows_per_block = ceilf( float(blocks_per_row) * array_row_stride / gridDim.x ); int row_0_large = rows_per_block * blockIdx.x; int row_9_large = row_0_large + rows_per_block + 2; int col_group_0 = row_0_large >> dim_size_lg; int col_group_9 = row_9_large >> dim_size_lg; // Adjust the indices into shared memory. // int siu = threadIdx.x;// *homework_R; // Times R int sidx = siu + blockDim.x*homework_R ; // Times R int sid = sidx + blockDim.x*homework_R ; // Times R for ( int col_group = col_group_0; col_group <= col_group_9; col_group++ ) { int row_0 = col_group == col_group_0 ? row_0_large & array_row_mask : 0; int row_9 = col_group == col_group_9 ? ( row_9_large & array_row_mask ) : array_row_mask; int col_0 = col_group * cols_per_block; int col_9 = min( col_0 + cols_per_block, array_row_stride - 2 ); // Since each thread handles R pixels need to multiply by R to // find the starting column number for a thread. // int col = col_0 + threadIdx.x; // Times R if ( col >= array_row_stride ) return; int row_s = row_0 << dim_size_lg; int row_9s = row_9 << dim_size_lg; // Use loops to load data. // for ( int i=0; i<blockDim.x*homework_R; i+=blockDim.x ) { // Compute column number for this element. // int coli = col + i; // If column out of range skip this element. // if ( coli >= array_row_stride ) continue; // Compute array index, and cache elements. // int idx = row_s + coli; s[siu+i] = a[ idx ]; s[sidx+i] = a[ idx + array_row_stride ]; } row_s += array_row_stride; while ( row_s < row_9s ) { int row_next = row_s + array_row_stride; for ( int i=0; i<blockDim.x*homework_R; i+=blockDim.x ) { // Cache the next row of elements. // int coli = col + i; if ( coli < array_row_stride ) s[sid + i] = a[ row_next + coli ]; } __syncthreads(); // Use a loop for computation. // for ( int i=0; i<blockDim.x*homework_R; i+=blockDim.x ) { // Compute column and if out of range, skip this element. 
// int coli = col + i; if ( coli == col_0 || coli > col_9 ) continue; int idx = row_s + coli; #ifdef DEBUG_STENCIL b[idx] = v0 * s[sidx+i]; #else b[idx] = v0 * s[sidx+i] + v1 * ( s[sidx+i-1] + s[sidx+i+1] + s[siu+i] + s[sid+i] ) + v2 * ( s[siu+i-1] + s[siu+i+1] + s[sid+i-1] + s[sid+i+1] ); #endif } __syncthreads(); int sid_new = siu; siu = sidx; sidx = sid; sid = sid_new; row_s = row_next; } } }
4a1f7c04109e5db66beb9d23810c1cd73e6c8d2b.cu
/// LSU EE 7700-2 (Sp 2011), GPU Microarchitecture // /// Homework 2 -- PARTIAL SOLUTION /// Homework 3 -- Assignment (Edit this file.) // This file has a solution to Homework 2 Problem 3(a). // For Homework 3, edit routine stencil_shared_3. #include "sol.cuh" // Constants holding array sizes and pointers and coefficients. // // Values are set by cuda calls, they don't automatically take values // of variables in the C program with the same name. // __constant__ float v0, v1, v2; __constant__ int array_size; __constant__ int row_stride, dim_size_lg, dim_block_lg; __constant__ float* a; __constant__ float* b; __constant__ int homework_R; extern __shared__ float s[]; // Shared memory for buffering a elements. __global__ void stencil(); __global__ void stencil_iter(); __global__ void stencil_shared(); __global__ void stencil_shared_2(); __global__ void stencil_shared_3(); static __host__ int kernels_get_attr_(pCUDA_Func_Attributes *attr) { int count = 0; #define GETATTR(func,version) \ count++; \ if ( attr ) { \ attr->err = cudaFuncGetAttributes(&attr->attr,func); \ attr->name = #func; \ attr->abbrev = version; \ attr++; \ } GETATTR(stencil,'1'); GETATTR(stencil_iter,'i'); GETATTR(stencil_shared,'s'); GETATTR(stencil_shared_2,'2'); GETATTR(stencil_shared_3,'3'); return count; #undef GETATTR } __host__ int kernels_get_attr(pCUDA_Func_Attributes **attr) { int count = kernels_get_attr_(NULL); *attr = (pCUDA_Func_Attributes*) calloc(count,sizeof(**attr)); return kernels_get_attr_(*attr); } // This routine executes on the CPU. // __host__ void stencil_launch(dim3 dg, dim3 db, int shared_bytes, char version) { // Launch the kernel, using the provided configuration (block size, etc). // switch ( version ) { case 'i': stencil_iter<<<dg,db>>>(); break; case 's': stencil_shared<<<dg,db,shared_bytes>>>(); break; case '2': stencil_shared_2<<<dg,db,shared_bytes>>>(); break; case '3': stencil_shared_3<<<dg,db,shared_bytes>>>(); break; } } __global__ void stencil() { // Compute a unique index (number) for this thread. // This will be used as an array index. // int idx = threadIdx.x + blockIdx.x * blockDim.x; int row_mask = row_stride - 1; int col = idx & row_mask; int row = idx >> dim_size_lg; if ( row == 0 || row >= row_mask || col == 0 || col == row_mask ) return; int iu = idx - row_stride; int id = idx + row_stride; b[idx] = v0 * a[idx] + v1 * ( a[idx-1] + a[idx+1] + a[iu] + a[id] ) + v2 * ( a[iu-1] + a[iu+1] + a[id-1] + a[id+1] ); } __global__ void stencil_iter() { // Compute a unique index (number) for this thread. // This will be used as an array index. // int tid = threadIdx.x + blockIdx.x * blockDim.x; int row_mask = row_stride - 1; int thread_count = blockDim.x * gridDim.x; for ( int idx = tid; idx < array_size; idx += thread_count ) { int col = idx & row_mask; int row = idx >> dim_size_lg; if ( row == 0 || row >= row_mask || col == 0 || col == row_mask ) continue; int iu = idx - row_stride; int id = idx + row_stride; #ifdef DEBUG_STENCIL b[idx] = v0 * a[idx]; #else b[idx] = v0 * a[idx] + v1 * ( a[idx-1] + a[idx+1] + a[iu] + a[id] ) + v2 * ( a[iu-1] + a[iu+1] + a[id-1] + a[id+1] ); #endif } } __global__ void stencil_shared() { // This code operates on a square array of pixels. // Compute the array_row_stride, which for this code is also equal // to the number of rows and the number of columns. // int array_row_stride = 1 << dim_size_lg; int array_row_mask = array_row_stride - 1; // Determine how many pixels each block will compute. 
The number is // based on the fact that the first and last thread of each block // never compute a pixel, the other threads compute at most one // pixel each. // int block_compute_width = blockDim.x - 2; // Determine how many blocks are needed to compute all of the // pixels in one row. The computation is based on the fact that // the first and last pixels in a row should be left untouched. // int blocks_per_row = ceilf( float(array_row_stride-2) / block_compute_width ); // Determine how many rows each block computes. // int rows_per_block = ceilf( float(blocks_per_row) * array_row_stride / gridDim.x ); int cols_per_block = block_compute_width; // If this thread is past the last column that a block is supposed // to compute or load, then return. // if ( threadIdx.x >= cols_per_block + 2 ) return; // Compute a "large" row number for this block. These large // row numbers can exceed the number of rows in the array. // The actual row number to use is row_0_large mod blockDim.x, // the column number is // ( row_0_large / blockDim.x ) * cols_per_block + threadIdx.x. // int row_0_large = rows_per_block * blockIdx.x; // Starting row. int row_9_large = row_0_large + rows_per_block + 2; // Ending row. // A column group is a set of columns handled by one block. If // blocks_per_row < gridDim.x (the number of blocks) then a block // will have to cover more than one sets of columns (put another // way, a thread will have to cover more than one column). // int col_group_0 = row_0_large >> dim_size_lg; // Starting column. int col_group_9 = row_9_large >> dim_size_lg; // Ending column. // Compute indices into shared memory. At any one time shared memory // will hold three rows of pixels. Index siu holds the upper row, // sidx holds the middle row (corresponding to the pixel to be written), // and sid is the lower (down) row. A single shared memory array // is used to hold all three rows. // int siu = threadIdx.x; int sidx = siu + blockDim.x; int sid = sidx + blockDim.x; for ( int col_group = col_group_0; col_group <= col_group_9; col_group++ ) { // The starting row will be zero if this isn't the first column // group (because computation reached the bottom of one column // and is now wrapping around to the top [row 0] of another column). // int row_0 = col_group == col_group_0 ? row_0_large & array_row_mask : 0; int row_9 = col_group == col_group_9 ? ( row_9_large & array_row_mask ) : array_row_mask; // Compute the first column number for the block. // int col_0 = col_group * cols_per_block; // Compute the column number for this thread. // int col = col_0 + threadIdx.x; if ( col >= array_row_stride ) return; // Shift the row numbers over so that they can easily be used // to compute the array idx. // int row_0s = row_0 << dim_size_lg; int row_9s = row_9 << dim_size_lg; // Check whether we should just load a value to shared memory. // If load_only is false then we both load the value and compute // a pixel. // bool load_only = threadIdx.x == 0 || threadIdx.x == cols_per_block + 1 || col == array_row_stride - 1; // Compute the array index for the "up" row. This will be loaded // to shared memory but nothing will be computed for it. // int idx = row_0s | col; s[siu] = a[idx]; // Increment by the row stride to obtain the address of the middle // row. We will compute a value for this in the first iteration // of the while loop below. // idx += array_row_stride; s[sidx] = a[idx]; // Compute the array index at which we should stop. 
// int idx_stop = row_9s | col; // Compute pixels for column col, starting from row_0s + // array_row_stride and ending at row_9s. // while ( idx < idx_stop ) { // Compute the address of the pixel in the row below us, // idx_next, and load it into shared memory. // int idx_next = idx + array_row_stride; s[sid] = a[idx_next]; // Wait for other threads in this block to finish writing // shared memory. // __syncthreads(); if ( !load_only ) { // Compute the pixel value and write it to b. #ifdef DEBUG_STENCIL b[idx] = v0 * s[sidx]; #else b[idx] = v0 * s[sidx] + v1 * ( s[sidx-1] + s[sidx+1] + s[siu] + s[sid] ) + v2 * ( s[siu-1] + s[siu+1] + s[sid-1] + s[sid+1] ); #endif } // Wait for other threads in this block to finish reading // shared memory. // __syncthreads(); // Rotate indices so that what is currently the middle row, // sidx, becomes the up row, and what is currently the down // row, sid, becomes the current row, and siu will be the sid. // int sid_new = siu; siu = sidx; sidx = sid; sid = sid_new; // Move the idx down one row. // idx = idx_next; } } } __global__ void stencil_shared_2() { /// SOLUTION - Homework 2 /// DO NOT edit this routine for Homework 3, instead modify stencil_shared_3. /// NOTE: This solution is inefficient. // // The code here will run more slowly than stencil_shared, at least // on CC 1.x devices, due to inefficient global and shared memory // access patterns. // // This code does not include unrolling. That will be added // later to another routine. // // For the solution to Problem 3a the code here is adjusted so that // each thread can operate on R pixels. Look for the "Times R" // comments. // int array_row_stride = 1 << dim_size_lg; int array_row_mask = array_row_stride - 1; // Adjust the number of columns that each block computes. // int cols_per_block = blockDim.x * homework_R - 2; // Times R int blocks_per_row = ceilf( float(array_row_stride-2) / cols_per_block ); int rows_per_block = ceilf( float(blocks_per_row) * array_row_stride / gridDim.x ); int row_0_large = rows_per_block * blockIdx.x; int row_9_large = row_0_large + rows_per_block + 2; int col_group_0 = row_0_large >> dim_size_lg; int col_group_9 = row_9_large >> dim_size_lg; // Adjust the indices into shared memory. // int siu = threadIdx.x * homework_R; // Times R int sidx = siu + blockDim.x * homework_R; // Times R int sid = sidx + blockDim.x * homework_R; // Times R for ( int col_group = col_group_0; col_group <= col_group_9; col_group++ ) { int row_0 = col_group == col_group_0 ? row_0_large & array_row_mask : 0; int row_9 = col_group == col_group_9 ? ( row_9_large & array_row_mask ) : array_row_mask; int col_0 = col_group * cols_per_block; int col_9 = min( col_0 + cols_per_block, array_row_stride - 2 ); // Since each thread handles R pixels need to multiply by R to // find the starting column number for a thread. // int col = col_0 + threadIdx.x * homework_R; // Times R if ( col >= array_row_stride ) return; int row_s = row_0 << dim_size_lg; int row_9s = row_9 << dim_size_lg; // Use loops to load data. // for ( int i=0; i<homework_R; i++ ) { // Compute column number for this element. // int coli = col + i; // If column out of range skip this element. // if ( coli >= array_row_stride ) continue; // Compute array index, and cache elements. // int idx = row_s + coli; s[siu+i] = a[ idx ]; s[sidx+i] = a[ idx + array_row_stride ]; } row_s += array_row_stride; while ( row_s < row_9s ) { int row_next = row_s + array_row_stride; for ( int i=0; i<homework_R; i++ ) { // Cache the next row of elements. 
// int coli = col + i; if ( coli < array_row_stride ) s[sid + i] = a[ row_next + coli ]; } __syncthreads(); // Use a loop for computation. // for ( int i=0; i<homework_R; i++ ) { // Compute column and if out of range, skip this element. // int coli = col + i; if ( coli == col_0 || coli > col_9 ) continue; int idx = row_s + coli; #ifdef DEBUG_STENCIL b[idx] = v0 * s[sidx+i]; #else b[idx] = v0 * s[sidx+i] + v1 * ( s[sidx+i-1] + s[sidx+i+1] + s[siu+i] + s[sid+i] ) + v2 * ( s[siu+i-1] + s[siu+i+1] + s[sid+i-1] + s[sid+i+1] ); #endif } __syncthreads(); int sid_new = siu; siu = sidx; sidx = sid; sid = sid_new; row_s = row_next; } } } __global__ void stencil_shared_3() { /// SOLVE HOMEWORK 3 HERE /// NOTE: This code is inefficient. // // The code here will run more slowly than stencil_shared, at least // on CC 1.x devices, due to inefficient global and shared memory // access patterns. // // Fix it. int array_row_stride = 1 << dim_size_lg; int array_row_mask = array_row_stride - 1; // Adjust the number of columns that each block computes. // int cols_per_block = blockDim.x * homework_R - 2; // Times R int blocks_per_row = ceilf( float(array_row_stride-2) / cols_per_block ); int rows_per_block = ceilf( float(blocks_per_row) * array_row_stride / gridDim.x ); int row_0_large = rows_per_block * blockIdx.x; int row_9_large = row_0_large + rows_per_block + 2; int col_group_0 = row_0_large >> dim_size_lg; int col_group_9 = row_9_large >> dim_size_lg; // Adjust the indices into shared memory. // int siu = threadIdx.x;// *homework_R; // Times R int sidx = siu + blockDim.x*homework_R ; // Times R int sid = sidx + blockDim.x*homework_R ; // Times R for ( int col_group = col_group_0; col_group <= col_group_9; col_group++ ) { int row_0 = col_group == col_group_0 ? row_0_large & array_row_mask : 0; int row_9 = col_group == col_group_9 ? ( row_9_large & array_row_mask ) : array_row_mask; int col_0 = col_group * cols_per_block; int col_9 = min( col_0 + cols_per_block, array_row_stride - 2 ); // Since each thread handles R pixels need to multiply by R to // find the starting column number for a thread. // int col = col_0 + threadIdx.x; // Times R if ( col >= array_row_stride ) return; int row_s = row_0 << dim_size_lg; int row_9s = row_9 << dim_size_lg; // Use loops to load data. // for ( int i=0; i<blockDim.x*homework_R; i+=blockDim.x ) { // Compute column number for this element. // int coli = col + i; // If column out of range skip this element. // if ( coli >= array_row_stride ) continue; // Compute array index, and cache elements. // int idx = row_s + coli; s[siu+i] = a[ idx ]; s[sidx+i] = a[ idx + array_row_stride ]; } row_s += array_row_stride; while ( row_s < row_9s ) { int row_next = row_s + array_row_stride; for ( int i=0; i<blockDim.x*homework_R; i+=blockDim.x ) { // Cache the next row of elements. // int coli = col + i; if ( coli < array_row_stride ) s[sid + i] = a[ row_next + coli ]; } __syncthreads(); // Use a loop for computation. // for ( int i=0; i<blockDim.x*homework_R; i+=blockDim.x ) { // Compute column and if out of range, skip this element. // int coli = col + i; if ( coli == col_0 || coli > col_9 ) continue; int idx = row_s + coli; #ifdef DEBUG_STENCIL b[idx] = v0 * s[sidx+i]; #else b[idx] = v0 * s[sidx+i] + v1 * ( s[sidx+i-1] + s[sidx+i+1] + s[siu+i] + s[sid+i] ) + v2 * ( s[siu+i-1] + s[siu+i+1] + s[sid+i-1] + s[sid+i+1] ); #endif } __syncthreads(); int sid_new = siu; siu = sidx; sidx = sid; sid = sid_new; row_s = row_next; } } }
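The stencil kernels above buffer three rows of the tile in dynamic shared memory (extern __shared__ float s[]), with siu, sidx and sid spaced blockDim.x apart in stencil_shared and blockDim.x * homework_R apart in stencil_shared_2/3, so the launch has to pass a matching shared_bytes. A minimal host-side launch sketch, assuming a host-side variable R that mirrors the device constant homework_R (the constant itself is not readable from host code here) and reusing the stencil_launch wrapper shown above:

// Launch sketch for stencil_shared_3 (illustrative; R mirrors homework_R).
void launch_stencil_shared_3_sketch(int grid_blocks, int block_threads, int R)
{
    dim3 dg(grid_blocks);
    dim3 db(block_threads);

    // Three row buffers (up / middle / down), each blockDim.x * R floats wide,
    // matching the siu / sidx / sid offsets used by stencil_shared_2/3.
    // Any halo padding the host code already adds would come on top of this.
    int shared_bytes = 3 * block_threads * R * sizeof(float);

    stencil_launch(dg, db, shared_bytes, '3');
}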
0307c2f16b53912f6e8ce9bea9d4f0b72d08779f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions normal z -> c d s */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif // ELLPACK SpMV kernel //Michael Garland __global__ void zgeellmv_kernel( int num_rows, int num_cols, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = dcolind [ num_cols_per_row * row + n ]; magmaDoubleComplex val = dval [ num_cols_per_row * row + n ]; if( val != 0) dot += val * dx[col ]; } dy[ row ] = dot * alpha + beta * dy [ row ]; } } // shifted ELLPACK SpMV kernel //Michael Garland __global__ void zgeellmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = dcolind [ num_cols_per_row * row + n ]; magmaDoubleComplex val = dval [ num_cols_per_row * row + n ]; if( val != 0) dot += val * dx[col ]; } if( row<blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLPACK. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgeellmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( zgeellmv_kernel), dim3(grid), dim3(threads), 0, queue , m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELLPACK. It is the shifted version of the ELLPACK SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] lambda magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgeellmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, int offset, int blocksize, magmaIndex_ptr addrows, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( zgeellmv_kernel_shift), dim3(grid), dim3(threads), 0, queue , m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx, beta, offset, blocksize, addrows, dy ); return MAGMA_SUCCESS; }
0307c2f16b53912f6e8ce9bea9d4f0b72d08779f.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions normal z -> c d s */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif // ELLPACK SpMV kernel //Michael Garland __global__ void zgeellmv_kernel( int num_rows, int num_cols, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = dcolind [ num_cols_per_row * row + n ]; magmaDoubleComplex val = dval [ num_cols_per_row * row + n ]; if( val != 0) dot += val * dx[col ]; } dy[ row ] = dot * alpha + beta * dy [ row ]; } } // shifted ELLPACK SpMV kernel //Michael Garland __global__ void zgeellmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = dcolind [ num_cols_per_row * row + n ]; magmaDoubleComplex val = dval [ num_cols_per_row * row + n ]; if( val != 0) dot += val * dx[col ]; } if( row<blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLPACK. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgeellmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); magma_int_t threads = BLOCK_SIZE; zgeellmv_kernel<<< grid, threads, 0, queue >>> ( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELLPACK. It is the shifted version of the ELLPACK SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] lambda magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgeellmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, int offset, int blocksize, magmaIndex_ptr addrows, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); magma_int_t threads = BLOCK_SIZE; zgeellmv_kernel_shift<<< grid, threads, 0, queue >>> ( m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx, beta, offset, blocksize, addrows, dy ); return MAGMA_SUCCESS; }
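The kernels above read ELLPACK storage row-major: the n-th stored entry of row `row` sits at dval[num_cols_per_row * row + n], with its column index at the same offset of dcolind, and padded slots carry a zero value. A small CPU reference of the unshifted product, written only to make that layout explicit (plain float instead of magmaDoubleComplex, hypothetical function name):

// CPU reference of y = alpha*A*x + beta*y for the ELLPACK layout used above
// (illustrative sketch; the real MAGMA routines work in magmaDoubleComplex).
void ellpack_spmv_reference(int num_rows, int num_cols_per_row,
                            float alpha, const float* val, const int* colind,
                            const float* x, float beta, float* y)
{
    for (int row = 0; row < num_rows; ++row) {
        float dot = 0.f;
        for (int n = 0; n < num_cols_per_row; ++n) {
            int   col = colind[num_cols_per_row * row + n];  // padded slots have val == 0
            float v   = val[num_cols_per_row * row + n];
            if (v != 0.f)
                dot += v * x[col];
        }
        y[row] = alpha * dot + beta * y[row];
    }
}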
f3a71f5f74edf74dc5603f2c7d20055676ba16a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layers/projection_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ProjectionForward(const int n, const Dtype* in1, const Dtype* in2, Dtype* out1, Dtype* out2, const Dtype* norm) { CUDA_KERNEL_LOOP(index, n) { out1[index] = norm[index] > 1 ? in1[index]/norm[index] : in1[index]; out2[index] = norm[index] > 1 ? in2[index]/norm[index] : in2[index]; } } template <typename Dtype> void ProjectionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* xi1 = bottom[0]->gpu_data(); const Dtype* xi2 = bottom[1]->gpu_data(); Dtype* eta1 = top[0]->mutable_gpu_data(); Dtype* eta2 = top[1]->mutable_gpu_data(); const int count = bottom[0]->count(); //x1_s = xi1^2, x2_s = xi2^2 caffe_gpu_mul(count,xi1,xi1,xi1_s->mutable_gpu_data()); caffe_gpu_mul(count,xi2,xi2,xi2_s->mutable_gpu_data()); //norm_xi = xi1^2+xi2^2 caffe_gpu_add(count, xi1_s->cpu_data(),xi2_s->cpu_data(),norm_xi->mutable_cpu_data()); //norm_xi = sqrt(xi1^2+xi2^2) caffe_gpu_powx(count,norm_xi->gpu_data(),Dtype(0.5),norm_xi->mutable_gpu_data()); hipLaunchKernelGGL(( ProjectionForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, xi1, xi2, eta1, eta2, norm_xi->gpu_data()); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ProjectionBackward(const int n, const Dtype* top1_diff, const Dtype* bottom1_data, const Dtype* top2_diff, const Dtype* bottom2_data, Dtype* bottom1_diff, Dtype* bottom2_diff, const Dtype* norm) { CUDA_KERNEL_LOOP(index, n) { bottom1_diff[index] = norm[index] > 1 ? -top2_diff[index]*bottom1_data[index]*bottom2_data[index]*powf(norm[index],-3.0) + top1_diff[index]*(1/norm[index]-powf(bottom1_data[index],Dtype(2))*powf(norm[index],Dtype(-3.0))):top1_diff[index]; bottom2_diff[index] = norm[index] > 1 ? -top1_diff[index]*bottom1_data[index]*bottom2_data[index]*powf(norm[index],-3.0) + top2_diff[index]*(1/norm[index]-powf(bottom2_data[index],Dtype(2))*powf(norm[index],Dtype(-3.0))):top2_diff[index]; } } template <typename Dtype> void ProjectionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* xi1 = bottom[0]->gpu_data(); const Dtype* xi2 = bottom[1]->gpu_data(); const Dtype* norm = norm_xi->gpu_data(); const Dtype* eta1_diff = top[0]->gpu_diff(); const Dtype* eta2_diff = top[1]->gpu_diff(); const int count = bottom[0]->count(); Dtype* xi1_diff = bottom[0]->mutable_gpu_diff(); Dtype* xi2_diff = bottom[1]->mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ProjectionBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, eta1_diff, xi1, eta2_diff, xi2, xi1_diff, xi2_diff, norm); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ProjectionLayer); } // namespace caffe
f3a71f5f74edf74dc5603f2c7d20055676ba16a1.cu
#include <algorithm> #include <vector> #include "caffe/layers/projection_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ProjectionForward(const int n, const Dtype* in1, const Dtype* in2, Dtype* out1, Dtype* out2, const Dtype* norm) { CUDA_KERNEL_LOOP(index, n) { out1[index] = norm[index] > 1 ? in1[index]/norm[index] : in1[index]; out2[index] = norm[index] > 1 ? in2[index]/norm[index] : in2[index]; } } template <typename Dtype> void ProjectionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* xi1 = bottom[0]->gpu_data(); const Dtype* xi2 = bottom[1]->gpu_data(); Dtype* eta1 = top[0]->mutable_gpu_data(); Dtype* eta2 = top[1]->mutable_gpu_data(); const int count = bottom[0]->count(); //x1_s = xi1^2, x2_s = xi2^2 caffe_gpu_mul(count,xi1,xi1,xi1_s->mutable_gpu_data()); caffe_gpu_mul(count,xi2,xi2,xi2_s->mutable_gpu_data()); //norm_xi = xi1^2+xi2^2 caffe_gpu_add(count, xi1_s->cpu_data(),xi2_s->cpu_data(),norm_xi->mutable_cpu_data()); //norm_xi = sqrt(xi1^2+xi2^2) caffe_gpu_powx(count,norm_xi->gpu_data(),Dtype(0.5),norm_xi->mutable_gpu_data()); ProjectionForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, xi1, xi2, eta1, eta2, norm_xi->gpu_data()); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ProjectionBackward(const int n, const Dtype* top1_diff, const Dtype* bottom1_data, const Dtype* top2_diff, const Dtype* bottom2_data, Dtype* bottom1_diff, Dtype* bottom2_diff, const Dtype* norm) { CUDA_KERNEL_LOOP(index, n) { bottom1_diff[index] = norm[index] > 1 ? -top2_diff[index]*bottom1_data[index]*bottom2_data[index]*powf(norm[index],-3.0) + top1_diff[index]*(1/norm[index]-powf(bottom1_data[index],Dtype(2))*powf(norm[index],Dtype(-3.0))):top1_diff[index]; bottom2_diff[index] = norm[index] > 1 ? -top1_diff[index]*bottom1_data[index]*bottom2_data[index]*powf(norm[index],-3.0) + top2_diff[index]*(1/norm[index]-powf(bottom2_data[index],Dtype(2))*powf(norm[index],Dtype(-3.0))):top2_diff[index]; } } template <typename Dtype> void ProjectionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* xi1 = bottom[0]->gpu_data(); const Dtype* xi2 = bottom[1]->gpu_data(); const Dtype* norm = norm_xi->gpu_data(); const Dtype* eta1_diff = top[0]->gpu_diff(); const Dtype* eta2_diff = top[1]->gpu_diff(); const int count = bottom[0]->count(); Dtype* xi1_diff = bottom[0]->mutable_gpu_diff(); Dtype* xi2_diff = bottom[1]->mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) ProjectionBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, eta1_diff, xi1, eta2_diff, xi2, xi1_diff, xi2_diff, norm); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ProjectionLayer); } // namespace caffe
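Pointwise, the forward pass above leaves a pair (xi1, xi2) unchanged when sqrt(xi1^2 + xi2^2) <= 1 and otherwise divides both components by that norm, i.e. it projects each 2-vector onto the unit disk. A scalar sketch of that forward rule (illustrative only; the layer itself computes the norm with the caffe_gpu_* helpers shown above):

// Per-element forward rule of ProjectionForward, written out for one (xi1, xi2) pair.
__host__ __device__ inline void project_to_unit_disk(float xi1, float xi2,
                                                     float* eta1, float* eta2)
{
    float norm = sqrtf(xi1 * xi1 + xi2 * xi2);
    if (norm > 1.0f) {          // outside the unit disk: rescale onto its boundary
        *eta1 = xi1 / norm;
        *eta2 = xi2 / norm;
    } else {                    // inside: identity
        *eta1 = xi1;
        *eta2 = xi2;
    }
}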
3071e62bee34aa2b237dfb46d463dbf3ac33f98c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Reviewed
#include "Main.cuh"

extern int NRAD, NSEC, size_grid;

extern float *Label_d, *Rmed, *Rmed_d;

extern dim3 dimGrid2, dimBlock2;

__host__ void InitLabel (float *Label, PlanetarySystem *sys)
{
  double xp, yp, rhill, rp;
  xp = sys->x[0];
  yp = sys->y[0];
  rp = sqrt(xp*xp+yp*yp);
  rhill = rp * pow(sys->mass[0]/3., 1./3);

  hipLaunchKernelGGL(( InitLabelKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Label_d, xp, yp, rhill, Rmed_d, NRAD, NSEC);
  gpuErrchk(hipDeviceSynchronize());
}

__host__ void Initialization (float *Dens, float *Vrad, float *Vtheta, float *Energy, float *Label, PlanetarySystem *sys)
{
  InitEuler (Vrad, Vtheta, Dens, Energy);
  InitLabel (Label, sys);
  WriteDim();
}
3071e62bee34aa2b237dfb46d463dbf3ac33f98c.cu
// Reviewed
#include "Main.cuh"

extern int NRAD, NSEC, size_grid;

extern float *Label_d, *Rmed, *Rmed_d;

extern dim3 dimGrid2, dimBlock2;

__host__ void InitLabel (float *Label, PlanetarySystem *sys)
{
  double xp, yp, rhill, rp;
  xp = sys->x[0];
  yp = sys->y[0];
  rp = sqrt(xp*xp+yp*yp);
  rhill = rp * pow(sys->mass[0]/3., 1./3);

  InitLabelKernel<<<dimGrid2, dimBlock2>>>(Label_d, xp, yp, rhill, Rmed_d, NRAD, NSEC);
  gpuErrchk(cudaDeviceSynchronize());
}

__host__ void Initialization (float *Dens, float *Vrad, float *Vtheta, float *Energy, float *Label, PlanetarySystem *sys)
{
  InitEuler (Vrad, Vtheta, Dens, Energy);
  InitLabel (Label, sys);
  WriteDim();
}
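The gpuErrchk macro wrapped around cudaDeviceSynchronize / hipDeviceSynchronize above is defined elsewhere in this project. A common definition for such a macro looks like the sketch below; this is an assumption about the surrounding codebase, not a quote from it:

// Typical error-checking macro of the kind gpuErrchk usually expands to
// (assumed definition; the real one lives in this project's headers).
#include <cstdio>
#include <cstdlib>

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}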
c2a449229a3fb78badb609d170d107012c92077f.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <sys/time.h>

/*
 * hostmultiply.cu
 * Copyright 2012 Guy Dickinson <[email protected]>
 *
 * Written for "GPUs: Architecture and Programming"
 * Prof. M. Zahran, New York University
 *
 * Derived in part from code in "Programming Massively Parallel Processors:
 * A Hands-On Approach" by David Kirk and Wen-mei Hwu.
 */

// Vanilla matrix multiplication on the host
void matrixMulOnHost(float* M, float* N, float* P, int width) {
    for (int i = 0; i < width; ++i)
        for (int j = 0; j < width; ++j) {
            double sum = 0;
            for (int k = 0; k < width; ++k) {
                double a = M[i * width + k];
                double b = N[k * width + j];
                sum += a * b;
            }
            P[i * width + j] = sum;
        }
}

// Matrix Multiplication Kernel
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int width) {
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    float pvalue = 0;
    for (int k = 0; k < width; ++k) {
        float Mdelement = Md[ty * width + k];
        float Ndelement = Nd[ty * width + k];
        pvalue += Mdelement * Ndelement;
    }
    Pd[ty * width + tx] = pvalue;
}

// M and N are matrices to be multiplied
// P is the result
void cudaMatrixMul(float* M, float* N, float* P, int width) {
    int size = width * width * sizeof(float);
    float* Md;
    float* Nd;
    float* Pd;

    // Transfer M and N to device memory
    hipMalloc(&Md, size);
    hipMemcpy(Md, M, size, hipMemcpyHostToDevice);
    hipMalloc(&Nd, size);
    hipMemcpy(Nd, N, size, hipMemcpyHostToDevice);

    // Allocate P on the device
    hipMalloc(&Pd, size);

    // Invocation
    dim3 dimBlock(width, width);
    dim3 dimGrid(1, 1);
    hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, width);

    // Transfer P from device to host
    hipMemcpy(P, Pd, size, hipMemcpyDeviceToHost);

    // Free device matrices
    hipFree(Md);
    hipFree(Pd);
    hipFree(Nd);
}

void runTest(void) {
    int doublings = 8;
    int widths[doublings];
    for (int i = 1; i <= doublings; i++) {
        widths[i - 1] = pow(2, i + 2);
    }

    for (int i = 0; i < doublings; i++) {
        int width = widths[i];
        int size = width * width * sizeof(float);
        timeval serialStart, serialEnd;
        timeval parallelStart, parallelEnd;
        double serialElapsedTime;
        double parallelElapsedTime;

        float *m;
        float *n;
        float *p;

        m = (float*) malloc(size);
        n = (float*) malloc(size);
        p = (float*) malloc(size);

        for (int j = 0; j < width * width; j++) {
            n[j] = 0.0;
            m[j] = 1.0;
        }

        gettimeofday(&serialStart, NULL);
        matrixMulOnHost(m, n, p, width);
        gettimeofday(&serialEnd, NULL);

        gettimeofday(&parallelStart, NULL);
        cudaMatrixMul(m, n, p, width);
        gettimeofday(&parallelEnd, NULL);

        serialElapsedTime = (serialEnd.tv_sec - serialStart.tv_sec) * 1000.0;
        serialElapsedTime += (serialEnd.tv_usec - serialStart.tv_usec) / 1000.0;

        parallelElapsedTime = (parallelEnd.tv_sec - parallelStart.tv_sec) * 1000.0;
        parallelElapsedTime += (parallelEnd.tv_usec - parallelStart.tv_usec) / 1000.0;

        double speedup = (serialElapsedTime / parallelElapsedTime) * 100.0;
        printf("%d x %d: Serial: %f\t\tParallel %f\t(%f%% Speedup)\n",
               width, width, serialElapsedTime, parallelElapsedTime, speedup);

        free(m);
        free(n);
    }
}

int main(void) {
    runTest();
}
c2a449229a3fb78badb609d170d107012c92077f.cu
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <sys/time.h>

/*
 * hostmultiply.cu
 * Copyright 2012 Guy Dickinson <[email protected]>
 *
 * Written for "GPUs: Architecture and Programming"
 * Prof. M. Zahran, New York University
 *
 * Derived in part from code in "Programming Massively Parallel Processors:
 * A Hands-On Approach" by David Kirk and Wen-mei Hwu.
 */

// Vanilla matrix multiplication on the host
void matrixMulOnHost(float* M, float* N, float* P, int width) {
    for (int i = 0; i < width; ++i)
        for (int j = 0; j < width; ++j) {
            double sum = 0;
            for (int k = 0; k < width; ++k) {
                double a = M[i * width + k];
                double b = N[k * width + j];
                sum += a * b;
            }
            P[i * width + j] = sum;
        }
}

// Matrix Multiplication Kernel
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int width) {
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    float pvalue = 0;
    for (int k = 0; k < width; ++k) {
        float Mdelement = Md[ty * width + k];
        float Ndelement = Nd[ty * width + k];
        pvalue += Mdelement * Ndelement;
    }
    Pd[ty * width + tx] = pvalue;
}

// M and N are matrices to be multiplied
// P is the result
void cudaMatrixMul(float* M, float* N, float* P, int width) {
    int size = width * width * sizeof(float);
    float* Md;
    float* Nd;
    float* Pd;

    // Transfer M and N to device memory
    cudaMalloc(&Md, size);
    cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
    cudaMalloc(&Nd, size);
    cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);

    // Allocate P on the device
    cudaMalloc(&Pd, size);

    // Invocation
    dim3 dimBlock(width, width);
    dim3 dimGrid(1, 1);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);

    // Transfer P from device to host
    cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);

    // Free device matrices
    cudaFree(Md);
    cudaFree(Pd);
    cudaFree(Nd);
}

void runTest(void) {
    int doublings = 8;
    int widths[doublings];
    for (int i = 1; i <= doublings; i++) {
        widths[i - 1] = pow(2, i + 2);
    }

    for (int i = 0; i < doublings; i++) {
        int width = widths[i];
        int size = width * width * sizeof(float);
        timeval serialStart, serialEnd;
        timeval parallelStart, parallelEnd;
        double serialElapsedTime;
        double parallelElapsedTime;

        float *m;
        float *n;
        float *p;

        m = (float*) malloc(size);
        n = (float*) malloc(size);
        p = (float*) malloc(size);

        for (int j = 0; j < width * width; j++) {
            n[j] = 0.0;
            m[j] = 1.0;
        }

        gettimeofday(&serialStart, NULL);
        matrixMulOnHost(m, n, p, width);
        gettimeofday(&serialEnd, NULL);

        gettimeofday(&parallelStart, NULL);
        cudaMatrixMul(m, n, p, width);
        gettimeofday(&parallelEnd, NULL);

        serialElapsedTime = (serialEnd.tv_sec - serialStart.tv_sec) * 1000.0;
        serialElapsedTime += (serialEnd.tv_usec - serialStart.tv_usec) / 1000.0;

        parallelElapsedTime = (parallelEnd.tv_sec - parallelStart.tv_sec) * 1000.0;
        parallelElapsedTime += (parallelEnd.tv_usec - parallelStart.tv_usec) / 1000.0;

        double speedup = (serialElapsedTime / parallelElapsedTime) * 100.0;
        printf("%d x %d: Serial: %f\t\tParallel %f\t(%f%% Speedup)\n",
               width, width, serialElapsedTime, parallelElapsedTime, speedup);

        free(m);
        free(n);
    }
}

int main(void) {
    runTest();
}
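Note that MatrixMulKernel as written reads both operands by row (Md[ty * width + k] and Nd[ty * width + k]) and launches a single width x width block, which also runs past the 1024-threads-per-block limit once width exceeds 32, while matrixMulOnHost is the conventional row-times-column product. The sketch below shows what a kernel matching the host reference and scaling beyond one block conventionally looks like; it is an illustration of the standard pattern, not the author's code.

// Conventional dense matmul kernel matching matrixMulOnHost (sketch).
// Each thread computes one output element P[row][col].
#define TILE 16  // illustrative block edge; any value with TILE*TILE <= 1024 works

__global__ void MatrixMulKernelRef(const float* Md, const float* Nd, float* Pd, int width)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= width || col >= width) return;

    float pvalue = 0.0f;
    for (int k = 0; k < width; ++k)
        pvalue += Md[row * width + k] * Nd[k * width + col];  // row of M times column of N
    Pd[row * width + col] = pvalue;
}

// Launch sketch: cover the whole matrix with TILE x TILE blocks.
// dim3 block(TILE, TILE);
// dim3 grid((width + TILE - 1) / TILE, (width + TILE - 1) / TILE);
// MatrixMulKernelRef<<<grid, block>>>(Md, Nd, Pd, width);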
98611e2eca9e9e6e058806be1c2b943800523491.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Inter-block reduction. // // Function gridReduce performs point-wise reductions of scalars across thread // blocks. Thread blocks are disjointly partitioned into groups of thread // blocks, "reduction segments," that are collectively defined by boolean // template parameters, X_BLOCK, Y_BLOCK and Z_BLOCK. Each of X/Y/Z_BLOCK // determines whether thread blocks along the dimension should be grouped into // the same reduction segment. Cross-block reducitons are independently done // within each segment and generates distinctive results per segment. For // instance, if all of X/Y/Z_BLOCK are true, reductions will be done across all // thread blocks since there will be just a single segment consisting of all // thread blocks. If none of them are true, each thread block will become a // segment by itself, so no reduction will be performed. // // The input scalars to reduce within each segment are a certain subset of // thread-private scalars provided as part of the gridReduce function // parameters. Boolean template parameters, X_THREAD, Y_THREAD and Z_THREAD, // determine which subset of the scalars should be used for inter-block // reductions. Specifically, all the input scalars of threads along each // dimension will be used when X/Y/Z_THREAD are true. Otherwise, only the value // held at offset 0 of each dimension will be used. Thus, for example, if all of // X/Y/Z_THREAD are true, the scalars of all threads in each block will // participate in inter-block reductions. If all of them are false, only one // scalar of the thread at threadIdx.x == threadIdx.y == threadIdx.z == 0 will // be used. In the code below, we call the subset of threads a "reduction // block." // // Inter-block reductions perform point-wise reductions of scalars of reduction // blocks within each reduction segment. More specifically, let rb be a // reduction block and rs be a reduction segment. Let IN(thread_idx, block_idx) // denote the input scalar of thread at thread_idx and block_idx. The result of // each reduction segment, OUT(thread_idx, block_idx_out), is defined only for // each thread_idx in thread block block_idx_out in the segment as follows: // // OUT(thread_idx, block_idx_out) = // Reduction of IN(thread_idx, block_idx) for // all block_idx in a reduction segment // // OUT is not given for all threads that are not in block_idx_out and the // reduction block. // // See also the function comment of gridReduce. namespace reduction { // Utility functions template <typename _dim3> __device__ __forceinline__ size_t size(const _dim3& d) { return (size_t)d.x * (size_t)d.y * (size_t)d.z; } #define isize(d) d.x* d.y* d.z template <typename _dim3pos, typename _dim3dim> __device__ __forceinline__ size_t offset(const _dim3pos& pos, const _dim3dim& dim) { return (size_t)pos.x + (size_t)pos.y * (size_t)dim.x + (size_t)pos.z * (size_t)dim.x * (size_t)dim.y; } #define ioffset(pos, dim) pos.x + pos.y* dim.x + pos.z* dim.x* dim.y // Returns dim3 of each reduction segment. template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __device__ dim3 dimension_of_reduction_segment(const _dim3& grid_dim) { return dim3{X_BLOCK ? grid_dim.x : 1, Y_BLOCK ? grid_dim.y : 1, Z_BLOCK ? grid_dim.z : 1}; } // Returns the number of blocks in each reduction segment. 
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __device__ size_t size_of_reduction_segment(const _dim3& grid_dim) { return size( dimension_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(grid_dim)); } // Returns the total number of reduction segments. template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __device__ size_t number_of_reduction_segments(const _dim3& grid_dim) { return (X_BLOCK ? 1 : grid_dim.x) * (Y_BLOCK ? 1 : grid_dim.y) * (Z_BLOCK ? 1 : grid_dim.z); } // Returns the 1-D index of the segment of thread block of block_idx. template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3bi, typename _dim3gd> __device__ size_t index_of_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) { size_t seg_idx = 0; if (!Z_BLOCK) seg_idx += block_idx.z; if (!Y_BLOCK) seg_idx = seg_idx * grid_dim.y + block_idx.y; if (!X_BLOCK) seg_idx = seg_idx * grid_dim.x + block_idx.x; return seg_idx; } // Returns the offset of thread block in its reduction segment. template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3bi, typename _dim3gd> __device__ size_t offset_in_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) { size_t offset = 0; if (Z_BLOCK) offset = offset * grid_dim.z + block_idx.z; if (Y_BLOCK) offset = offset * grid_dim.y + block_idx.y; if (X_BLOCK) offset = offset * grid_dim.x + block_idx.x; return offset; } // Returns dim3 of each reduction block. template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3> __device__ dim3 dimension_of_reduction_block(const _dim3& block_dim) { return dim3{X_THREAD ? block_dim.x : 1, Y_THREAD ? block_dim.y : 1, Z_THREAD ? block_dim.z : 1}; } // Returns the number of threads of each reduction block. template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3> __device__ int size_of_reduction_block(const _dim3& block_dim) { auto tmp_dim = dimension_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(block_dim); return isize(tmp_dim); } // Returns the linear offset of a thread in a reduction block. template < bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3ti, typename _dim3bd> __device__ int offset_in_reduction_block( const _dim3ti& thread_idx, const _dim3bd& block_dim) { int offset = 0; if (Z_THREAD) offset += thread_idx.z; if (Y_THREAD) offset = offset * block_dim.y + thread_idx.y; if (X_THREAD) offset = offset * block_dim.x + thread_idx.x; return offset; } // Reduces all the reduction blocks in each reduction segment. // // This is only used by one thread block per reduction segment. The input // reduction blocks of the segment are stored in an intermediate buffer pointed // by parameter in. Template parameters X/Y/Z_THREAD denote how the reduction // block is formed. // // The size of a reduction block is by definition smaller or equal to the size // of a thread block. We use the remaining threads to parallelize reductions // across reduction blocks. For example, when X/Y/Z_THREAD = {true, false, // false}, we use blockDim.y*blockDim.z threads for each output value. This is // done first by loading the input values in parallel and then by reducing // across threads of dimensions whose XYZ_THREAD are false. // // Note that what is done here after the loading from global memory is similar // to what the existing blockReduce function does. 
The main difference is that // the logical block to reduce is a 2D domain where the leading dimension is the // size of a reduction block and the second dimension is the remaining factor in // each thread block. For example, when X/Y/Z_THREAD = {false, true, false}, the // threads are arranged as (blockDim.y, blockDim.x*blockDim.z). We do not reduce // along the first dimension but only the second dimension. So, it is possible // to reuse the existing blockReduce with dim3{blockDim.y, // blockDim.x*blockDim.z} instead of blockDim and with X_THREAD and Y_THREAD // being false and true, respectively. Also, it still need to shuffle the final // output values to their actual corresponding threads. In the case of when // X/Y/Z_THREAD = {false, true, false}, after the intra-block reduction, the // final results will still be held by the first blockDim.y threads, which need // to be transferred to threads at threadIdx.x == 0 and threadIdx.z == 0. template < bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename T, typename Func> __device__ void gridReduceLastBlock( T& out, const T* in, const size_t in_size, Func reduction_op, T* shared_buf, bool read_write_pred, T init_val) { const int tid = ioffset(threadIdx, blockDim); const int block_size = isize(blockDim); const int rblock_size = size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim); T inp = init_val; if (tid < in_size) { inp = in[tid]; } for (size_t i = tid + block_size; i < in_size; i += block_size) { reduction_op(inp, in[i]); } const auto should_write = (X_THREAD || threadIdx.x == 0) && (Y_THREAD || threadIdx.y == 0) && (Z_THREAD || threadIdx.z == 0); auto rem_size = block_size / rblock_size; if (rem_size > 1) { const int rblock_offset = tid % rblock_size; const int rblock_idx = tid / rblock_size; blockReduce<false, true, false>( inp, inp, reduction_op, dim3{(unsigned)rblock_offset, (unsigned)rblock_idx, 0}, dim3{(unsigned)rblock_size, (unsigned)rem_size}, shared_buf, true, init_val); __syncthreads(); if (tid < rblock_size) { shared_buf[tid] = inp; } __syncthreads(); if (should_write) { inp = shared_buf[offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>( threadIdx, blockDim)]; } } if (should_write && read_write_pred) { out = inp; } } // Reduces per-thread values across thread blocks. // // Function parameters: // - out: Per-thread output location // - inp_val: Per-thread input value // - reduction_op: Scalar reduction function // - work_buf: Temporary buffer for cross-block reductions // - sync_flags: A vector of integers for synchronizations // - shared_buf: Shared memory buffer for intra-block reduction // // Return true when the thread block has the valid result. // // Template parameters: // - X/Y/Z_BLOCK: When true, reduces across thread blocks along the X/Y/Z // dimensions // - X/Y/Z_THREAD: When true, all threads along the X/Y/Z dimensions participate // in the cross-block reduction. Otherwise, only threads at offset 0 do. // - T: Scalar data type of input/output data // - Func: Type of scalara reduction function // // Template parameters X/Y/Z_BLOCK define a group of thread blocks that are // reduced together. We call it a reduction segment. Some examples are: // // Case 1: X/Y/Z_BLOCK == true/true/true -> There is only one segment, which // includes all thread blocks. It is effecively the same as the grid. // // Case 2: X/Y/Z_BLOCK == false/false/false -> Each thread block comprises an // individual segment by itself. 
// // Case 3: X/Y/Z_BLOCK == true/false/false -> Each segment contains thread // blocks that have the same blockDim.x. There will be blockDim.y*blockDim.z // such segments. // // X/Y/Z_THREAD defines a sub region of a thread block that should be reduced // with the sub regions of other thread blocks. We call it a reduction block. // E.g., // // Case 1: X/Y/Z_THREAD == false/false/false -> Only thread 0 participates in // the cross-block reductions. The reduction block is 1x1x1 with thread 0. // // Case 2: X/Y/Z_THREAD == true/true/true-> All threads in a thread block // participate in the cross-block reductions. The reduction block in this case // is equivalent to the thread block. // // After the function completes, only one thread block per reduction segment // gets valid reduction results. There is no guarantee which particular block // gets the final results. // template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename T, typename Func> __device__ bool gridReduce( T& out, T inp_val, Func reduction_op, volatile T* work_buf, Tensor<int64_t, 1> sync_flags, T* shared_buf, bool read_write_pred, T init_val) { // Number of values to reduce in the grid dimensions const auto seg_size = size_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(gridDim); // Index of the reduction we're performing out of the seg_size const auto seg_idx = index_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(blockIdx, gridDim); // Number of threads we can use in final reduction, Seems to assume all // threads in the block participate const auto rblock_size = size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim); // advance to the offset for this segment // index of reduction * size of the reduction * size of threads work_buf += seg_idx * seg_size * rblock_size; if ((X_THREAD || threadIdx.x == 0) && (Y_THREAD || threadIdx.y == 0) && (Z_THREAD || threadIdx.z == 0)) { auto rblock_offset = offset_in_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>( blockIdx, gridDim); auto thread_offset = offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>( threadIdx, blockDim); auto work_buf_offset = rblock_size * rblock_offset + thread_offset; if (read_write_pred) { work_buf[work_buf_offset] = inp_val; } else { work_buf[work_buf_offset] = init_val; } } __syncthreads(); __shared__ bool last_block; if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { __threadfence(); // printf("%ld\n", sync_flags[seg_idx]); auto old = (int64_t)atomicAdd((unsigned long long*)&sync_flags[seg_idx], 1); last_block = old + 1 == seg_size; // printf("Last_block = %d + 1 == %d\n", (int)old, (int)seg_size); } __syncthreads(); if (last_block) { // printf("Last block %d %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z); // final reduction gridReduceLastBlock<X_THREAD, Y_THREAD, Z_THREAD>( out, (T*)work_buf, seg_size * rblock_size, reduction_op, shared_buf, read_write_pred, init_val); return true; } else { // printf("Not last block %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z); return false; } } } // namespace reduction
98611e2eca9e9e6e058806be1c2b943800523491.cu
// Inter-block reduction. // // Function gridReduce performs point-wise reductions of scalars across thread // blocks. Thread blocks are disjointly partitioned into groups of thread // blocks, "reduction segments," that are collectively defined by boolean // template parameters, X_BLOCK, Y_BLOCK and Z_BLOCK. Each of X/Y/Z_BLOCK // determines whether thread blocks along the dimension should be grouped into // the same reduction segment. Cross-block reducitons are independently done // within each segment and generates distinctive results per segment. For // instance, if all of X/Y/Z_BLOCK are true, reductions will be done across all // thread blocks since there will be just a single segment consisting of all // thread blocks. If none of them are true, each thread block will become a // segment by itself, so no reduction will be performed. // // The input scalars to reduce within each segment are a certain subset of // thread-private scalars provided as part of the gridReduce function // parameters. Boolean template parameters, X_THREAD, Y_THREAD and Z_THREAD, // determine which subset of the scalars should be used for inter-block // reductions. Specifically, all the input scalars of threads along each // dimension will be used when X/Y/Z_THREAD are true. Otherwise, only the value // held at offset 0 of each dimension will be used. Thus, for example, if all of // X/Y/Z_THREAD are true, the scalars of all threads in each block will // participate in inter-block reductions. If all of them are false, only one // scalar of the thread at threadIdx.x == threadIdx.y == threadIdx.z == 0 will // be used. In the code below, we call the subset of threads a "reduction // block." // // Inter-block reductions perform point-wise reductions of scalars of reduction // blocks within each reduction segment. More specifically, let rb be a // reduction block and rs be a reduction segment. Let IN(thread_idx, block_idx) // denote the input scalar of thread at thread_idx and block_idx. The result of // each reduction segment, OUT(thread_idx, block_idx_out), is defined only for // each thread_idx in thread block block_idx_out in the segment as follows: // // OUT(thread_idx, block_idx_out) = // Reduction of IN(thread_idx, block_idx) for // all block_idx in a reduction segment // // OUT is not given for all threads that are not in block_idx_out and the // reduction block. // // See also the function comment of gridReduce. namespace reduction { // Utility functions template <typename _dim3> __device__ __forceinline__ size_t size(const _dim3& d) { return (size_t)d.x * (size_t)d.y * (size_t)d.z; } #define isize(d) d.x* d.y* d.z template <typename _dim3pos, typename _dim3dim> __device__ __forceinline__ size_t offset(const _dim3pos& pos, const _dim3dim& dim) { return (size_t)pos.x + (size_t)pos.y * (size_t)dim.x + (size_t)pos.z * (size_t)dim.x * (size_t)dim.y; } #define ioffset(pos, dim) pos.x + pos.y* dim.x + pos.z* dim.x* dim.y // Returns dim3 of each reduction segment. template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __device__ dim3 dimension_of_reduction_segment(const _dim3& grid_dim) { return dim3{X_BLOCK ? grid_dim.x : 1, Y_BLOCK ? grid_dim.y : 1, Z_BLOCK ? grid_dim.z : 1}; } // Returns the number of blocks in each reduction segment. 
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __device__ size_t size_of_reduction_segment(const _dim3& grid_dim) { return size( dimension_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(grid_dim)); } // Returns the total number of reduction segments. template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __device__ size_t number_of_reduction_segments(const _dim3& grid_dim) { return (X_BLOCK ? 1 : grid_dim.x) * (Y_BLOCK ? 1 : grid_dim.y) * (Z_BLOCK ? 1 : grid_dim.z); } // Returns the 1-D index of the segment of thread block of block_idx. template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3bi, typename _dim3gd> __device__ size_t index_of_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) { size_t seg_idx = 0; if (!Z_BLOCK) seg_idx += block_idx.z; if (!Y_BLOCK) seg_idx = seg_idx * grid_dim.y + block_idx.y; if (!X_BLOCK) seg_idx = seg_idx * grid_dim.x + block_idx.x; return seg_idx; } // Returns the offset of thread block in its reduction segment. template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3bi, typename _dim3gd> __device__ size_t offset_in_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) { size_t offset = 0; if (Z_BLOCK) offset = offset * grid_dim.z + block_idx.z; if (Y_BLOCK) offset = offset * grid_dim.y + block_idx.y; if (X_BLOCK) offset = offset * grid_dim.x + block_idx.x; return offset; } // Returns dim3 of each reduction block. template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3> __device__ dim3 dimension_of_reduction_block(const _dim3& block_dim) { return dim3{X_THREAD ? block_dim.x : 1, Y_THREAD ? block_dim.y : 1, Z_THREAD ? block_dim.z : 1}; } // Returns the number of threads of each reduction block. template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3> __device__ int size_of_reduction_block(const _dim3& block_dim) { auto tmp_dim = dimension_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(block_dim); return isize(tmp_dim); } // Returns the linear offset of a thread in a reduction block. template < bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3ti, typename _dim3bd> __device__ int offset_in_reduction_block( const _dim3ti& thread_idx, const _dim3bd& block_dim) { int offset = 0; if (Z_THREAD) offset += thread_idx.z; if (Y_THREAD) offset = offset * block_dim.y + thread_idx.y; if (X_THREAD) offset = offset * block_dim.x + thread_idx.x; return offset; } // Reduces all the reduction blocks in each reduction segment. // // This is only used by one thread block per reduction segment. The input // reduction blocks of the segment are stored in an intermediate buffer pointed // by parameter in. Template parameters X/Y/Z_THREAD denote how the reduction // block is formed. // // The size of a reduction block is by definition smaller or equal to the size // of a thread block. We use the remaining threads to parallelize reductions // across reduction blocks. For example, when X/Y/Z_THREAD = {true, false, // false}, we use blockDim.y*blockDim.z threads for each output value. This is // done first by loading the input values in parallel and then by reducing // across threads of dimensions whose XYZ_THREAD are false. // // Note that what is done here after the loading from global memory is similar // to what the existing blockReduce function does. 
The main difference is that // the logical block to reduce is a 2D domain where the leading dimension is the // size of a reduction block and the second dimension is the remaining factor in // each thread block. For example, when X/Y/Z_THREAD = {false, true, false}, the // threads are arranged as (blockDim.y, blockDim.x*blockDim.z). We do not reduce // along the first dimension but only the second dimension. So, it is possible // to reuse the existing blockReduce with dim3{blockDim.y, // blockDim.x*blockDim.z} instead of blockDim and with X_THREAD and Y_THREAD // being false and true, respectively. Also, it still need to shuffle the final // output values to their actual corresponding threads. In the case of when // X/Y/Z_THREAD = {false, true, false}, after the intra-block reduction, the // final results will still be held by the first blockDim.y threads, which need // to be transferred to threads at threadIdx.x == 0 and threadIdx.z == 0. template < bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename T, typename Func> __device__ void gridReduceLastBlock( T& out, const T* in, const size_t in_size, Func reduction_op, T* shared_buf, bool read_write_pred, T init_val) { const int tid = ioffset(threadIdx, blockDim); const int block_size = isize(blockDim); const int rblock_size = size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim); T inp = init_val; if (tid < in_size) { inp = in[tid]; } for (size_t i = tid + block_size; i < in_size; i += block_size) { reduction_op(inp, in[i]); } const auto should_write = (X_THREAD || threadIdx.x == 0) && (Y_THREAD || threadIdx.y == 0) && (Z_THREAD || threadIdx.z == 0); auto rem_size = block_size / rblock_size; if (rem_size > 1) { const int rblock_offset = tid % rblock_size; const int rblock_idx = tid / rblock_size; blockReduce<false, true, false>( inp, inp, reduction_op, dim3{(unsigned)rblock_offset, (unsigned)rblock_idx, 0}, dim3{(unsigned)rblock_size, (unsigned)rem_size}, shared_buf, true, init_val); __syncthreads(); if (tid < rblock_size) { shared_buf[tid] = inp; } __syncthreads(); if (should_write) { inp = shared_buf[offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>( threadIdx, blockDim)]; } } if (should_write && read_write_pred) { out = inp; } } // Reduces per-thread values across thread blocks. // // Function parameters: // - out: Per-thread output location // - inp_val: Per-thread input value // - reduction_op: Scalar reduction function // - work_buf: Temporary buffer for cross-block reductions // - sync_flags: A vector of integers for synchronizations // - shared_buf: Shared memory buffer for intra-block reduction // // Return true when the thread block has the valid result. // // Template parameters: // - X/Y/Z_BLOCK: When true, reduces across thread blocks along the X/Y/Z // dimensions // - X/Y/Z_THREAD: When true, all threads along the X/Y/Z dimensions participate // in the cross-block reduction. Otherwise, only threads at offset 0 do. // - T: Scalar data type of input/output data // - Func: Type of scalara reduction function // // Template parameters X/Y/Z_BLOCK define a group of thread blocks that are // reduced together. We call it a reduction segment. Some examples are: // // Case 1: X/Y/Z_BLOCK == true/true/true -> There is only one segment, which // includes all thread blocks. It is effecively the same as the grid. // // Case 2: X/Y/Z_BLOCK == false/false/false -> Each thread block comprises an // individual segment by itself. 
// // Case 3: X/Y/Z_BLOCK == true/false/false -> Each segment contains thread // blocks that have the same blockDim.x. There will be blockDim.y*blockDim.z // such segments. // // X/Y/Z_THREAD defines a sub region of a thread block that should be reduced // with the sub regions of other thread blocks. We call it a reduction block. // E.g., // // Case 1: X/Y/Z_THREAD == false/false/false -> Only thread 0 participates in // the cross-block reductions. The reduction block is 1x1x1 with thread 0. // // Case 2: X/Y/Z_THREAD == true/true/true-> All threads in a thread block // participate in the cross-block reductions. The reduction block in this case // is equivalent to the thread block. // // After the function completes, only one thread block per reduction segment // gets valid reduction results. There is no guarantee which particular block // gets the final results. // template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename T, typename Func> __device__ bool gridReduce( T& out, T inp_val, Func reduction_op, volatile T* work_buf, Tensor<int64_t, 1> sync_flags, T* shared_buf, bool read_write_pred, T init_val) { // Number of values to reduce in the grid dimensions const auto seg_size = size_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(gridDim); // Index of the reduction we're performing out of the seg_size const auto seg_idx = index_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(blockIdx, gridDim); // Number of threads we can use in final reduction, Seems to assume all // threads in the block participate const auto rblock_size = size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim); // advance to the offset for this segment // index of reduction * size of the reduction * size of threads work_buf += seg_idx * seg_size * rblock_size; if ((X_THREAD || threadIdx.x == 0) && (Y_THREAD || threadIdx.y == 0) && (Z_THREAD || threadIdx.z == 0)) { auto rblock_offset = offset_in_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>( blockIdx, gridDim); auto thread_offset = offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>( threadIdx, blockDim); auto work_buf_offset = rblock_size * rblock_offset + thread_offset; if (read_write_pred) { work_buf[work_buf_offset] = inp_val; } else { work_buf[work_buf_offset] = init_val; } } __syncthreads(); __shared__ bool last_block; if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { __threadfence(); // printf("%ld\n", sync_flags[seg_idx]); auto old = (int64_t)atomicAdd((unsigned long long*)&sync_flags[seg_idx], 1); last_block = old + 1 == seg_size; // printf("Last_block = %d + 1 == %d\n", (int)old, (int)seg_size); } __syncthreads(); if (last_block) { // printf("Last block %d %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z); // final reduction gridReduceLastBlock<X_THREAD, Y_THREAD, Z_THREAD>( out, (T*)work_buf, seg_size * rblock_size, reduction_op, shared_buf, read_write_pred, init_val); return true; } else { // printf("Not last block %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z); return false; } } } // namespace reduction
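/*
  A small host-side sizing sketch for the gridReduce runtime above. It is not part of
  the original file; the struct and function names are illustrative. It only restates
  the indexing math used for work_buf and sync_flags: the T-typed work buffer needs
  num_segments * blocks_per_segment * reduction_block_size elements, and one int64_t
  sync flag is needed per reduction segment.
*/
#include <cstddef>

struct GridReduceBufferSizes {
  size_t sync_flag_count;   // one flag per reduction segment
  size_t work_buf_elements; // elements of T in the global work buffer
};

inline GridReduceBufferSizes grid_reduce_buffer_sizes(
    unsigned gdimx, unsigned gdimy, unsigned gdimz, // launch gridDim
    unsigned bdimx, unsigned bdimy, unsigned bdimz, // launch blockDim
    bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK,
    bool X_THREAD, bool Y_THREAD, bool Z_THREAD) {
  // Blocks grouped into one segment (size_of_reduction_segment).
  size_t seg_size = (size_t)(X_BLOCK ? gdimx : 1) * (Y_BLOCK ? gdimy : 1) *
      (Z_BLOCK ? gdimz : 1);
  // Number of independent segments (number_of_reduction_segments).
  size_t num_segments = (size_t)(X_BLOCK ? 1 : gdimx) * (Y_BLOCK ? 1 : gdimy) *
      (Z_BLOCK ? 1 : gdimz);
  // Threads per block that contribute a value (size_of_reduction_block).
  size_t rblock_size = (size_t)(X_THREAD ? bdimx : 1) * (Y_THREAD ? bdimy : 1) *
      (Z_THREAD ? bdimz : 1);
  return {num_segments, num_segments * seg_size * rblock_size};
}

// Example: gridDim = (128, 4, 1), blockDim = (32, 8, 1), reducing across blockIdx.x only
// (X_BLOCK true) with every thread participating (X/Y/Z_THREAD all true) gives 4
// segments, 128 blocks per segment, a 256-thread reduction block, and a work buffer of
// 4 * 128 * 256 = 131072 elements of T.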
5a43200e0f19fa125f885cef2d4f2e353f6c417e.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <hip/hip_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive INT addition access if((i%32)<=27){ #pragma unroll 100 for(unsigned k=0; k<iterations;k++) { Value2= I1+I2; Value3=I1-I2; Value1-=Value2; Value3+=Value1; Value2-=Value3; Value1+=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( hipMalloc((void**)&d_A, size) ); checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); printf("after\n"); hipEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(hipEventRecord(start)); hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, 
stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); hipDeviceSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random unsigned entries. void RandomInit(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } }
5a43200e0f19fa125f885cef2d4f2e353f6c417e.cu
// Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <cuda_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive INT addition access if((i%32)<=27){ #pragma unroll 100 for(unsigned k=0; k<iterations;k++) { Value2= I1+I2; Value3=I1-I2; Value1-=Value2; Value3+=Value1; Value2-=Value3; Value1+=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( cudaMalloc((void**)&d_A, size) ); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); printf("after\n"); cudaEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(cudaEventRecord(start)); PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations); checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); 
getLastCudaError("kernel launch failure"); cudaThreadSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random unsigned entries. void RandomInit(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } }
eeba1f2509f88ba6d3be2d6de23a6b94196a1e8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from ztranspose.cu normal z -> c, Sat Nov 15 19:53:59 2014 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_c #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void ctranspose_device( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { __shared__ magmaFloatComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void ctranspose_kernel( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { ctranspose_device(m, n, A, lda, AT, ldat); } __global__ void ctranspose_kernel_batched( int m, int n, magmaFloatComplex **dA_array, int lda, magmaFloatComplex **dAT_array, int ldat) { int batchid = blockIdx.z; ctranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /** Purpose ------- ctranspose_q copies and transposes a matrix dA to matrix dAT. Same as ctranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT COMPLEX array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_ctranspose_q( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB ); hipLaunchKernelGGL(( ctranspose_kernel), dim3(grid), dim3(threads), 0, queue , m, n, dA, ldda, dAT, lddat ); } /** @see magmablas_ctranspose_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_ctranspose( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dAT, magma_int_t lddat ) { magmablas_ctranspose_q( m, n, dA, ldda, dAT, lddat, magma_stream ); } /** Purpose ------- ctranspose_batched_q copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as ctranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_ctranspose_batched_q( magma_int_t m, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB, batchCount ); hipLaunchKernelGGL(( ctranspose_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dA_array, ldda, dAT_array, lddat ); } /** @see magmablas_ctranspose_batched_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_ctranspose_batched( magma_int_t m, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount ) { magmablas_ctranspose_batched_q( m, n, dA_array, ldda, dAT_array, lddat, batchCount, magma_stream ); }
eeba1f2509f88ba6d3be2d6de23a6b94196a1e8d.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from ztranspose.cu normal z -> c, Sat Nov 15 19:53:59 2014 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_c #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void ctranspose_device( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { __shared__ magmaFloatComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void ctranspose_kernel( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { ctranspose_device(m, n, A, lda, AT, ldat); } __global__ void ctranspose_kernel_batched( int m, int n, magmaFloatComplex **dA_array, int lda, magmaFloatComplex **dAT_array, int ldat) { int batchid = blockIdx.z; ctranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /** Purpose ------- ctranspose_q copies and transposes a matrix dA to matrix dAT. Same as ctranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT COMPLEX array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_ctranspose_q( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB ); ctranspose_kernel<<< grid, threads, 0, queue >>> ( m, n, dA, ldda, dAT, lddat ); } /** @see magmablas_ctranspose_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_ctranspose( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dAT, magma_int_t lddat ) { magmablas_ctranspose_q( m, n, dA, ldda, dAT, lddat, magma_stream ); } /** Purpose ------- ctranspose_batched_q copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as ctranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_ctranspose_batched_q( magma_int_t m, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB, batchCount ); ctranspose_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dA_array, ldda, dAT_array, lddat ); } /** @see magmablas_ctranspose_batched_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_ctranspose_batched( magma_int_t m, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount ) { magmablas_ctranspose_batched_q( m, n, dA_array, ldda, dAT_array, lddat, batchCount, magma_stream ); }
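/*
  A minimal launch-geometry sketch for the ctranspose wrappers above, illustrative only:
  it repeats the grid/block arithmetic the wrappers already perform for an assumed
  matrix size. For the single-precision-complex build, NX = NB = 32 and NY = 8, so each
  thread block transposes one 32x32 tile with 32x8 threads. Intended to be compiled as
  a .cu file so dim3 is available.
*/
#include <cstdio>

int main() {
  const int NB = 32, NX = 32, NY = 8;
  int m = 1000, n = 500;                            // example matrix size (assumed)
  dim3 threads(NX, NY);                             // 256 threads per block
  dim3 grid((m + NB - 1) / NB, (n + NB - 1) / NB);  // 32 x 16 tiles for this example
  std::printf("grid = (%u, %u), threads = (%u, %u)\n",
              grid.x, grid.y, threads.x, threads.y);
  return 0;
}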
07a92309ce98acfaf7c298aa3e4271b1353289d0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.cpp" #include "utils.h" #include <cmath> __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. int row = blockIdx.x; int col = threadIdx.x; if(row >= numRows || col >= numCols) return; int pixel = row*numCols + col; float result = 0.0f, filterVal = 0.0f, imagVal=0.0f; int halfFW = filterWidth/2; for(int filter_r=-halfFW;filter_r <= halfFW;filter_r++) { for(int filter_c=-halfFW;filter_c<=halfFW;filter_c++) { int image_r = min(max(row + filter_r, 0), static_cast<int>(numRows - 1)); int image_c = min(max(col + filter_c, 0), static_cast<int>(numCols - 1)); filterVal = filter[(filter_r+halfFW)*filterWidth+(filter_c+halfFW)]; imagVal = static_cast<float>(inputChannel[image_r * numCols + image_c]); result+=(filterVal*imagVal); } } outputChannel[pixel]=static_cast<unsigned char>(result); } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } int row = blockIdx.x; int col = threadIdx.x; if(row >= numRows || col >= numCols) { return; } int pixel = row*numCols + col; uchar4 rgba = inputImageRGBA[pixel]; redChannel[pixel] = rgba.x; greenChannel[pixel] = rgba.y; blueChannel[pixel] = rgba.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { /* const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return;*/ int row = blockIdx.x; int col = threadIdx.x; if(row >= numRows || col >= numCols) { return; } int pixel = row*numCols + col; unsigned char red = redChannel[pixel]; unsigned char green = greenChannel[pixel]; unsigned char blue = blueChannel[pixel]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[pixel] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float)*filterWidth*filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(numCols,1,1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(numRows,1,1); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize),dim3(blockSize), 0, 0, d_inputImageRGBA,numRows,numCols,d_red,d_green,d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_red,d_redBlurred,numRows,numCols,d_filter,filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_green,d_greenBlurred,numRows,numCols,d_filter,filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_blue,d_blueBlurred,numRows,numCols,d_filter,filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
07a92309ce98acfaf7c298aa3e4271b1353289d0.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.cpp" #include "utils.h" #include <cmath> __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. int row = blockIdx.x; int col = threadIdx.x; if(row >= numRows || col >= numCols) return; int pixel = row*numCols + col; float result = 0.0f, filterVal = 0.0f, imagVal=0.0f; int halfFW = filterWidth/2; for(int filter_r=-halfFW;filter_r <= halfFW;filter_r++) { for(int filter_c=-halfFW;filter_c<=halfFW;filter_c++) { int image_r = min(max(row + filter_r, 0), static_cast<int>(numRows - 1)); int image_c = min(max(col + filter_c, 0), static_cast<int>(numCols - 1)); filterVal = filter[(filter_r+halfFW)*filterWidth+(filter_c+halfFW)]; imagVal = static_cast<float>(inputChannel[image_r * numCols + image_c]); result+=(filterVal*imagVal); } } outputChannel[pixel]=static_cast<unsigned char>(result); } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } int row = blockIdx.x; int col = threadIdx.x; if(row >= numRows || col >= numCols) { return; } int pixel = row*numCols + col; uchar4 rgba = inputImageRGBA[pixel]; redChannel[pixel] = rgba.x; greenChannel[pixel] = rgba.y; blueChannel[pixel] = rgba.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { /* const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return;*/ int row = blockIdx.x; int col = threadIdx.x; if(row >= numRows || col >= numCols) { return; } int pixel = row*numCols + col; unsigned char red = redChannel[pixel]; unsigned char green = greenChannel[pixel]; unsigned char blue = blueChannel[pixel]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[pixel] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float)*filterWidth*filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(numCols,1,1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(numRows,1,1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize,blockSize>>>(d_inputImageRGBA,numRows,numCols,d_red,d_green,d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize,blockSize>>>(d_red,d_redBlurred,numRows,numCols,d_filter,filterWidth); gaussian_blur<<<gridSize,blockSize>>>(d_green,d_greenBlurred,numRows,numCols,d_filter,filterWidth); gaussian_blur<<<gridSize,blockSize>>>(d_blue,d_blueBlurred,numRows,numCols,d_filter,filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
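/*
  The launch configuration in the homework files above assigns one block per image row
  and one thread per column, which only works while numCols <= 1024 (the per-block
  thread limit). The sketch below is an assumed alternative, not part of the homework
  solution: a 2D tiling that also handles wider images. The kernels would then index
  with blockIdx * blockDim + threadIdx instead of blockIdx.x / threadIdx.x alone.
*/
#include <cuda_runtime.h>

// Hypothetical helper: computes a 16x16-tile launch geometry for an image.
inline void blurLaunchDims(size_t numRows, size_t numCols,
                           dim3& gridSize, dim3& blockSize) {
  blockSize = dim3(16, 16, 1);  // 256 threads per block
  gridSize  = dim3((unsigned)((numCols + blockSize.x - 1) / blockSize.x),
                   (unsigned)((numRows + blockSize.y - 1) / blockSize.y), 1);
  // Inside the kernels this pairs with:
  //   int col = blockIdx.x * blockDim.x + threadIdx.x;
  //   int row = blockIdx.y * blockDim.y + threadIdx.y;
}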
b08925294cbd772e47b5c133e19f2dbe14183776.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
% Function:    transform_precoder
% Description: perform transform precoding on complex data after mapper
% Inputs:      *symbols_R_h:  Real part of the symbols
% Inputs:      *symbols_I_h:  Imag part of the symbols
%              M_pusch_rb     number of resource blocks assigned to ue
% Outputs:     *precoded_data transform precoded data
By: Ahmad Nour & Mohammed Mostafa
*/

/*
The toComplex kernel converts from the 2-array implementation to the hipfftComplex structure
and multiplies the symbols by a coefficient (1/sqrt(M_pusch_sc)).
The kernel's overhead can be avoided if we merge it with the mapper kernel.
*/

#include "transform_precoder.cuh"

__global__ void toComplex(float* symbols_R_d, float* symbols_I_d, hipfftComplex* symbols_d, double coeff, int numThreads)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Not to run more threads than available data
    if (idx >= numThreads)
        return;

    symbols_d[idx].x = symbols_R_d[idx] * coeff;
    symbols_d[idx].y = symbols_I_d[idx] * coeff;
}

void transform_precoder(float* symbols_R_d, float* symbols_I_d, const int M_pusch_rb, int signal_size, hipfftComplex** precoded_data_d, hipfftHandle plan_transform_precoder, hipfftComplex* symbols_d, hipStream_t stream)
{
    int M_pusch_sc = N_sc_rb * M_pusch_rb;

    // Calc. number of needed threads for calling kernel(s)
    int numThreads = signal_size;
    int blockDim = (numThreads < 1024) ? numThreads : 1024;                       // block size in threads (max 1024 threads)
    int gridDim = numThreads / (blockDim) + (numThreads % blockDim == 0 ? 0 : 1); // grid size in blocks (min 1)

    // Coeff. multiplication
    toComplex << <gridDim, blockDim, 0, stream >> > (symbols_R_d, symbols_I_d, symbols_d, rsqrtf(M_pusch_sc), numThreads);

    hipfftSetStream(plan_transform_precoder, stream);
    hipfftExecC2C(plan_transform_precoder, symbols_d, *precoded_data_d, HIPFFT_FORWARD);
}
b08925294cbd772e47b5c133e19f2dbe14183776.cu
/*
% Function:    transform_precoder
% Description: perform transform precoding on complex data after mapper
% Inputs:      *symbols_R_h:  Real part of the symbols
% Inputs:      *symbols_I_h:  Imag part of the symbols
%              M_pusch_rb     number of resource blocks assigned to ue
% Outputs:     *precoded_data transform precoded data
By: Ahmad Nour & Mohammed Mostafa
*/

/*
The toComplex kernel converts from the 2-array implementation to the cufftComplex structure
and multiplies the symbols by a coefficient (1/sqrt(M_pusch_sc)).
The kernel's overhead can be avoided if we merge it with the mapper kernel.
*/

#include "transform_precoder.cuh"

__global__ void toComplex(float* symbols_R_d, float* symbols_I_d, cufftComplex* symbols_d, double coeff, int numThreads)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Not to run more threads than available data
    if (idx >= numThreads)
        return;

    symbols_d[idx].x = symbols_R_d[idx] * coeff;
    symbols_d[idx].y = symbols_I_d[idx] * coeff;
}

void transform_precoder(float* symbols_R_d, float* symbols_I_d, const int M_pusch_rb, int signal_size, cufftComplex** precoded_data_d, cufftHandle plan_transform_precoder, cufftComplex* symbols_d, cudaStream_t stream)
{
    int M_pusch_sc = N_sc_rb * M_pusch_rb;

    // Calc. number of needed threads for calling kernel(s)
    int numThreads = signal_size;
    int blockDim = (numThreads < 1024) ? numThreads : 1024;                       // block size in threads (max 1024 threads)
    int gridDim = numThreads / (blockDim) + (numThreads % blockDim == 0 ? 0 : 1); // grid size in blocks (min 1)

    // Coeff. multiplication
    toComplex << <gridDim, blockDim, 0, stream >> > (symbols_R_d, symbols_I_d, symbols_d, rsqrtf(M_pusch_sc), numThreads);

    cufftSetStream(plan_transform_precoder, stream);
    cufftExecC2C(plan_transform_precoder, symbols_d, *precoded_data_d, CUFFT_FORWARD);
}
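/*
  A minimal host-side usage sketch for transform_precoder above. This is an assumed
  caller, not code from the project: it presumes transform_precoder.cuh declares
  transform_precoder and the N_sc_rb constant (12 subcarriers per resource block in
  LTE), and that signal_size is a whole number of SC-FDMA symbols, so the C2C plan
  batches one M_pusch_sc-point DFT per symbol.
*/
#include <cufft.h>
#include <cuda_runtime.h>
#include "transform_precoder.cuh"

void run_transform_precoder_example(float* symbols_R_d, float* symbols_I_d,
                                    int M_pusch_rb, int signal_size)
{
    int M_pusch_sc  = N_sc_rb * M_pusch_rb;      // DFT length (subcarriers per symbol)
    int num_symbols = signal_size / M_pusch_sc;  // number of DFTs to batch

    cufftComplex* symbols_d  = NULL;
    cufftComplex* precoded_d = NULL;
    cudaMalloc((void**)&symbols_d,  signal_size * sizeof(cufftComplex));
    cudaMalloc((void**)&precoded_d, signal_size * sizeof(cufftComplex));

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    cufftHandle plan;
    cufftPlan1d(&plan, M_pusch_sc, CUFFT_C2C, num_symbols);

    transform_precoder(symbols_R_d, symbols_I_d, M_pusch_rb, signal_size,
                       &precoded_d, plan, symbols_d, stream);
    cudaStreamSynchronize(stream);

    cufftDestroy(plan);
    cudaStreamDestroy(stream);
    cudaFree(symbols_d);
    cudaFree(precoded_d);
}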
a693aad6ad26e16bc51c9212b1c54f3e0ed4a8c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include<cstdlib> #include<ctime> #include <iostream> #include<fstream> #include <cstringt.h> #include<vector> #include<windows.h> #include<thread> #include <stdio.h> #include <iomanip> #define RED "\033[31m" #define GREEN "\033[32m" #define YELLOW "\033[33m" #define BLUE "\033[34m" #define RESET "\033[0m" int GPU_CLOCK_RATE; using namespace std; clock_t start, v1, v2, v3, finish; int readIn(float* matrixA,float* matrixB); // float* matrix_product_v1(float* matrixA, float* matrixB, int ma, int na, int mb, int nb); float** matrix_product_v2(float** matrixA, float** matrixB); float** matrix_product_CUDA(float** matrixA, float** matrixB); float** matrixCut(float* matrixA); const int THREAD_NUM = thread::hardware_concurrency(); const int DATA_LENGTH = 200000000, DATA_HALF_LENGTH = 100000000, DATA_WIDTH = 10000, DATA_WIDTH_S = 5000; int active_thread = 768 * 2; struct matrix { int m, n; float* value; }; __global__ void dot_product_Kernel(float* input, float* answer) { int active_thread = 8 * 256; int x = threadIdx.x; int y = blockIdx.x; int pos = x * y; int left = pos; int right = DATA_HALF_LENGTH - 1 + pos; float ans = 0; while (right < DATA_LENGTH) { ans += input[left] * input[right]; left += active_thread; right += active_thread; } answer[pos] = ans; } __global__ void matrix_minus_Kernel(float* ans,float* matrixA,float* matrixB) { int active_thread = 8 * 256; int x = threadIdx.x; int y = blockIdx.x; int pos = y * 256+x; int tmp = DATA_WIDTH_S * DATA_WIDTH_S; while (pos < tmp) { ans[pos] = matrixA[pos] - matrixB[pos]; pos += active_thread; } } __global__ void matrix_add_Kernel(float* ans, float* matrixA, float* matrixB) { int active_thread = 8 * 256; int x = threadIdx.x; int y = blockIdx.x; int pos = y * 256 + x; int tmp = DATA_WIDTH_S * DATA_WIDTH_S; while (pos < tmp) { ans[pos] = matrixA[pos] + matrixB[pos]; pos += active_thread; } } __global__ void matrix_product_Kernel(float* ans, float* matrixA, float* matrixB) { int x = threadIdx.x; int y = blockIdx.x; int pos = y * 256 + x; int active_thread = 8 * 256; int ma = DATA_WIDTH_S; for (int i = 0; i < ma; i++) { pos = pos % (ma); for (pos; pos < ma; pos+=active_thread) { int a = i * ma + pos; ans[a] = 0; for (int k = 0; k + 9 < ma; k += 10) { int b = i * ma + k; int c = k * ma + pos; ans[a] += matrixA[b++] * matrixB[c]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; } } } } __global__ void smatrix_product_Kernel(float* ans, float* matrixA, float* matrixB) { int x = threadIdx.x; int y = blockIdx.x; int ma = DATA_WIDTH_S; for (int i = y; i+8 < ma; i+=8) { __shared__ float row[DATA_WIDTH_S]; int tmp = x; while (tmp < DATA_WIDTH_S) { row[tmp] = matrixA[i * ma + tmp]; tmp += 256; } __syncthreads(); for (int j = x; j< ma;j += 256) { int a = i * ma + j; ans[a] = 0; for (int k = 0; k + 9 < ma; k += 10) { int b = k; int c = k * ma + j; float answer = 0; answer += row[b++] * matrixB[c]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += 
row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; ans[a] += answer; } } } } __global__ void warmup() { /*GPU*/ } void printDeviceProp(const hipDeviceProp_t& prop) { printf("Device Name : %s\n", prop.name); //printf("totalGlobalMem : %d.\n", prop.totalGlobalMem); //printf("sharedMemPerBlock : %d.\n", prop.sharedMemPerBlock); //printf("regsPerBlock : %d.\n", prop.regsPerBlock); printf("warpSize : %d\n", prop.warpSize); //printf("memPitch : %d.\n", prop.memPitch); printf("maxThreadsPerBlock : %d\n", prop.maxThreadsPerBlock); printf("maxThreadsDim[0 - 2] : %d %d %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf("maxGridSize[0 - 2] : %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); //printf("totalConstMem : %d.\n", prop.totalConstMem); //printf("major.minor : %d.%d.\n", prop.major, prop.minor); printf("clockRate : %d\n", prop.clockRate); //printf("textureAlignment : %d.\n", prop.textureAlignment); //printf("deviceOverlap : %d.\n", prop.deviceOverlap); printf("multiProcessorCount : %d\n", prop.multiProcessorCount); } bool InitCUDA(){ hipError_t cudaStatus; int count; hipGetDeviceCount(&count); if (count == 0) { fprintf(stderr, "There is no device.\n"); } int i; for (i = 0; i < count; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("CUDA device info as below:\n"); printDeviceProp(prop); GPU_CLOCK_RATE = prop.clockRate; if (hipGetDeviceProperties(&prop, i) == hipSuccess) { if (prop.major >= 1) { break; } } } if (i == count) { fprintf(stderr, "There is no device supporting CUDA 11.x.\n"); } hipSetDevice(i); return true; } int main() { cout << RESET; printf("-------------Matrix Product---------------\nEnter number to choose:\n --------------------------------------\n|1-Calculate by inputing matrix A and B|\n --------------------------------------\n|2-Calculate by existed 200000000 data |\n --------------------------------------\nYour choice:"); int op; cin >> op; if (op == 1) { printf("Enter the length of Matrix A(\"length\" \"width\"):"); int ma; int na; cin >> ma; cin >> na; float* matrixA = new float[ma * na]; printf("Enter the Matrix A line by line:\n"); for (int i = 0; i < ma; i++) { for (int j = 0; j < na; j++) { cin >> matrixA[i * na + j]; } } cout<<BLUE << ("Matrix A input complete:\n"); for (int i = 0; i < ma; i++) { for (int j = 0; j < na; j++) { cout << setw(13) << left<<setfill(' ') << matrixA[i * na + j]<<" "; } cout << endl; } cout << RESET; printf("Enter the length of Matrix B:(\"length\" \"width\"):"); int mb, nb; cin >> mb; cin >> nb; float* matrixB = new float[mb * nb]; printf("Enter the Matrix B line by line:\n"); for (int i = 0; i < mb; i++) { for (int j = 0; j < nb; j++) { cin >> matrixB[i * nb + j]; } } cout <<BLUE<< ("Matrix B input complete:\n"); for (int i = 0; i < mb; i++) { for (int j = 0; j < nb; j++) { cout << setw(13) << left << setfill(' ') << matrixB[i * nb + j] << " "; } cout << endl; } cout << RESET; if (na != mb) { cout <<RED<< "Wrong length!Please cheack carefully" << endl; cout << RESET; } else { float* ans = matrix_product_v1(matrixA, matrixB, ma, na, mb, nb); cout <<YELLOW<< "-------------------------------\nComplete!The answer matrix is:\n"; cout << RESET; for (int i = 0; i < ma; i++) { for (int j = 0; j < nb; j++) { cout << setw(13) << left << setfill(' ') << ans[i * nb + j] << " "; } cout << endl; } } } else { cout<<GREEN<<"Initializing CUDA device......\n"; cout << RESET; 
InitCUDA(); float* input = new float[DATA_HALF_LENGTH]; float* matrixA = new float[DATA_HALF_LENGTH]; float* matrixB = new float[DATA_HALF_LENGTH]; cout << GREEN << "Reading data from disk.....Please wait\n"; cout << RESET; readIn(matrixA,matrixB); // float** ans; start = clock(); cout << GREEN << "Calculating in brute Force....." << RED << "WARNING:This will take a long time!" << endl; cout << RESET; float* ans1 = matrix_product_v1(matrixA, matrixB, DATA_WIDTH, DATA_WIDTH, DATA_WIDTH, DATA_WIDTH); v1 = clock(); cout << "Calculate time for [brute force in prue CPU] : " << (double)(v1 - start) / (CLOCKS_PER_SEC * 60) <<" Minutes"<< endl; cout <<BLUE<< "Calculate answer :\n "<< endl; for (int j = 0; j < 10; j++) { cout << setw(13) << left << setfill(' ') << ans1[j] << " "; } cout << RESET; cout << endl; cout << "Cores in this computer : " << THREAD_NUM << endl; float** Acut = matrixCut(matrixA); float** Bcut = matrixCut(matrixB); cout <<GREEN<< "Calculating in [Strassen + multi CPU]....." << endl; cout << RESET; ans = matrix_product_v2(Acut,Bcut); cout << "Calculate time for [Strassen + multi CPU] : " << (double)(finish - start) / (CLOCKS_PER_SEC*60) <<" Minutes"<< endl; cout <<BLUE<< "Calculate answer (first 10 numbers in answer matrix) : "<< endl; for (int j = 0; j < 10; j++) { cout << setw(13) << left << setfill(' ') << ans[0][j] << " "; } cout << RESET; cout << endl; cout << GREEN << "Calculating by [CUDA+Strassen]......." << endl; cout << RESET; ans = matrix_product_CUDA(Acut,Bcut); } return 0; } int readIn(float* matrixA,float* matrixB) { start = clock(); int count = 0; ifstream ifile; ifile.open("D:\\Cpppppppp\\ldFeature.bin", ios::binary); float b; int i = 0; while (i++ < DATA_HALF_LENGTH) { ifile.read((char*)&b, sizeof(b)); //cout << b << " "; matrixA[count++] = b; } i = 0; count = 0; while (i++ < DATA_HALF_LENGTH) { ifile.read((char*)&b, sizeof(b)); //cout << b << " "; matrixB[count++] = b; } ifile.close(); cout << "Total numbers in vector:" << count*2 << endl; finish = clock(); cout << "IO read-in time : " << (double)(finish - start) / CLOCKS_PER_SEC << " Seconds"<<endl; return 1; } float** matrixCut(float* matrixA) { float** ans = new float*[4]; float* temp1 = new float[DATA_WIDTH_S* DATA_WIDTH_S]; float* temp2 = new float[DATA_WIDTH_S* DATA_WIDTH_S]; for (int i = 0; i < DATA_WIDTH_S; i++) { for (int j = 0; j < DATA_WIDTH_S; j++) { temp1[i * DATA_WIDTH_S + j] = matrixA[i * DATA_WIDTH + j]; } } for (int i = 0; i < DATA_WIDTH_S; i++) { for (int j = 0; j < DATA_WIDTH_S; j++) { temp2[i * DATA_WIDTH_S + j] = matrixA[i * DATA_WIDTH + j+DATA_WIDTH_S]; } } ans[0] = temp1; ans[1] = temp2; float* temp3 = new float[DATA_WIDTH_S * DATA_WIDTH_S]; float* temp4 = new float[DATA_WIDTH_S * DATA_WIDTH_S]; int pass = DATA_WIDTH_S * DATA_WIDTH; for (int i = 0; i < DATA_WIDTH_S; i++) { for (int j = 0; j < DATA_WIDTH_S; j++) { temp3[i * DATA_WIDTH_S + j] = matrixA[i * DATA_WIDTH + j+pass]; } } for (int i = 0; i < DATA_WIDTH_S; i++) { for (int j = 0; j < DATA_WIDTH_S; j++) { temp4[i * DATA_WIDTH_S + j] = matrixA[i * DATA_WIDTH + j+pass+DATA_WIDTH_S]; } } ans[2] = temp3; ans[3] = temp4; delete[DATA_HALF_LENGTH] matrixA; return ans; } void threadMinus(float* ans, float* matrixA, float* matrixB, int ma, int na) { int count = 0; for (int i = 0; i < ma; i++) { for (int j = 0; j < na; j++) { ans[count] = matrixA[count] - matrixB[count]; count++; } } } void threadAdd(float* ans, float* matrixA, float* matrixB, int ma, int na) { int count = 0; for (int i = 0; i < ma; i++) { for (int j = 0; j < na; j++) { 
ans[count] = matrixA[count] + matrixB[count]; count++; } } } float* matrix_product_v1(float* matrixA, float* matrixB, int ma, int na, int mb, int nb) { float* ans = new float[ma * nb]{ 0 }; for (int i = 0; i < ma; i++) { for (int j = 0; j < nb; j++) { for (int k = 0; k< na; k++) { ans[i * nb +j] += matrixA[i* na+k] * matrixB[k* nb+j]; } } } return ans; } void thread_matrix_product(float* ans, float* matrixA, float* matrixB, int ma) { for (int i = 0; i < ma; i++) { for (int j = 0; j < ma; j++) { for (int k = 0; k + 9 < ma; k += 10) { int a = i * ma + j; int b = i * ma + k; int c = k * ma + j; ans[a] += matrixA[b++] * matrixB[c]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; } } } } float** matrix_product_v2(float** matrixA, float** matrixB) { float* A1 = matrixA[0]; float* A2 = matrixA[1]; float* A3 = matrixA[2]; float* A4 = matrixA[3]; float* B1 = matrixB[0]; float* B2 = matrixB[1]; float* B3 = matrixB[2]; float* B4 = matrixB[3]; start = clock(); thread thread_pool_S[10]; int temp = DATA_WIDTH_S * DATA_WIDTH_S; float* S1 = new float[temp]; float* S2 = new float[temp]; float* S3 = new float[temp]; float* S4 = new float[temp]; float* S5 = new float[temp]; float* S6 = new float[temp]; float* S7 = new float[temp]; float* S8 = new float[temp]; float* S9 = new float[temp]; float* S10 = new float[temp]; thread_pool_S[0] = thread(threadMinus, S1, B1, B4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[1] = thread(threadAdd, S2, A1, A2, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[2] = thread(threadAdd, S3, A3, A4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[3] = thread(threadMinus, S4, B3, B1, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[4] = thread(threadAdd, S5, A1, A4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[5] = thread(threadAdd, S6, B1, B4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[6] = thread(threadMinus, S7, A2, A4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[7] = thread(threadAdd, S8, B3, B4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[8] = thread(threadMinus, S9, A1, A3, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[9] = thread(threadAdd, S10, B1, B2, DATA_WIDTH_S, DATA_WIDTH_S); for (int i = 0; i < 10; i++) { thread_pool_S[i].join(); } thread thread_pool_P[7]; float* P1 = new float[temp] {0}; float* P2 = new float[temp] {0}; float* P3 = new float[temp] {0}; float* P4 = new float[temp] {0}; float* P5 = new float[temp] {0}; float* P6 = new float[temp] {0}; float* P7 = new float[temp] {0}; thread_pool_P[0] = thread(thread_matrix_product, P1, A1, S1, DATA_WIDTH_S); thread_pool_P[1] = thread(thread_matrix_product, P2, S2, B4, DATA_WIDTH_S); thread_pool_P[2] = thread(thread_matrix_product, P3, S3, B1, DATA_WIDTH_S); thread_pool_P[3] = thread(thread_matrix_product, P4, A4, S4, DATA_WIDTH_S); thread_pool_P[4] = thread(thread_matrix_product, P5, S5, S6, DATA_WIDTH_S); thread_pool_P[5] = thread(thread_matrix_product, P6, S7, S8, DATA_WIDTH_S); thread_pool_P[6] = thread(thread_matrix_product, P7, S9, S10, DATA_WIDTH_S); for (int i = 0; i < 7; i++) { thread_pool_P[i].join(); } delete[] S9, S10; float** ans = new float*[4]; thread thread_pool_F[4]; thread_pool_F[0] = thread(threadAdd,S5, P5, P4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_F[1] = thread(threadMinus, S6, P2, P6, 
DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_F[2] = thread(threadAdd, S7, P5, P1, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_F[3] = thread(threadAdd, S8, P3, P7, DATA_WIDTH_S, DATA_WIDTH_S); for (int i = 0; i < 4; i++) { thread_pool_F[i].join(); } thread_pool_F[0] = thread(threadMinus, S1, S5, S6, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_F[1] = thread(threadAdd, S2, P1, P2, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_F[2] = thread(threadAdd, S3, P3, P4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_F[3] = thread(threadMinus, S4, S7, S8, DATA_WIDTH_S, DATA_WIDTH_S); for (int i = 0; i < 4; i++) { thread_pool_F[i].join(); } ans[0] = S1; ans[1] = S2; ans[2] = S3; ans[3] = S4; finish = clock(); return ans; } float** matrix_product_CUDA(float** matrixA, float** matrixB) { cout << "Copying data from RAM to GPU memory....." << endl; hipError_t cudaStatus; int length = DATA_WIDTH_S * DATA_WIDTH_S; float* dev_A1; float* dev_A2; float* dev_A3; float* dev_A4; float* dev_B1; float* dev_B2; float* dev_B3; float* dev_B4; float* S1; float* S2; float* S3; float* S4; float* S5; float* S6; float* S7; float* S8; float* S9; float* S10; float* P1; float* P2; float* P3; float* P4; float* P5; float* P6; float* P7; cudaStatus = hipMalloc((void**)&dev_A1, length * sizeof(float)); cudaStatus = hipMalloc((void**)&dev_A2, length * sizeof(float)); cudaStatus = hipMalloc((void**)&dev_A3, length * sizeof(float)); cudaStatus = hipMalloc((void**)&dev_A4, length * sizeof(float)); cudaStatus = hipMalloc((void**)&dev_B1, length * sizeof(float)); cudaStatus = hipMalloc((void**)&dev_B2, length * sizeof(float)); cudaStatus = hipMalloc((void**)&dev_B3, length * sizeof(float)); cudaStatus = hipMalloc((void**)&dev_B4, length * sizeof(float)); cudaStatus = hipMalloc((void**)&S1, length * sizeof(float)); cudaStatus = hipMalloc((void**)&S2, length * sizeof(float)); cudaStatus = hipMalloc((void**)&S3, length * sizeof(float)); cudaStatus = hipMalloc((void**)&S4, length * sizeof(float)); cudaStatus = hipMalloc((void**)&S5, length * sizeof(float)); cudaStatus = hipMalloc((void**)&S6, length * sizeof(float)); cudaStatus = hipMalloc((void**)&S7, length * sizeof(float)); cudaStatus = hipMalloc((void**)&S8, length * sizeof(float)); cudaStatus = hipMalloc((void**)&S9, length * sizeof(float)); cudaStatus = hipMalloc((void**)&S10, length * sizeof(float)); cudaStatus = hipMalloc((void**)&P1, length * sizeof(float)); cudaStatus = hipMalloc((void**)&P2, length * sizeof(float)); cudaStatus = hipMalloc((void**)&P3, length * sizeof(float)); cudaStatus = hipMalloc((void**)&P4, length * sizeof(float)); cudaStatus = hipMalloc((void**)&P5, length * sizeof(float)); cudaStatus = hipMalloc((void**)&P6, length * sizeof(float)); cudaStatus = hipMalloc((void**)&P7, length * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } cudaStatus = hipMemcpy(dev_A1, matrixA[0], length, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy(dev_A2, matrixA[1], length * sizeof(float), hipMemcpyHostToDevice); cudaStatus = hipMemcpy(dev_A3, matrixA[2], length * sizeof(float), hipMemcpyHostToDevice); cudaStatus = hipMemcpy(dev_A4, matrixA[3], length * sizeof(float), hipMemcpyHostToDevice); cudaStatus = hipMemcpy(dev_B1, matrixB[0], length * sizeof(float), hipMemcpyHostToDevice); cudaStatus = hipMemcpy(dev_B2, matrixB[1], length * sizeof(float), hipMemcpyHostToDevice); cudaStatus = hipMemcpy(dev_B3, matrixB[2], length * sizeof(float), hipMemcpyHostToDevice); cudaStatus = hipMemcpy(dev_B4, 
matrixB[3], length * sizeof(float), hipMemcpyHostToDevice); finish = clock(); cout << "Data copy time for GPU : " << (double)(finish - start) / CLOCKS_PER_SEC << " Seconds" << endl; start = clock(); warmup << <1, 1 >> > (); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); { matrix_minus_Kernel << <8, 256 >> > (S1, dev_B1, dev_B4); matrix_add_Kernel << <8, 256 >> > (S2, dev_A1, dev_A2); matrix_add_Kernel << <8, 256 >> > (S3, dev_A3, dev_A4); matrix_minus_Kernel << <8, 256 >> > (S4, dev_B3, dev_B1); matrix_add_Kernel << <8, 256 >> > (S5, dev_A1, dev_A4); matrix_add_Kernel << <8, 256 >> > (S6, dev_B1, dev_B4); matrix_minus_Kernel << <8, 256 >> > (S7, dev_A2, dev_A4); matrix_add_Kernel << <8, 256 >> > (S8, dev_B3, dev_B4); matrix_minus_Kernel << <8, 256 >> > (S9, dev_A1, dev_A3); matrix_add_Kernel << <8, 256 >> > (S10, dev_B1, dev_B2); hipDeviceSynchronize(); cout << "S part finished" << endl; matrix_product_Kernel << <8, 256 >> > (P1, dev_A1, S1); matrix_product_Kernel << <8, 256 >> > (P2, S2, dev_B4); matrix_product_Kernel << <8, 256 >> > (P3, S3, dev_B1); matrix_product_Kernel << <8, 256 >> > (P4, dev_A4, S4); matrix_product_Kernel << <8, 256 >> > (P5, S5, S6); matrix_product_Kernel << <8, 256 >> > (P6, S7, S8); matrix_product_Kernel << <8, 256 >> > (P7, S9, S10); hipDeviceSynchronize(); cout << "P part finished" << endl; matrix_add_Kernel << <8, 256 >> > (S5, P5, P4); matrix_minus_Kernel << <8, 256 >> > (S6, P2, P6); matrix_add_Kernel << <8, 256 >> > (S7, P5, P1); matrix_add_Kernel << <8, 256 >> > (S8, P3, P7); hipDeviceSynchronize(); matrix_minus_Kernel << <8, 256 >> > (S1, S5, S6); matrix_add_Kernel << <8, 256 >> > (S2, P1, P2); matrix_add_Kernel << <8, 256 >> > (S3, P3, P4); matrix_minus_Kernel << <8, 256 >> > (S4, S7, S8); cout << "C part finished" << endl; } hipEventRecord(stop, 0); hipEventSynchronize(start); hipEventSynchronize(stop); float costtime; hipEventElapsedTime(&costtime, start, stop); float** ans = new float* [4]; float* ans0 = new float[length]; float* ans1 = new float[length]; float* ans2 = new float[length]; float* ans3 = new float[length]; cudaStatus = hipMemcpy(ans0, S1, length, hipMemcpyDeviceToHost); cudaStatus = hipMemcpy(ans1, S2, length, hipMemcpyDeviceToHost); cudaStatus = hipMemcpy(ans2, S3, length, hipMemcpyDeviceToHost); cudaStatus = hipMemcpy(ans3, S4, length, hipMemcpyDeviceToHost); ans[0] = ans0; ans[1] = ans1; ans[2] = ans1; ans[3] = ans3; if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cout << "Calculate time for [CUDA+Strassen] : " << costtime / (1000 * 60) << " Minutes" << endl; //cudaStatus = hipMemcpy(answer, dev_answer, active_thread * sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cout << BLUE << "Calculate answer (first 10 numbers in answer matrix) : " << endl; for (int j = 0; j < 10; j++) { cout << setw(13) << left << setfill(' ') << ans[0][j] << " "; } cout << RESET; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); { matrix_minus_Kernel << <8, 256 >> > (S1, dev_B1, dev_B4); matrix_add_Kernel << <8, 256 >> > (S2, dev_A1, dev_A2); matrix_add_Kernel << <8, 256 >> > (S3, dev_A3, dev_A4); matrix_minus_Kernel << <8, 256 >> > (S4, dev_B3, dev_B1); matrix_add_Kernel << <8, 256 >> > (S5, dev_A1, dev_A4); matrix_add_Kernel << <8, 256 >> > (S6, dev_B1, dev_B4); matrix_minus_Kernel << <8, 256 >> > (S7, dev_A2, dev_A4); matrix_add_Kernel << <8, 256 >> > (S8, dev_B3, dev_B4); matrix_minus_Kernel << <8, 
256 >> > (S9, dev_A1, dev_A3); matrix_add_Kernel << <8, 256 >> > (S10, dev_B1, dev_B2); hipDeviceSynchronize(); cout << "S part finished" << endl; smatrix_product_Kernel << <8, 256 >> > (P1, dev_A1, S1); smatrix_product_Kernel << <8, 256 >> > (P2, S2, dev_B4); smatrix_product_Kernel << <8, 256 >> > (P3, S3, dev_B1); smatrix_product_Kernel << <8, 256 >> > (P4, dev_A4, S4); smatrix_product_Kernel << <8, 256 >> > (P5, S5, S6); smatrix_product_Kernel << <8, 256 >> > (P6, S7, S8); smatrix_product_Kernel << <8, 256 >> > (P7, S9, S10); hipDeviceSynchronize(); cout << "P part finished" << endl; matrix_add_Kernel << <8, 256 >> > (S5, P5, P4); matrix_minus_Kernel << <8, 256 >> > (S6, P2, P6); matrix_add_Kernel << <8, 256 >> > (S7, P5, P1); matrix_add_Kernel << <8, 256 >> > (S8, P3, P7); hipDeviceSynchronize(); matrix_minus_Kernel << <8, 256 >> > (S1, S5, S6); matrix_add_Kernel << <8, 256 >> > (S2, P1, P2); matrix_add_Kernel << <8, 256 >> > (S3, P3, P4); matrix_minus_Kernel << <8, 256 >> > (S4, S7, S8); cout << "C part finished" << endl; } hipEventRecord(stop, 0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&costtime, start, stop); cudaStatus = hipMemcpy(ans0, S1, length, hipMemcpyDeviceToHost); cudaStatus = hipMemcpy(ans1, S2, length, hipMemcpyDeviceToHost); cudaStatus = hipMemcpy(ans2, S3, length, hipMemcpyDeviceToHost); cudaStatus = hipMemcpy(ans3, S4, length, hipMemcpyDeviceToHost); ans[0] = ans0; ans[1] = ans1; ans[2] = ans1; ans[3] = ans3; cout << "Calculate time for [CUDA+Strassen+Shared Memory] : " << costtime / (1000 * 60) << " Minutes" << endl; cout << BLUE << "Calculate answer (first 10 numbers in answer matrix) : " << endl; for (int j = 0; j < 10; j++) { cout << setw(13) << left << setfill(' ') << ans[0][j] << " "; } cout << RESET; return ans; }
a693aad6ad26e16bc51c9212b1c54f3e0ed4a8c3.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include<cstdlib> #include<ctime> #include <iostream> #include<fstream> #include <cstringt.h> #include<vector> #include<windows.h> #include<thread> #include <stdio.h> #include <iomanip> #define RED "\033[31m" #define GREEN "\033[32m" #define YELLOW "\033[33m" #define BLUE "\033[34m" #define RESET "\033[0m" int GPU_CLOCK_RATE; using namespace std; clock_t start, v1, v2, v3, finish; int readIn(float* matrixA,float* matrixB); //从指定路径读取指定二进制文件,必须提前准备好 float* matrix_product_v1(float* matrixA, float* matrixB, int ma, int na, int mb, int nb); float** matrix_product_v2(float** matrixA, float** matrixB); float** matrix_product_CUDA(float** matrixA, float** matrixB); float** matrixCut(float* matrixA); const int THREAD_NUM = thread::hardware_concurrency(); const int DATA_LENGTH = 200000000, DATA_HALF_LENGTH = 100000000, DATA_WIDTH = 10000, DATA_WIDTH_S = 5000; int active_thread = 768 * 2; struct matrix { int m, n; float* value; }; __global__ void dot_product_Kernel(float* input, float* answer) { int active_thread = 8 * 256; int x = threadIdx.x; int y = blockIdx.x; int pos = x * y; int left = pos; int right = DATA_HALF_LENGTH - 1 + pos; float ans = 0; while (right < DATA_LENGTH) { ans += input[left] * input[right]; left += active_thread; right += active_thread; } answer[pos] = ans; } __global__ void matrix_minus_Kernel(float* ans,float* matrixA,float* matrixB) { int active_thread = 8 * 256; int x = threadIdx.x; int y = blockIdx.x; int pos = y * 256+x; int tmp = DATA_WIDTH_S * DATA_WIDTH_S; while (pos < tmp) { ans[pos] = matrixA[pos] - matrixB[pos]; pos += active_thread; } } __global__ void matrix_add_Kernel(float* ans, float* matrixA, float* matrixB) { int active_thread = 8 * 256; int x = threadIdx.x; int y = blockIdx.x; int pos = y * 256 + x; int tmp = DATA_WIDTH_S * DATA_WIDTH_S; while (pos < tmp) { ans[pos] = matrixA[pos] + matrixB[pos]; pos += active_thread; } } __global__ void matrix_product_Kernel(float* ans, float* matrixA, float* matrixB) { int x = threadIdx.x; int y = blockIdx.x; int pos = y * 256 + x; int active_thread = 8 * 256; int ma = DATA_WIDTH_S; for (int i = 0; i < ma; i++) { pos = pos % (ma); for (pos; pos < ma; pos+=active_thread) { int a = i * ma + pos; ans[a] = 0; for (int k = 0; k + 9 < ma; k += 10) { int b = i * ma + k; int c = k * ma + pos; ans[a] += matrixA[b++] * matrixB[c]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; } } } } __global__ void smatrix_product_Kernel(float* ans, float* matrixA, float* matrixB) { int x = threadIdx.x; int y = blockIdx.x; int ma = DATA_WIDTH_S; for (int i = y; i+8 < ma; i+=8) { __shared__ float row[DATA_WIDTH_S]; int tmp = x; while (tmp < DATA_WIDTH_S) { row[tmp] = matrixA[i * ma + tmp]; tmp += 256; } __syncthreads(); for (int j = x; j< ma;j += 256) { int a = i * ma + j; ans[a] = 0; for (int k = 0; k + 9 < ma; k += 10) { int b = k; int c = k * ma + j; float answer = 0; answer += row[b++] * matrixB[c]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] 
* matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; answer += row[b++] * matrixB[c += ma]; ans[a] += answer; } } } } __global__ void warmup() { /*预热GPU,调用一个空的核函数*/ } void printDeviceProp(const cudaDeviceProp& prop) { printf("Device Name : %s\n", prop.name); //printf("totalGlobalMem : %d.\n", prop.totalGlobalMem); //printf("sharedMemPerBlock : %d.\n", prop.sharedMemPerBlock); //printf("regsPerBlock : %d.\n", prop.regsPerBlock); printf("warpSize : %d\n", prop.warpSize); //printf("memPitch : %d.\n", prop.memPitch); printf("maxThreadsPerBlock : %d\n", prop.maxThreadsPerBlock); printf("maxThreadsDim[0 - 2] : %d %d %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf("maxGridSize[0 - 2] : %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); //printf("totalConstMem : %d.\n", prop.totalConstMem); //printf("major.minor : %d.%d.\n", prop.major, prop.minor); printf("clockRate : %d\n", prop.clockRate); //printf("textureAlignment : %d.\n", prop.textureAlignment); //printf("deviceOverlap : %d.\n", prop.deviceOverlap); printf("multiProcessorCount : %d\n", prop.multiProcessorCount); } bool InitCUDA(){ cudaError_t cudaStatus; int count; cudaGetDeviceCount(&count); if (count == 0) { fprintf(stderr, "There is no device.\n"); } int i; for (i = 0; i < count; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("CUDA device info as below:\n"); printDeviceProp(prop); GPU_CLOCK_RATE = prop.clockRate; if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if (prop.major >= 1) { break; } } } if (i == count) { fprintf(stderr, "There is no device supporting CUDA 11.x.\n"); } cudaSetDevice(i); return true; } int main() { cout << RESET; printf("-------------Matrix Product---------------\nEnter number to choose:\n --------------------------------------\n|1-Calculate by inputing matrix A and B|\n --------------------------------------\n|2-Calculate by existed 200000000 data |\n --------------------------------------\nYour choice:"); int op; cin >> op; if (op == 1) { printf("Enter the length of Matrix A(\"length\" \"width\"):"); int ma; int na; cin >> ma; cin >> na; float* matrixA = new float[ma * na]; printf("Enter the Matrix A line by line:\n"); for (int i = 0; i < ma; i++) { for (int j = 0; j < na; j++) { cin >> matrixA[i * na + j]; } } cout<<BLUE << ("Matrix A input complete:\n"); for (int i = 0; i < ma; i++) { for (int j = 0; j < na; j++) { cout << setw(13) << left<<setfill(' ') << matrixA[i * na + j]<<" "; } cout << endl; } cout << RESET; printf("Enter the length of Matrix B:(\"length\" \"width\"):"); int mb, nb; cin >> mb; cin >> nb; float* matrixB = new float[mb * nb]; printf("Enter the Matrix B line by line:\n"); for (int i = 0; i < mb; i++) { for (int j = 0; j < nb; j++) { cin >> matrixB[i * nb + j]; } } cout <<BLUE<< ("Matrix B input complete:\n"); for (int i = 0; i < mb; i++) { for (int j = 0; j < nb; j++) { cout << setw(13) << left << setfill(' ') << matrixB[i * nb + j] << " "; } cout << endl; } cout << RESET; if (na != mb) { cout <<RED<< "Wrong length!Please cheack carefully" << endl; cout << RESET; } else { float* ans = matrix_product_v1(matrixA, matrixB, ma, na, mb, nb); cout <<YELLOW<< "-------------------------------\nComplete!The answer matrix is:\n"; cout << RESET; for (int i = 0; i < ma; i++) { for (int j = 0; j < nb; j++) { cout << setw(13) << left << setfill(' ') << ans[i * nb + j] << " "; } cout << endl; } } } else { cout<<GREEN<<"Initializing CUDA device......\n"; cout << RESET; InitCUDA(); float* input = new 
float[DATA_HALF_LENGTH]; float* matrixA = new float[DATA_HALF_LENGTH]; float* matrixB = new float[DATA_HALF_LENGTH]; cout << GREEN << "Reading data from disk.....Please wait\n"; cout << RESET; readIn(matrixA,matrixB); //读入数据 float** ans; start = clock(); cout << GREEN << "Calculating in brute Force....." << RED << "WARNING:This will take a long time!" << endl; cout << RESET; float* ans1 = matrix_product_v1(matrixA, matrixB, DATA_WIDTH, DATA_WIDTH, DATA_WIDTH, DATA_WIDTH); v1 = clock(); cout << "Calculate time for [brute force in prue CPU] : " << (double)(v1 - start) / (CLOCKS_PER_SEC * 60) <<" Minutes"<< endl; cout <<BLUE<< "Calculate answer :\n "<< endl; for (int j = 0; j < 10; j++) { cout << setw(13) << left << setfill(' ') << ans1[j] << " "; } cout << RESET; cout << endl; cout << "Cores in this computer : " << THREAD_NUM << endl; float** Acut = matrixCut(matrixA); float** Bcut = matrixCut(matrixB); cout <<GREEN<< "Calculating in [Strassen + multi CPU]....." << endl; cout << RESET; ans = matrix_product_v2(Acut,Bcut); cout << "Calculate time for [Strassen + multi CPU] : " << (double)(finish - start) / (CLOCKS_PER_SEC*60) <<" Minutes"<< endl; cout <<BLUE<< "Calculate answer (first 10 numbers in answer matrix) : "<< endl; for (int j = 0; j < 10; j++) { cout << setw(13) << left << setfill(' ') << ans[0][j] << " "; } cout << RESET; cout << endl; cout << GREEN << "Calculating by [CUDA+Strassen]......." << endl; cout << RESET; ans = matrix_product_CUDA(Acut,Bcut); } return 0; } int readIn(float* matrixA,float* matrixB) { start = clock(); int count = 0; ifstream ifile; ifile.open("D:\\Cpppppppp\\ldFeature.bin", ios::binary); float b; int i = 0; while (i++ < DATA_HALF_LENGTH) { ifile.read((char*)&b, sizeof(b)); //cout << b << " "; matrixA[count++] = b; } i = 0; count = 0; while (i++ < DATA_HALF_LENGTH) { ifile.read((char*)&b, sizeof(b)); //cout << b << " "; matrixB[count++] = b; } ifile.close(); cout << "Total numbers in vector:" << count*2 << endl; finish = clock(); cout << "IO read-in time : " << (double)(finish - start) / CLOCKS_PER_SEC << " Seconds"<<endl; return 1; } float** matrixCut(float* matrixA) { float** ans = new float*[4]; float* temp1 = new float[DATA_WIDTH_S* DATA_WIDTH_S]; float* temp2 = new float[DATA_WIDTH_S* DATA_WIDTH_S]; for (int i = 0; i < DATA_WIDTH_S; i++) { for (int j = 0; j < DATA_WIDTH_S; j++) { temp1[i * DATA_WIDTH_S + j] = matrixA[i * DATA_WIDTH + j]; } } for (int i = 0; i < DATA_WIDTH_S; i++) { for (int j = 0; j < DATA_WIDTH_S; j++) { temp2[i * DATA_WIDTH_S + j] = matrixA[i * DATA_WIDTH + j+DATA_WIDTH_S]; } } ans[0] = temp1; ans[1] = temp2; float* temp3 = new float[DATA_WIDTH_S * DATA_WIDTH_S]; float* temp4 = new float[DATA_WIDTH_S * DATA_WIDTH_S]; int pass = DATA_WIDTH_S * DATA_WIDTH; for (int i = 0; i < DATA_WIDTH_S; i++) { for (int j = 0; j < DATA_WIDTH_S; j++) { temp3[i * DATA_WIDTH_S + j] = matrixA[i * DATA_WIDTH + j+pass]; } } for (int i = 0; i < DATA_WIDTH_S; i++) { for (int j = 0; j < DATA_WIDTH_S; j++) { temp4[i * DATA_WIDTH_S + j] = matrixA[i * DATA_WIDTH + j+pass+DATA_WIDTH_S]; } } ans[2] = temp3; ans[3] = temp4; delete[DATA_HALF_LENGTH] matrixA; return ans; } void threadMinus(float* ans, float* matrixA, float* matrixB, int ma, int na) { int count = 0; for (int i = 0; i < ma; i++) { for (int j = 0; j < na; j++) { ans[count] = matrixA[count] - matrixB[count]; count++; } } } void threadAdd(float* ans, float* matrixA, float* matrixB, int ma, int na) { int count = 0; for (int i = 0; i < ma; i++) { for (int j = 0; j < na; j++) { ans[count] = matrixA[count] + 
matrixB[count]; count++; } } } float* matrix_product_v1(float* matrixA, float* matrixB, int ma, int na, int mb, int nb) { float* ans = new float[ma * nb]{ 0 }; for (int i = 0; i < ma; i++) { for (int j = 0; j < nb; j++) { for (int k = 0; k< na; k++) { ans[i * nb +j] += matrixA[i* na+k] * matrixB[k* nb+j]; } } } return ans; } void thread_matrix_product(float* ans, float* matrixA, float* matrixB, int ma) { for (int i = 0; i < ma; i++) { for (int j = 0; j < ma; j++) { for (int k = 0; k + 9 < ma; k += 10) { int a = i * ma + j; int b = i * ma + k; int c = k * ma + j; ans[a] += matrixA[b++] * matrixB[c]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; ans[a] += matrixA[b++] * matrixB[c += ma]; } } } } float** matrix_product_v2(float** matrixA, float** matrixB) { float* A1 = matrixA[0]; float* A2 = matrixA[1]; float* A3 = matrixA[2]; float* A4 = matrixA[3]; float* B1 = matrixB[0]; float* B2 = matrixB[1]; float* B3 = matrixB[2]; float* B4 = matrixB[3]; start = clock(); thread thread_pool_S[10]; int temp = DATA_WIDTH_S * DATA_WIDTH_S; float* S1 = new float[temp]; float* S2 = new float[temp]; float* S3 = new float[temp]; float* S4 = new float[temp]; float* S5 = new float[temp]; float* S6 = new float[temp]; float* S7 = new float[temp]; float* S8 = new float[temp]; float* S9 = new float[temp]; float* S10 = new float[temp]; thread_pool_S[0] = thread(threadMinus, S1, B1, B4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[1] = thread(threadAdd, S2, A1, A2, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[2] = thread(threadAdd, S3, A3, A4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[3] = thread(threadMinus, S4, B3, B1, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[4] = thread(threadAdd, S5, A1, A4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[5] = thread(threadAdd, S6, B1, B4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[6] = thread(threadMinus, S7, A2, A4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[7] = thread(threadAdd, S8, B3, B4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[8] = thread(threadMinus, S9, A1, A3, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_S[9] = thread(threadAdd, S10, B1, B2, DATA_WIDTH_S, DATA_WIDTH_S); for (int i = 0; i < 10; i++) { thread_pool_S[i].join(); } thread thread_pool_P[7]; float* P1 = new float[temp] {0}; float* P2 = new float[temp] {0}; float* P3 = new float[temp] {0}; float* P4 = new float[temp] {0}; float* P5 = new float[temp] {0}; float* P6 = new float[temp] {0}; float* P7 = new float[temp] {0}; thread_pool_P[0] = thread(thread_matrix_product, P1, A1, S1, DATA_WIDTH_S); thread_pool_P[1] = thread(thread_matrix_product, P2, S2, B4, DATA_WIDTH_S); thread_pool_P[2] = thread(thread_matrix_product, P3, S3, B1, DATA_WIDTH_S); thread_pool_P[3] = thread(thread_matrix_product, P4, A4, S4, DATA_WIDTH_S); thread_pool_P[4] = thread(thread_matrix_product, P5, S5, S6, DATA_WIDTH_S); thread_pool_P[5] = thread(thread_matrix_product, P6, S7, S8, DATA_WIDTH_S); thread_pool_P[6] = thread(thread_matrix_product, P7, S9, S10, DATA_WIDTH_S); for (int i = 0; i < 7; i++) { thread_pool_P[i].join(); } delete[] S9, S10; float** ans = new float*[4]; thread thread_pool_F[4]; thread_pool_F[0] = thread(threadAdd,S5, P5, P4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_F[1] = thread(threadMinus, S6, P2, P6, DATA_WIDTH_S, DATA_WIDTH_S); 
thread_pool_F[2] = thread(threadAdd, S7, P5, P1, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_F[3] = thread(threadAdd, S8, P3, P7, DATA_WIDTH_S, DATA_WIDTH_S); for (int i = 0; i < 4; i++) { thread_pool_F[i].join(); } thread_pool_F[0] = thread(threadMinus, S1, S5, S6, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_F[1] = thread(threadAdd, S2, P1, P2, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_F[2] = thread(threadAdd, S3, P3, P4, DATA_WIDTH_S, DATA_WIDTH_S); thread_pool_F[3] = thread(threadMinus, S4, S7, S8, DATA_WIDTH_S, DATA_WIDTH_S); for (int i = 0; i < 4; i++) { thread_pool_F[i].join(); } ans[0] = S1; ans[1] = S2; ans[2] = S3; ans[3] = S4; finish = clock(); return ans; } float** matrix_product_CUDA(float** matrixA, float** matrixB) { cout << "Copying data from RAM to GPU memory....." << endl; cudaError_t cudaStatus; int length = DATA_WIDTH_S * DATA_WIDTH_S; float* dev_A1; float* dev_A2; float* dev_A3; float* dev_A4; float* dev_B1; float* dev_B2; float* dev_B3; float* dev_B4; float* S1; float* S2; float* S3; float* S4; float* S5; float* S6; float* S7; float* S8; float* S9; float* S10; float* P1; float* P2; float* P3; float* P4; float* P5; float* P6; float* P7; cudaStatus = cudaMalloc((void**)&dev_A1, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&dev_A2, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&dev_A3, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&dev_A4, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&dev_B1, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&dev_B2, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&dev_B3, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&dev_B4, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&S1, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&S2, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&S3, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&S4, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&S5, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&S6, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&S7, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&S8, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&S9, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&S10, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&P1, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&P2, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&P3, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&P4, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&P5, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&P6, length * sizeof(float)); cudaStatus = cudaMalloc((void**)&P7, length * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMemcpy(dev_A1, matrixA[0], length, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy(dev_A2, matrixA[1], length * sizeof(float), cudaMemcpyHostToDevice); cudaStatus = cudaMemcpy(dev_A3, matrixA[2], length * sizeof(float), cudaMemcpyHostToDevice); cudaStatus = cudaMemcpy(dev_A4, matrixA[3], length * sizeof(float), cudaMemcpyHostToDevice); cudaStatus = cudaMemcpy(dev_B1, matrixB[0], length * sizeof(float), cudaMemcpyHostToDevice); cudaStatus = cudaMemcpy(dev_B2, matrixB[1], length * sizeof(float), cudaMemcpyHostToDevice); cudaStatus = cudaMemcpy(dev_B3, matrixB[2], length * sizeof(float), cudaMemcpyHostToDevice); cudaStatus = 
cudaMemcpy(dev_B4, matrixB[3], length * sizeof(float), cudaMemcpyHostToDevice); finish = clock(); cout << "Data copy time for GPU : " << (double)(finish - start) / CLOCKS_PER_SEC << " Seconds" << endl; start = clock(); warmup << <1, 1 >> > (); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); { matrix_minus_Kernel << <8, 256 >> > (S1, dev_B1, dev_B4); matrix_add_Kernel << <8, 256 >> > (S2, dev_A1, dev_A2); matrix_add_Kernel << <8, 256 >> > (S3, dev_A3, dev_A4); matrix_minus_Kernel << <8, 256 >> > (S4, dev_B3, dev_B1); matrix_add_Kernel << <8, 256 >> > (S5, dev_A1, dev_A4); matrix_add_Kernel << <8, 256 >> > (S6, dev_B1, dev_B4); matrix_minus_Kernel << <8, 256 >> > (S7, dev_A2, dev_A4); matrix_add_Kernel << <8, 256 >> > (S8, dev_B3, dev_B4); matrix_minus_Kernel << <8, 256 >> > (S9, dev_A1, dev_A3); matrix_add_Kernel << <8, 256 >> > (S10, dev_B1, dev_B2); cudaDeviceSynchronize(); cout << "S part finished" << endl; matrix_product_Kernel << <8, 256 >> > (P1, dev_A1, S1); matrix_product_Kernel << <8, 256 >> > (P2, S2, dev_B4); matrix_product_Kernel << <8, 256 >> > (P3, S3, dev_B1); matrix_product_Kernel << <8, 256 >> > (P4, dev_A4, S4); matrix_product_Kernel << <8, 256 >> > (P5, S5, S6); matrix_product_Kernel << <8, 256 >> > (P6, S7, S8); matrix_product_Kernel << <8, 256 >> > (P7, S9, S10); cudaDeviceSynchronize(); cout << "P part finished" << endl; matrix_add_Kernel << <8, 256 >> > (S5, P5, P4); matrix_minus_Kernel << <8, 256 >> > (S6, P2, P6); matrix_add_Kernel << <8, 256 >> > (S7, P5, P1); matrix_add_Kernel << <8, 256 >> > (S8, P3, P7); cudaDeviceSynchronize(); matrix_minus_Kernel << <8, 256 >> > (S1, S5, S6); matrix_add_Kernel << <8, 256 >> > (S2, P1, P2); matrix_add_Kernel << <8, 256 >> > (S3, P3, P4); matrix_minus_Kernel << <8, 256 >> > (S4, S7, S8); cout << "C part finished" << endl; } cudaEventRecord(stop, 0); cudaEventSynchronize(start); cudaEventSynchronize(stop); float costtime; cudaEventElapsedTime(&costtime, start, stop); float** ans = new float* [4]; float* ans0 = new float[length]; float* ans1 = new float[length]; float* ans2 = new float[length]; float* ans3 = new float[length]; cudaStatus = cudaMemcpy(ans0, S1, length, cudaMemcpyDeviceToHost); cudaStatus = cudaMemcpy(ans1, S2, length, cudaMemcpyDeviceToHost); cudaStatus = cudaMemcpy(ans2, S3, length, cudaMemcpyDeviceToHost); cudaStatus = cudaMemcpy(ans3, S4, length, cudaMemcpyDeviceToHost); ans[0] = ans0; ans[1] = ans1; ans[2] = ans1; ans[3] = ans3; if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cout << "Calculate time for [CUDA+Strassen] : " << costtime / (1000 * 60) << " Minutes" << endl; //cudaStatus = cudaMemcpy(answer, dev_answer, active_thread * sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cout << BLUE << "Calculate answer (first 10 numbers in answer matrix) : " << endl; for (int j = 0; j < 10; j++) { cout << setw(13) << left << setfill(' ') << ans[0][j] << " "; } cout << RESET; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); { matrix_minus_Kernel << <8, 256 >> > (S1, dev_B1, dev_B4); matrix_add_Kernel << <8, 256 >> > (S2, dev_A1, dev_A2); matrix_add_Kernel << <8, 256 >> > (S3, dev_A3, dev_A4); matrix_minus_Kernel << <8, 256 >> > (S4, dev_B3, dev_B1); matrix_add_Kernel << <8, 256 >> > (S5, dev_A1, dev_A4); matrix_add_Kernel << <8, 256 >> > (S6, dev_B1, dev_B4); matrix_minus_Kernel << <8, 256 >> > (S7, dev_A2, dev_A4); matrix_add_Kernel << <8, 256 >> > 
(S8, dev_B3, dev_B4); matrix_minus_Kernel << <8, 256 >> > (S9, dev_A1, dev_A3); matrix_add_Kernel << <8, 256 >> > (S10, dev_B1, dev_B2); cudaDeviceSynchronize(); cout << "S part finished" << endl; smatrix_product_Kernel << <8, 256 >> > (P1, dev_A1, S1); smatrix_product_Kernel << <8, 256 >> > (P2, S2, dev_B4); smatrix_product_Kernel << <8, 256 >> > (P3, S3, dev_B1); smatrix_product_Kernel << <8, 256 >> > (P4, dev_A4, S4); smatrix_product_Kernel << <8, 256 >> > (P5, S5, S6); smatrix_product_Kernel << <8, 256 >> > (P6, S7, S8); smatrix_product_Kernel << <8, 256 >> > (P7, S9, S10); cudaDeviceSynchronize(); cout << "P part finished" << endl; matrix_add_Kernel << <8, 256 >> > (S5, P5, P4); matrix_minus_Kernel << <8, 256 >> > (S6, P2, P6); matrix_add_Kernel << <8, 256 >> > (S7, P5, P1); matrix_add_Kernel << <8, 256 >> > (S8, P3, P7); cudaDeviceSynchronize(); matrix_minus_Kernel << <8, 256 >> > (S1, S5, S6); matrix_add_Kernel << <8, 256 >> > (S2, P1, P2); matrix_add_Kernel << <8, 256 >> > (S3, P3, P4); matrix_minus_Kernel << <8, 256 >> > (S4, S7, S8); cout << "C part finished" << endl; } cudaEventRecord(stop, 0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&costtime, start, stop); cudaStatus = cudaMemcpy(ans0, S1, length, cudaMemcpyDeviceToHost); cudaStatus = cudaMemcpy(ans1, S2, length, cudaMemcpyDeviceToHost); cudaStatus = cudaMemcpy(ans2, S3, length, cudaMemcpyDeviceToHost); cudaStatus = cudaMemcpy(ans3, S4, length, cudaMemcpyDeviceToHost); ans[0] = ans0; ans[1] = ans1; ans[2] = ans1; ans[3] = ans3; cout << "Calculate time for [CUDA+Strassen+Shared Memory] : " << costtime / (1000 * 60) << " Minutes" << endl; cout << BLUE << "Calculate answer (first 10 numbers in answer matrix) : " << endl; for (int j = 0; j < 10; j++) { cout << setw(13) << left << setfill(' ') << ans[0][j] << " "; } cout << RESET; return ans; }
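Both versions of this file stage the Strassen recombination through the S1..S10 and P1..P7 device buffers and then reuse S1..S4 as the four output quadrants. Below is a minimal scalar sketch of the textbook Strassen identities that this S/P/C structure mirrors; the quadrant labels A1..A4 = A11,A12,A21,A22 and B1..B4 = B11,B12,B21,B22 are an assumption about the intended layout, everything else is the standard formulation checked against an ordinary 2x2 product.

// Scalar stand-ins for the 5000x5000 quadrants used by the kernels above.
#include <cstdio>

int main() {
    float A1 = 1, A2 = 2, A3 = 3, A4 = 4;
    float B1 = 5, B2 = 6, B3 = 7, B4 = 8;

    // Ten sums/differences, then the seven Strassen products (textbook form).
    float S1 = B2 - B4, S2 = A1 + A2, S3 = A3 + A4, S4 = B3 - B1, S5 = A1 + A4;
    float S6 = B1 + B4, S7 = A2 - A4, S8 = B3 + B4, S9 = A1 - A3, S10 = B1 + B2;

    float P1 = A1 * S1, P2 = S2 * B4, P3 = S3 * B1, P4 = A4 * S4;
    float P5 = S5 * S6, P6 = S7 * S8, P7 = S9 * S10;

    // Recombination, matching the final S1..S4 outputs of the code above.
    float C11 = P5 + P4 - P2 + P6;   // top-left quadrant
    float C12 = P1 + P2;             // top-right
    float C21 = P3 + P4;             // bottom-left
    float C22 = P5 + P1 - P3 - P7;   // bottom-right

    printf("%g %g %g %g\n", C11, C12, C21, C22);   // expect 19 22 43 50
    return 0;
}

For 2x2 scalars the printed quadrants must come out as 19 22 43 50, which is a quick way to sanity-check any reshuffling of the S/P kernel calls.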
209e9fb02054c388da8412a5826990c04391596b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "needle.h"
#include <stdio.h>

#define SDATA( index) CUT_BANK_CHECKER(sdata, index)

__device__ __host__ int maximum( int a, int b, int c){
    int k;
    if( a <= b )
        k = b;
    else
        k = a;
    if( k <=c )
        return(c);
    else
        return(k);
}

__global__ void needle_cuda_shared_1( int* referrence, int* matrix_cuda, int cols, int penalty, int i, int block_width)
{
    int bx = blockIdx.x;
    int tx = threadIdx.x;

    int b_index_x = bx;
    int b_index_y = i - 1 - bx;

    int index    = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
    int index_n  = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
    int index_w  = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
    int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;

    __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
    __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];

    if (tx == 0)
        temp[tx][0] = matrix_cuda[index_nw];

    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        ref[ty][tx] = referrence[index + cols * ty];
    __syncthreads();

    temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
    __syncthreads();

    temp[0][tx + 1] = matrix_cuda[index_n];
    __syncthreads();

    for( int m = 0 ; m < BLOCK_SIZE ; m++){
        if ( tx <= m ){
            int t_index_x = tx + 1;
            int t_index_y = m - tx + 1;
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();
    }

    for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
        if ( tx <= m){
            int t_index_x = tx + BLOCK_SIZE - m ;
            int t_index_y = BLOCK_SIZE - tx;
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();
    }

    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}

__global__ void needle_cuda_shared_2( int* referrence, int* matrix_cuda, int cols, int penalty, int i, int block_width)
{
    int bx = blockIdx.x;
    int tx = threadIdx.x;

    int b_index_x = bx + block_width - i ;
    int b_index_y = block_width - bx -1;

    int index    = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
    int index_n  = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
    int index_w  = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
    int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;

    __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
    __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];

    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        ref[ty][tx] = referrence[index + cols * ty];
    __syncthreads();

    if (tx == 0)
        temp[tx][0] = matrix_cuda[index_nw];

    temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
    __syncthreads();

    temp[0][tx + 1] = matrix_cuda[index_n];
    __syncthreads();

    for( int m = 0 ; m < BLOCK_SIZE ; m++){
        if ( tx <= m ){
            int t_index_x = tx + 1;
            int t_index_y = m - tx + 1;
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();
    }

    for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
        if ( tx <= m){
            int t_index_x = tx + BLOCK_SIZE - m ;
            int t_index_y = BLOCK_SIZE - tx;
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();
    }

    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
209e9fb02054c388da8412a5826990c04391596b.cu
#include "needle.h" #include <stdio.h> #define SDATA( index) CUT_BANK_CHECKER(sdata, index) __device__ __host__ int maximum( int a, int b, int c){ int k; if( a <= b ) k = b; else k = a; if( k <=c ) return(c); else return(k); } __global__ void needle_cuda_shared_1( int* referrence, int* matrix_cuda, int cols, int penalty, int i, int block_width) { int bx = blockIdx.x; int tx = threadIdx.x; int b_index_x = bx; int b_index_y = i - 1 - bx; int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 ); int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols ); int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1]; __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE]; if (tx == 0) temp[tx][0] = matrix_cuda[index_nw]; for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) ref[ty][tx] = referrence[index + cols * ty]; __syncthreads(); temp[tx + 1][0] = matrix_cuda[index_w + cols * tx]; __syncthreads(); temp[0][tx + 1] = matrix_cuda[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) matrix_cuda[index + ty * cols] = temp[ty+1][tx+1]; } __global__ void needle_cuda_shared_2( int* referrence, int* matrix_cuda, int cols, int penalty, int i, int block_width) { int bx = blockIdx.x; int tx = threadIdx.x; int b_index_x = bx + block_width - i ; int b_index_y = block_width - bx -1; int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 ); int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols ); int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1]; __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE]; for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) ref[ty][tx] = referrence[index + cols * ty]; __syncthreads(); if (tx == 0) temp[tx][0] = matrix_cuda[index_nw]; temp[tx + 1][0] = matrix_cuda[index_w + cols * tx]; __syncthreads(); temp[0][tx + 1] = matrix_cuda[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1], temp[t_index_y][t_index_x-1] - penalty, temp[t_index_y-1][t_index_x] - penalty); } __syncthreads(); } for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) matrix_cuda[index + ty * cols] = 
temp[ty+1][tx+1]; }
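The two kernels in this pair each process one anti-diagonal of BLOCK_SIZE x BLOCK_SIZE tiles of the Needleman-Wunsch score matrix; the host code that sweeps them over the matrix is not part of this pair. Below is a hedged sketch of such a driver loop; the function and variable names are illustrative, block_width is assumed to be the number of tile columns, and BLOCK_SIZE is assumed to come from needle.h.

// Hypothetical driver: sweep the wavefront over the tiled score matrix.
void run_needleman_wunsch(int* referrence_cuda, int* matrix_cuda, int max_cols, int penalty) {
    int block_width = (max_cols - 1) / BLOCK_SIZE;   // tiles per row (assumed layout)
    dim3 dimBlock(BLOCK_SIZE, 1);

    // Top-left triangle: anti-diagonals grow from 1 to block_width tiles.
    for (int i = 1; i <= block_width; i++) {
        dim3 dimGrid(i, 1);
        needle_cuda_shared_1<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda,
                                                    max_cols, penalty, i, block_width);
    }
    // Bottom-right triangle: anti-diagonals shrink back down to 1 tile.
    for (int i = block_width - 1; i >= 1; i--) {
        dim3 dimGrid(i, 1);
        needle_cuda_shared_2<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda,
                                                    max_cols, penalty, i, block_width);
    }
    cudaDeviceSynchronize();
}

needle_cuda_shared_1 grows the wavefront toward the main anti-diagonal and needle_cuda_shared_2 shrinks it back down, which is why the second loop counts i downward.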
2e47eb5f0015ded9a95fcf0e42c28ba11b13a669.hip
// !!! This is a file automatically generated by hipify!!!
/*!
 * Copyright (c) 2015 by Contributors
 * \file lrn.cu
 * \brief
 * \author Bing Xu
*/

#include "./lrn-inl.h"
#if MXNET_USE_CUDNN == 1
#include "./cudnn_lrn-inl.h"
#endif

namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(LRNParam param, int dtype) {
#if MXNET_USE_CUDNN == 1
  return new CuDNNLocalResponseNormOp(param);
#else
#if TORCH_HIP_VERSION == 7000
  LOG(FATAL) << "Due to old CUDA compiler bug, LRN is disabled."
             << "Please upgrade CUDA to 7.5+ or use CUDNN";
  return NULL;
#else
  return new LocalResponseNormOp<gpu>(param);
#endif // TORCH_HIP_VERSION
#endif // MXNET_USE_CUDNN
}
} // namespace op
} // namespace mxnet
2e47eb5f0015ded9a95fcf0e42c28ba11b13a669.cu
/*!
 * Copyright (c) 2015 by Contributors
 * \file lrn.cu
 * \brief
 * \author Bing Xu
*/

#include "./lrn-inl.h"
#if MXNET_USE_CUDNN == 1
#include "./cudnn_lrn-inl.h"
#endif

namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(LRNParam param, int dtype) {
#if MXNET_USE_CUDNN == 1
  return new CuDNNLocalResponseNormOp(param);
#else
#if CUDA_VERSION == 7000
  LOG(FATAL) << "Due to old CUDA compiler bug, LRN is disabled."
             << "Please upgrade CUDA to 7.5+ or use CUDNN";
  return NULL;
#else
  return new LocalResponseNormOp<gpu>(param);
#endif // CUDA_VERSION
#endif // MXNET_USE_CUDNN
}
} // namespace op
} // namespace mxnet
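The only change hipify makes to this file is the spelling of the toolkit version macro: CUDA_VERSION in the .cu becomes TORCH_HIP_VERSION in the .hip, while the dispatch between CuDNNLocalResponseNormOp and LocalResponseNormOp<gpu> is untouched. A standalone sketch of that guard pattern follows; PLATFORM_VERSION is a hypothetical alias, and when neither cuda.h nor the HIP headers have been included both macros are undefined, so the fallback 0 is printed.

#include <cstdio>

#if defined(TORCH_HIP_VERSION)
#define PLATFORM_VERSION TORCH_HIP_VERSION   // hipified build
#elif defined(CUDA_VERSION)
#define PLATFORM_VERSION CUDA_VERSION        // plain CUDA build (macro comes from cuda.h)
#else
#define PLATFORM_VERSION 0                   // neither header included
#endif

int main() {
    if (PLATFORM_VERSION == 7000)
        printf("CUDA 7.0 detected: the LRN path above would be disabled\n");
    else
        printf("toolkit version macro: %d\n", PLATFORM_VERSION);
    return 0;
}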
0978b15292f1c13deec24393064f23760e98f65a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void BaseNeuronGetFloatArray(float *arr1, float *arr2, int n_elem, int step1, int step2)
{
  int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (array_idx<n_elem) {
    arr2[array_idx*step2] = arr1[array_idx*step1];
  }
}
0978b15292f1c13deec24393064f23760e98f65a.cu
#include "includes.h" __global__ void BaseNeuronGetFloatArray(float *arr1, float *arr2, int n_elem, int step1, int step2) { int array_idx = threadIdx.x + blockIdx.x * blockDim.x; if (array_idx<n_elem) { arr2[array_idx*step2] = arr1[array_idx*step1]; } }
79527d9b8066d6406f7a760d909443c73b42bbb5.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "scatterKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            const unsigned int *d_In = NULL;
            hipMalloc(&d_In, XSIZE*YSIZE);
            const unsigned int *d_InPos = NULL;
            hipMalloc(&d_InPos, XSIZE*YSIZE);
            const unsigned int *d_FalseKeyAddresses = NULL;
            hipMalloc(&d_FalseKeyAddresses, XSIZE*YSIZE);
            unsigned int *d_Out = NULL;
            hipMalloc(&d_Out, XSIZE*YSIZE);
            unsigned int *d_OutPos = NULL;
            hipMalloc(&d_OutPos, XSIZE*YSIZE);
            const unsigned int totalFalses = 1;
            size_t size = XSIZE*YSIZE;
            unsigned int bitPos = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( scatterKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_In,d_InPos,d_FalseKeyAddresses,d_Out,d_OutPos,totalFalses,size,bitPos);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( scatterKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_In,d_InPos,d_FalseKeyAddresses,d_Out,d_OutPos,totalFalses,size,bitPos);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( scatterKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_In,d_InPos,d_FalseKeyAddresses,d_Out,d_OutPos,totalFalses,size,bitPos);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
79527d9b8066d6406f7a760d909443c73b42bbb5.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "scatterKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            const unsigned int *d_In = NULL;
            cudaMalloc(&d_In, XSIZE*YSIZE);
            const unsigned int *d_InPos = NULL;
            cudaMalloc(&d_InPos, XSIZE*YSIZE);
            const unsigned int *d_FalseKeyAddresses = NULL;
            cudaMalloc(&d_FalseKeyAddresses, XSIZE*YSIZE);
            unsigned int *d_Out = NULL;
            cudaMalloc(&d_Out, XSIZE*YSIZE);
            unsigned int *d_OutPos = NULL;
            cudaMalloc(&d_OutPos, XSIZE*YSIZE);
            const unsigned int totalFalses = 1;
            size_t size = XSIZE*YSIZE;
            unsigned int bitPos = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            scatterKernel<<<gridBlock,threadBlock>>>(d_In,d_InPos,d_FalseKeyAddresses,d_Out,d_OutPos,totalFalses,size,bitPos);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                scatterKernel<<<gridBlock,threadBlock>>>(d_In,d_InPos,d_FalseKeyAddresses,d_Out,d_OutPos,totalFalses,size,bitPos);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                scatterKernel<<<gridBlock,threadBlock>>>(d_In,d_InPos,d_FalseKeyAddresses,d_Out,d_OutPos,totalFalses,size,bitPos);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
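The harness in this pair times 1000 launches with std::chrono but does not synchronize after the timed loop, so the reported figure can reflect launch overhead more than completed kernel work. Below is a hedged sketch of an event-based variant that waits for completion before reading the elapsed time; it assumes the same scatterKernel signature that scatterKernel.cu provides to this file, inferred from the call site above.

#include <cuda_runtime.h>

// Hypothetical helper: time 1000 back-to-back launches with CUDA events.
float time_1000_launches(dim3 gridBlock, dim3 threadBlock,
                         const unsigned int* d_In, const unsigned int* d_InPos,
                         const unsigned int* d_FalseKeyAddresses,
                         unsigned int* d_Out, unsigned int* d_OutPos,
                         unsigned int totalFalses, size_t size, unsigned int bitPos) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < 1000; i++) {
        scatterKernel<<<gridBlock, threadBlock>>>(d_In, d_InPos, d_FalseKeyAddresses,
                                                  d_Out, d_OutPos, totalFalses, size, bitPos);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);          // block until all 1000 launches have finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms * 1000.0f;                 // microseconds, to match the harness output format
}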
4fac64edb88045ac4756a24866d55b3cb073c0ac.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <time.h> #define MAX_CUDA_THREADS_PER_BLOCK 1024 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } struct Startup{ int random_range = INT_MAX; int threads_per_block = MAX_CUDA_THREADS_PER_BLOCK; } startup; struct DataSet{ float* values; int size; }; struct Result{ float MaxValue; float KernelExecutionTime; }; DataSet generateRandomDataSet(int size){ DataSet data; data.size = size; data.values = (float*)malloc(sizeof(float)*data.size); for (int i = 0; i < data.size; i++) data.values[i] = (float)(rand()%startup.random_range); return data; } __global__ void Max_Interleaved_Addressing_Global(float* data, int data_size){ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < data_size){ for(int stride=1; stride < data_size; stride *= 2) { if (idx % (2*stride) == 0) { float lhs = data[idx]; float rhs = data[idx + stride]; data[idx] = lhs < rhs ? rhs : lhs; } __syncthreads(); } } } __global__ void Max_Interleaved_Addressing_Shared(float* data, int data_size){ int idx = blockDim.x * blockIdx.x + threadIdx.x; __shared__ float sdata[MAX_CUDA_THREADS_PER_BLOCK]; if (idx < data_size){ /*copy to shared memory*/ sdata[threadIdx.x] = data[idx]; __syncthreads(); for(int stride=1; stride < blockDim.x; stride *= 2) { if (threadIdx.x % (2*stride) == 0) { float lhs = sdata[threadIdx.x]; float rhs = sdata[threadIdx.x + stride]; sdata[threadIdx.x] = lhs < rhs ? rhs : lhs; } __syncthreads(); } } if (idx == 0) data[0] = sdata[0]; } __global__ void Max_Sequential_Addressing_Shared(float* data, int data_size){ int idx = blockDim.x * blockIdx.x + threadIdx.x; __shared__ float sdata[MAX_CUDA_THREADS_PER_BLOCK]; if (idx < data_size){ /*copy to shared memory*/ sdata[threadIdx.x] = data[idx]; __syncthreads(); for(int stride=blockDim.x/2; stride > 0; stride /= 2) { if (threadIdx.x < stride) { float lhs = sdata[threadIdx.x]; float rhs = sdata[threadIdx.x + stride]; sdata[threadIdx.x] = lhs < rhs ? rhs : lhs; } __syncthreads(); } } if (idx == 0) data[0] = sdata[0]; } /*Algorithm Information. 
Includes pointers to different kernels, so they can be executed dynamically*/ const int Algorithm_Count = 3; typedef void (*Kernel)(float *, int); const char* Algorithm_Name[Algorithm_Count]= {"Max_Interleaved_Addressing_Global", "Max_Interleaved_Addressing_Shared", "Max_Sequential_Addressing_Shared"}; const Kernel Algorithm[Algorithm_Count] = { Max_Interleaved_Addressing_Global, Max_Interleaved_Addressing_Shared, Max_Sequential_Addressing_Shared}; Result calculateMaxValue(DataSet data, Kernel algorithm){ float* device_data; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); gpuErrchk(hipMalloc((void **)&device_data, sizeof(float)*data.size)); gpuErrchk(hipMemcpy(device_data, data.values, sizeof(float)*data.size, hipMemcpyHostToDevice)); int threads_needed = data.size; hipEventRecord(start); hipLaunchKernelGGL(( algorithm), dim3(threads_needed/ startup.threads_per_block + 1), dim3(startup.threads_per_block), 0, 0, device_data, data.size); hipEventRecord(stop); gpuErrchk(hipEventSynchronize(stop)); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); float max_value; gpuErrchk(hipMemcpy(&max_value, device_data, sizeof(float), hipMemcpyDeviceToHost)); gpuErrchk(hipFree(device_data)); Result r = {max_value, milliseconds}; return r; } Result calculateMaxValue(DataSet data){ return calculateMaxValue(data, Algorithm[Algorithm_Count - 1]); } void printDataSet(DataSet data){ for (int i = 0; i < data.size; i++) printf("%.6g, ", data.values[i]); printf("\n"); } void benchmarkCSV(){ /*Print Headers*/ printf("Elements, "); for (int algoID = 0; algoID < Algorithm_Count; algoID++) printf("%s, ", Algorithm_Name[algoID]); printf("\n"); /*Benchmark*/ for (int dataSize = 2; dataSize < INT_MAX; dataSize*=2){ DataSet random = generateRandomDataSet(dataSize); printf("%d, ", dataSize); for (int algoID = 0; algoID < Algorithm_Count; algoID++) { Result r = calculateMaxValue(random, Algorithm[algoID]); printf("%g, ", r.KernelExecutionTime); } printf("\n"); free(random.values); } } int main(int argc, char** argv){ srand(time(nullptr)); benchmarkCSV(); }
4fac64edb88045ac4756a24866d55b3cb073c0ac.cu
#include <stdio.h> #include <cuda.h> #include <time.h> #define MAX_CUDA_THREADS_PER_BLOCK 1024 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } struct Startup{ int random_range = INT_MAX; int threads_per_block = MAX_CUDA_THREADS_PER_BLOCK; } startup; struct DataSet{ float* values; int size; }; struct Result{ float MaxValue; float KernelExecutionTime; }; DataSet generateRandomDataSet(int size){ DataSet data; data.size = size; data.values = (float*)malloc(sizeof(float)*data.size); for (int i = 0; i < data.size; i++) data.values[i] = (float)(rand()%startup.random_range); return data; } __global__ void Max_Interleaved_Addressing_Global(float* data, int data_size){ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < data_size){ for(int stride=1; stride < data_size; stride *= 2) { if (idx % (2*stride) == 0) { float lhs = data[idx]; float rhs = data[idx + stride]; data[idx] = lhs < rhs ? rhs : lhs; } __syncthreads(); } } } __global__ void Max_Interleaved_Addressing_Shared(float* data, int data_size){ int idx = blockDim.x * blockIdx.x + threadIdx.x; __shared__ float sdata[MAX_CUDA_THREADS_PER_BLOCK]; if (idx < data_size){ /*copy to shared memory*/ sdata[threadIdx.x] = data[idx]; __syncthreads(); for(int stride=1; stride < blockDim.x; stride *= 2) { if (threadIdx.x % (2*stride) == 0) { float lhs = sdata[threadIdx.x]; float rhs = sdata[threadIdx.x + stride]; sdata[threadIdx.x] = lhs < rhs ? rhs : lhs; } __syncthreads(); } } if (idx == 0) data[0] = sdata[0]; } __global__ void Max_Sequential_Addressing_Shared(float* data, int data_size){ int idx = blockDim.x * blockIdx.x + threadIdx.x; __shared__ float sdata[MAX_CUDA_THREADS_PER_BLOCK]; if (idx < data_size){ /*copy to shared memory*/ sdata[threadIdx.x] = data[idx]; __syncthreads(); for(int stride=blockDim.x/2; stride > 0; stride /= 2) { if (threadIdx.x < stride) { float lhs = sdata[threadIdx.x]; float rhs = sdata[threadIdx.x + stride]; sdata[threadIdx.x] = lhs < rhs ? rhs : lhs; } __syncthreads(); } } if (idx == 0) data[0] = sdata[0]; } /*Algorithm Information. 
Includes pointers to different kernels, so they can be executed dynamically*/ const int Algorithm_Count = 3; typedef void (*Kernel)(float *, int); const char* Algorithm_Name[Algorithm_Count]= {"Max_Interleaved_Addressing_Global", "Max_Interleaved_Addressing_Shared", "Max_Sequential_Addressing_Shared"}; const Kernel Algorithm[Algorithm_Count] = { Max_Interleaved_Addressing_Global, Max_Interleaved_Addressing_Shared, Max_Sequential_Addressing_Shared}; Result calculateMaxValue(DataSet data, Kernel algorithm){ float* device_data; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); gpuErrchk(cudaMalloc((void **)&device_data, sizeof(float)*data.size)); gpuErrchk(cudaMemcpy(device_data, data.values, sizeof(float)*data.size, cudaMemcpyHostToDevice)); int threads_needed = data.size; cudaEventRecord(start); algorithm<<< threads_needed/ startup.threads_per_block + 1, startup.threads_per_block>>>(device_data, data.size); cudaEventRecord(stop); gpuErrchk(cudaEventSynchronize(stop)); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); float max_value; gpuErrchk(cudaMemcpy(&max_value, device_data, sizeof(float), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(device_data)); Result r = {max_value, milliseconds}; return r; } Result calculateMaxValue(DataSet data){ return calculateMaxValue(data, Algorithm[Algorithm_Count - 1]); } void printDataSet(DataSet data){ for (int i = 0; i < data.size; i++) printf("%.6g, ", data.values[i]); printf("\n"); } void benchmarkCSV(){ /*Print Headers*/ printf("Elements, "); for (int algoID = 0; algoID < Algorithm_Count; algoID++) printf("%s, ", Algorithm_Name[algoID]); printf("\n"); /*Benchmark*/ for (int dataSize = 2; dataSize < INT_MAX; dataSize*=2){ DataSet random = generateRandomDataSet(dataSize); printf("%d, ", dataSize); for (int algoID = 0; algoID < Algorithm_Count; algoID++) { Result r = calculateMaxValue(random, Algorithm[algoID]); printf("%g, ", r.KernelExecutionTime); } printf("\n"); free(random.values); } } int main(int argc, char** argv){ srand(time(nullptr)); benchmarkCSV(); }
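The benchmark above compares interleaved addressing (global and shared) against sequential addressing in shared memory. A variant often benchmarked next to these is a warp-shuffle reduction; the sketch below is not part of the original file — it merely matches the Kernel typedef so it could be appended to the Algorithm table. It assumes blockDim.x is a multiple of 32 (true for the harness's threads_per_block of 1024) and that FLT_MAX is available via <cfloat>, and, like the shared-memory kernels above, it stores only block 0's result into data[0] since the harness measures time rather than checking a grid-wide maximum.

__global__ void Max_Warp_Shuffle_Sketch(float* data, int data_size){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    float v = (idx < data_size) ? data[idx] : -FLT_MAX;    /* pad out-of-range threads */
    int lane = threadIdx.x & 31;
    int warp = threadIdx.x >> 5;
    /* reduce within each warp -- no shared memory or __syncthreads needed here */
    for (int offset = 16; offset > 0; offset >>= 1)
        v = fmaxf(v, __shfl_down_sync(0xffffffff, v, offset));
    __shared__ float warpMax[32];                          /* one slot per warp (<= 1024 threads) */
    if (lane == 0) warpMax[warp] = v;
    __syncthreads();
    if (warp == 0) {
        /* first warp reduces the per-warp maxima */
        int nWarps = (blockDim.x + 31) / 32;
        v = (lane < nWarps) ? warpMax[lane] : -FLT_MAX;
        for (int offset = 16; offset > 0; offset >>= 1)
            v = fmaxf(v, __shfl_down_sync(0xffffffff, v, offset));
        /* mirror the originals: only the first thread of the grid stores its block's result */
        if (idx == 0) data[0] = v;
    }
}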
8e7e8479aae80fb0896249c77ed19a96c12addfb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "grasta_cuda_kernels.cuh" __global__ void shrinkKernel(const float *x, float *s, float gamma){ int i = blockDim.x * blockIdx.x + threadIdx.x; const float t = x[i]; s[i] = (t - gamma * ((t > 0) - (t < 0))) * (fabsf(t) > gamma); } __global__ void larbOrthAltKernel(float *B, float *x, float *w, float *s, float *y, float rho, float maxiter) { for(int i = 0; i < maxiter; ++i){ } } __global__ void setupSparseKernel_2(int use_number, int m, int n, float scale, float *smallx, float *smallB, float *B, int *use_index) { int i = blockIdx.x * blockDim.x + threadIdx.x; smallx[i] = smallx[i] / scale; __syncthreads(); for(int j = 0; j < n; ++j){ smallB[ j * use_number + i ] = B[ j * m + use_index[i]]; } }
8e7e8479aae80fb0896249c77ed19a96c12addfb.cu
#include "grasta_cuda_kernels.cuh" __global__ void shrinkKernel(const float *x, float *s, float gamma){ int i = blockDim.x * blockIdx.x + threadIdx.x; const float t = x[i]; s[i] = (t - gamma * ((t > 0) - (t < 0))) * (fabsf(t) > gamma); } __global__ void larbOrthAltKernel(float *B, float *x, float *w, float *s, float *y, float rho, float maxiter) { for(int i = 0; i < maxiter; ++i){ } } __global__ void setupSparseKernel_2(int use_number, int m, int n, float scale, float *smallx, float *smallB, float *B, int *use_index) { int i = blockIdx.x * blockDim.x + threadIdx.x; smallx[i] = smallx[i] / scale; __syncthreads(); for(int j = 0; j < n; ++j){ smallB[ j * use_number + i ] = B[ j * m + use_index[i]]; } }
8087ea857b5a9de30345c394ae8f14de2bd6bcb4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @author Mark Gates @generated from zlanhe.cu normal z -> c, Tue Sep 2 12:38:15 2014 */ #include "common_magma.h" #define inf_bs 32 #define max_bs 64 #define PRECISION_c __global__ void clanhe_inf_kernel_generic_upper( int n, const magmaFloatComplex* A, int lda, float *dwork, int n_full_block, int n_mod_bs ); /* ====================================================================== */ /* inf-norm */ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored lower. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */ __global__ void clanhe_inf_kernel_generic_lower( int n, const magmaFloatComplex* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ magmaFloatComplex la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block rows A += ind; A += ty * lda; // ---------- // loop over all blocks left of the diagonal block for(int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy lower triangle to upper triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for(int i=ty*8; i < ty*8 + 8; i++) { if ( i < tx ) { la[i][tx] = la[tx][i]; } #if defined(PRECISION_z) || defined(PRECISION_c) else if ( i == tx ) { la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks below diagonal block A += inf_bs; for(int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; } A += inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is (n_mod_bs by inf_bs) if ( n_mod_bs > 0 ) { // load block (transposed), with zeros for rows outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( tx < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } else { la[ty+j][tx] = MAGMA_C_ZERO; } } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_C_MAKE( res, 0. 
); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block row // Threads past end of matrix (i.e., ind >= n) are redundantly assigned // the last row (n-1). At the end, those results are ignored -- only // results for ind < n are saved into dwork. if ( tx < n_mod_bs ) { A += ind; } else { A += (blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row } A += ty * lda; // ---------- // loop over all blocks left of the diagonal block // each is (n_mod_bs by inf_bs) for(int i=0; i < diag; i += inf_bs ) { // load block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for(int j=0; j < 8; j++) { res += cuCabsf( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // sum rows left of diagonal for(int j=0; j < tx; j++) { res += cuCabsf( *A ); A += lda; } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_C_REAL( *A )); A += 1; // sum column below diagonal for(int j=tx+1; j < n_mod_bs; j++) { res += cuCabsf( *A ); A += 1; } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored upper. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). * The upper implementation is similar to lower, but processes blocks * in the transposed order: * lower goes from left over to diagonal, then down to bottom; * upper goes from top down to diagonal, then over to right. * Differences are noted with # in comments. 
*/ __global__ void clanhe_inf_kernel_generic_upper( int n, const magmaFloatComplex* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ magmaFloatComplex la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block #columns A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block for(int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block (#transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; //# } A += inf_bs; //# __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy #upper triangle to #lower triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for(int i=ty*8; i < ty*8 + 8; i++) { if ( i > tx ) { //# la[i][tx] = la[tx][i]; } #if defined(PRECISION_z) || defined(PRECISION_c) else if ( i == tx ) { la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks #right of diagonal block A += inf_bs*lda; //# for(int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (#non-transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; //# } A += inf_bs*lda; //# __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is #(inf_bs by n_mod_bs) if ( n_mod_bs > 0 ) { // load block (#non-transposed), with zeros for #cols outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { //# la[tx][ty+j] = A[j*lda]; //# } else { la[tx][ty+j] = MAGMA_C_ZERO; //# } } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block #column // Instead of assigning threads ind >= n to the last row (n-1), as in Lower, // Upper simply adjusts loop bounds to avoid loading columns outside the matrix. // Again, at the end, those results are ignored -- only // results for ind < n are saved into dwork. 
A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block // each is #(inf_bs by n_mod_bs) for(int i=0; i < diag; i += inf_bs ) { // load block (#transposed), #ignoring columns outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } } A += inf_bs; //# __syncthreads(); // partial row sums #pragma unroll 8 for(int j=0; j < 8; j++) { res += cuCabsf( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // #transpose pointer within diagonal block // #i.e., from A = A(tx,ty), transpose to A = A(ty,tx). A = A - tx - ty*lda + tx*lda + ty; // sum #column above diagonal for(int j=0; j < tx; j++) { res += cuCabsf( *A ); A += 1; //# } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_C_REAL( *A )); A += lda; //# // sum #row right of diagonal for(int j=tx+1; j < n_mod_bs; j++) { res += cuCabsf( *A ); A += lda; //# } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */ extern "C" void clanhe_inf( magma_uplo_t uplo, int n, const magmaFloatComplex *A, int lda, float *dwork ) { int blocks = (n - 1)/inf_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(inf_bs, 4, 1); int n_full_block = (n - n % inf_bs) /inf_bs; int n_mod_bs = n % inf_bs; if ( uplo == MagmaLower) { hipLaunchKernelGGL(( clanhe_inf_kernel_generic_lower), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork, n_full_block, n_mod_bs ); } else { hipLaunchKernelGGL(( clanhe_inf_kernel_generic_upper), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork, n_full_block, n_mod_bs ); } } /* ====================================================================== */ /* max-norm */ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */ __global__ void clanhe_max_kernel_lower( int n, const magmaFloatComplex* A, int lda, float *dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; for(int j=0; j < ind; ++j) { res = fmax( res, cuCabsf( *A )); A += lda; } // diagonal element (ignoring imaginary part) res = fmax( res, MAGMA_D_ABS( MAGMA_C_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. 
*/ __global__ void clanhe_max_kernel_upper( int n, const magmaFloatComplex* A, int lda, float *dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; A += (n-1)*lda; for(int j=n-1; j > ind; j--) { res = fmax( res, cuCabsf( *A )); A -= lda; } // diagonal element (ignoring imaginary part) res = fmax( res, MAGMA_D_ABS( MAGMA_C_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */ extern "C" void clanhe_max( magma_uplo_t uplo, int n, const magmaFloatComplex *A, int lda, float *dwork ) { int blocks = (n - 1)/max_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(max_bs, 1, 1); if ( uplo == MagmaLower ) { hipLaunchKernelGGL(( clanhe_max_kernel_lower), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork ); } else { hipLaunchKernelGGL(( clanhe_max_kernel_upper), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork ); } } /* ====================================================================== */ /** Purpose ------- CLANHE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a complex Hermitian matrix A. CLANHE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ** supported only for (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) ( ( normI(A), NORM = 'I' or 'i' ** supported only for (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Returns CLANHE < 0: if CLANHE = -i, the i-th argument had an illegal value. Arguments: ---------- @param[in] norm CHARACTER*1 Specifies the value to be returned in CLANHE as described above. @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the Hermitian matrix A is to be referenced. - = MagmaUpper: Upper triangular part of A is referenced - = MagmaLower: Lower triangular part of A is referenced @param[in] n INTEGER The order of the matrix A. N >= 0. When N = 0, CLANHE is set to zero. @param[in] A COMPLEX array on the GPU, dimension (LDA,N) The Hermitian matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(N,1). @param dwork (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= N. NOTE: this is different than LAPACK, where WORK is required only for norm1 and normI. Here max-norm also requires work. 
@ingroup magma_caux2 ********************************************************************/ extern "C" float magmablas_clanhe( magma_norm_t norm, magma_uplo_t uplo, magma_int_t n, const magmaFloatComplex *A, magma_int_t lda, float *dwork ) { magma_int_t info = 0; magma_int_t arch = magma_getdevice_arch(); // 1-norm == inf-norm since A is Hermitian bool inf_norm = (norm == MagmaInfNorm || norm == MagmaOneNorm); bool max_norm = (norm == MagmaMaxNorm); // inf_norm Double-Complex requires > 16 KB shared data (arch >= 200) #if defined(PRECISION_z) const bool inf_implemented = (magma_getdevice_arch() >= 200); #else const bool inf_implemented = true; #endif if ( ! (max_norm || (inf_norm && inf_implemented)) ) info = -1; else if ( uplo != MagmaUpper && uplo != MagmaLower ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < n ) info = -5; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( n == 0 ) return 0; float res = 0; if ( inf_norm ) { clanhe_inf( uplo, n, A, lda, dwork ); } else { clanhe_max( uplo, n, A, lda, dwork ); } int i = magma_isamax( n, dwork, 1 ) - 1; hipMemcpy( &res, &dwork[i], sizeof(float), hipMemcpyDeviceToHost ); return res; }
8087ea857b5a9de30345c394ae8f14de2bd6bcb4.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @author Mark Gates @generated from zlanhe.cu normal z -> c, Tue Sep 2 12:38:15 2014 */ #include "common_magma.h" #define inf_bs 32 #define max_bs 64 #define PRECISION_c __global__ void clanhe_inf_kernel_generic_upper( int n, const magmaFloatComplex* A, int lda, float *dwork, int n_full_block, int n_mod_bs ); /* ====================================================================== */ /* inf-norm */ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored lower. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */ __global__ void clanhe_inf_kernel_generic_lower( int n, const magmaFloatComplex* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ magmaFloatComplex la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block rows A += ind; A += ty * lda; // ---------- // loop over all blocks left of the diagonal block for(int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy lower triangle to upper triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for(int i=ty*8; i < ty*8 + 8; i++) { if ( i < tx ) { la[i][tx] = la[tx][i]; } #if defined(PRECISION_z) || defined(PRECISION_c) else if ( i == tx ) { la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks below diagonal block A += inf_bs; for(int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; } A += inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is (n_mod_bs by inf_bs) if ( n_mod_bs > 0 ) { // load block (transposed), with zeros for rows outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( tx < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } else { la[ty+j][tx] = MAGMA_C_ZERO; } } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_C_MAKE( res, 0. 
); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block row // Threads past end of matrix (i.e., ind >= n) are redundantly assigned // the last row (n-1). At the end, those results are ignored -- only // results for ind < n are saved into dwork. if ( tx < n_mod_bs ) { A += ind; } else { A += (blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row } A += ty * lda; // ---------- // loop over all blocks left of the diagonal block // each is (n_mod_bs by inf_bs) for(int i=0; i < diag; i += inf_bs ) { // load block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for(int j=0; j < 8; j++) { res += cuCabsf( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // sum rows left of diagonal for(int j=0; j < tx; j++) { res += cuCabsf( *A ); A += lda; } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_C_REAL( *A )); A += 1; // sum column below diagonal for(int j=tx+1; j < n_mod_bs; j++) { res += cuCabsf( *A ); A += 1; } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored upper. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). * The upper implementation is similar to lower, but processes blocks * in the transposed order: * lower goes from left over to diagonal, then down to bottom; * upper goes from top down to diagonal, then over to right. * Differences are noted with # in comments. 
*/ __global__ void clanhe_inf_kernel_generic_upper( int n, const magmaFloatComplex* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ magmaFloatComplex la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block #columns A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block for(int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block (#transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; //# } A += inf_bs; //# __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy #upper triangle to #lower triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for(int i=ty*8; i < ty*8 + 8; i++) { if ( i > tx ) { //# la[i][tx] = la[tx][i]; } #if defined(PRECISION_z) || defined(PRECISION_c) else if ( i == tx ) { la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks #right of diagonal block A += inf_bs*lda; //# for(int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (#non-transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; //# } A += inf_bs*lda; //# __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is #(inf_bs by n_mod_bs) if ( n_mod_bs > 0 ) { // load block (#non-transposed), with zeros for #cols outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { //# la[tx][ty+j] = A[j*lda]; //# } else { la[tx][ty+j] = MAGMA_C_ZERO; //# } } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block #column // Instead of assigning threads ind >= n to the last row (n-1), as in Lower, // Upper simply adjusts loop bounds to avoid loading columns outside the matrix. // Again, at the end, those results are ignored -- only // results for ind < n are saved into dwork. 
A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block // each is #(inf_bs by n_mod_bs) for(int i=0; i < diag; i += inf_bs ) { // load block (#transposed), #ignoring columns outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } } A += inf_bs; //# __syncthreads(); // partial row sums #pragma unroll 8 for(int j=0; j < 8; j++) { res += cuCabsf( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // #transpose pointer within diagonal block // #i.e., from A = A(tx,ty), transpose to A = A(ty,tx). A = A - tx - ty*lda + tx*lda + ty; // sum #column above diagonal for(int j=0; j < tx; j++) { res += cuCabsf( *A ); A += 1; //# } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_C_REAL( *A )); A += lda; //# // sum #row right of diagonal for(int j=tx+1; j < n_mod_bs; j++) { res += cuCabsf( *A ); A += lda; //# } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */ extern "C" void clanhe_inf( magma_uplo_t uplo, int n, const magmaFloatComplex *A, int lda, float *dwork ) { int blocks = (n - 1)/inf_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(inf_bs, 4, 1); int n_full_block = (n - n % inf_bs) /inf_bs; int n_mod_bs = n % inf_bs; if ( uplo == MagmaLower) { clanhe_inf_kernel_generic_lower<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork, n_full_block, n_mod_bs ); } else { clanhe_inf_kernel_generic_upper<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork, n_full_block, n_mod_bs ); } } /* ====================================================================== */ /* max-norm */ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */ __global__ void clanhe_max_kernel_lower( int n, const magmaFloatComplex* A, int lda, float *dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; for(int j=0; j < ind; ++j) { res = fmax( res, cuCabsf( *A )); A += lda; } // diagonal element (ignoring imaginary part) res = fmax( res, MAGMA_D_ABS( MAGMA_C_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. 
*/ __global__ void clanhe_max_kernel_upper( int n, const magmaFloatComplex* A, int lda, float *dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; A += (n-1)*lda; for(int j=n-1; j > ind; j--) { res = fmax( res, cuCabsf( *A )); A -= lda; } // diagonal element (ignoring imaginary part) res = fmax( res, MAGMA_D_ABS( MAGMA_C_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */ extern "C" void clanhe_max( magma_uplo_t uplo, int n, const magmaFloatComplex *A, int lda, float *dwork ) { int blocks = (n - 1)/max_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(max_bs, 1, 1); if ( uplo == MagmaLower ) { clanhe_max_kernel_lower<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork ); } else { clanhe_max_kernel_upper<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork ); } } /* ====================================================================== */ /** Purpose ------- CLANHE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a complex Hermitian matrix A. CLANHE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ** supported only for (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) ( ( normI(A), NORM = 'I' or 'i' ** supported only for (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Returns CLANHE < 0: if CLANHE = -i, the i-th argument had an illegal value. Arguments: ---------- @param[in] norm CHARACTER*1 Specifies the value to be returned in CLANHE as described above. @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the Hermitian matrix A is to be referenced. - = MagmaUpper: Upper triangular part of A is referenced - = MagmaLower: Lower triangular part of A is referenced @param[in] n INTEGER The order of the matrix A. N >= 0. When N = 0, CLANHE is set to zero. @param[in] A COMPLEX array on the GPU, dimension (LDA,N) The Hermitian matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(N,1). @param dwork (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= N. NOTE: this is different than LAPACK, where WORK is required only for norm1 and normI. Here max-norm also requires work. 
@ingroup magma_caux2 ********************************************************************/ extern "C" float magmablas_clanhe( magma_norm_t norm, magma_uplo_t uplo, magma_int_t n, const magmaFloatComplex *A, magma_int_t lda, float *dwork ) { magma_int_t info = 0; magma_int_t arch = magma_getdevice_arch(); // 1-norm == inf-norm since A is Hermitian bool inf_norm = (norm == MagmaInfNorm || norm == MagmaOneNorm); bool max_norm = (norm == MagmaMaxNorm); // inf_norm Double-Complex requires > 16 KB shared data (arch >= 200) #if defined(PRECISION_z) const bool inf_implemented = (magma_getdevice_arch() >= 200); #else const bool inf_implemented = true; #endif if ( ! (max_norm || (inf_norm && inf_implemented)) ) info = -1; else if ( uplo != MagmaUpper && uplo != MagmaLower ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < n ) info = -5; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( n == 0 ) return 0; float res = 0; if ( inf_norm ) { clanhe_inf( uplo, n, A, lda, dwork ); } else { clanhe_max( uplo, n, A, lda, dwork ); } int i = magma_isamax( n, dwork, 1 ) - 1; cudaMemcpy( &res, &dwork[i], sizeof(float), cudaMemcpyDeviceToHost ); return res; }
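A minimal host-side usage sketch for the routine above, not taken from the MAGMA sources: it assumes MAGMA and the CUDA runtime are already initialized and that dA is an n-by-n Hermitian matrix already resident on the device; the helper name is illustrative. Per the documentation block, dwork must hold at least n floats even for the max-norm, and a negative return value encodes an illegal argument.

#include "common_magma.h"

// Illustrative helper: infinity norm of a device-resident Hermitian matrix stored in the lower triangle.
float hermitian_inf_norm( magma_int_t n, const magmaFloatComplex *dA, magma_int_t ldda )
{
    float *dwork = NULL;                               /* workspace of at least n floats */
    cudaMalloc( (void**)&dwork, n * sizeof(float) );
    float nrm = magmablas_clanhe( MagmaInfNorm, MagmaLower, n, dA, ldda, dwork );
    cudaFree( dwork );
    return nrm;                                        /* negative value signals an illegal argument */
}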
a5a6cb1d6c865650e43569afd3ccb1ae5689c161.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* MIT License Copyright 2020 Jee W. Choi, Marat Dukhan, and Xing Liu Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "common.h" #ifndef BLOCK_SIZE #define BLOCK_SIZE 1024 #endif __global__ void cache_kernel_1 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; __syncthreads (); if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; out[tid] = tmp; } } __global__ void cache_kernel_2 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; tmp = s[tmp]; out[tid] = tmp; } } __global__ void cache_kernel_4 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; out[tid] = tmp; } } __global__ void cache_kernel_8 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; out[tid] = tmp; } } __global__ void cache_kernel_16 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; out[tid] = tmp; } } __global__ void cache_kernel_32 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; 
if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; out[tid] = tmp; } } __global__ void cache_kernel_64 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; out[tid] = tmp; } } __global__ void cache_kernel_128 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = 
s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp];
    tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp];
    out[tid] = tmp;
  }
}

__global__ void cache_kernel_256 (int nThreads, int* out, int* chase)
{
  int tmp;
  uint tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int s[BLOCK_SIZE];

  // read from DRAM and write to SMem
  tmp = chase[threadIdx.x];
  s[threadIdx.x] = tmp;
  __syncthreads ();   // all of s[] must be written before any thread starts chasing

  if(tid < nThreads)
  {
    // read from smem (pointer chasing): 256 dependent loads
    // the unroll pragma asks the compiler to expand the loop into a straight chain of dependent loads
    #pragma unroll 256
    for (int i = 0; i < 256; i++) {
      tmp = s[tmp];
    }
    out[tid] = tmp;
  }
}

__global__ void cache_kernel_512 (int nThreads, int* out, int* chase)
{
  int tmp;
  uint tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int s[BLOCK_SIZE];

  // read from DRAM and write to SMem
  tmp = chase[threadIdx.x];
  s[threadIdx.x] = tmp;
  __syncthreads ();   // all of s[] must be written before any thread starts chasing

  if(tid < nThreads)
  {
    // read from smem (pointer chasing): 512 dependent loads
    #pragma unroll 512
    for (int i = 0; i < 512; i++) {
      tmp = s[tmp];
    }
    out[tid] = tmp;
  }
}

__global__ void cache_kernel_1024 (int nThreads, int* out, int* chase)
{
  int tmp;
  uint tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int s[BLOCK_SIZE];

  // read from DRAM and write to SMem
  tmp = chase[threadIdx.x];
  s[threadIdx.x] = tmp;
  __syncthreads ();   // all of s[] must be written before any thread starts chasing

  if(tid < nThreads)
  {
    // read from smem (pointer chasing): 1024 dependent loads
    #pragma unroll 1024
    for (int i = 0; i < 1024; i++) {
      tmp = s[tmp];
    }
    out[tid] = tmp;
  }
}

__global__ void cache_kernel_2048 (int nThreads, int* out, int* chase)
{
  int tmp;
  uint tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int s[BLOCK_SIZE];

  // read from DRAM and write to SMem
  tmp = chase[threadIdx.x];
  s[threadIdx.x] = tmp;
  __syncthreads ();   // all of s[] must be written before any thread starts chasing

  if(tid < nThreads)
  {
    // read from smem (pointer chasing): 2048 dependent loads
    #pragma unroll 2048
    for (int i = 0; i < 2048; i++) {
      tmp = s[tmp];
    }
    out[tid] = tmp;
  }
}

__global__ void cache_kernel_4096 (int nThreads, int* out, int* chase)
{
  int tmp;
  uint tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int s[BLOCK_SIZE];

  // read from DRAM and write to SMem
  tmp = chase[threadIdx.x];
  s[threadIdx.x] = tmp;
  __syncthreads ();

  if(tid < nThreads)
  {
    /*
    tmp = s[tmp]; tmp
= s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; 
tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = 
s[tmp]; ... remainder of the commented-out, manually unrolled "tmp = s[tmp];" chain ... */
    // read from smem (pointer chasing)
    #pragma unroll 2048
    for(int i = 0; i < 4096; i++) {
      tmp = s[tmp];
    }
    out[tid] = tmp;
  }
}
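Both the HIP file above and the CUDA file that follows measure shared-memory latency by pointer chasing: each thread copies one element of chase into the shared array s, then issues a chain of dependent reads (tmp = s[tmp]), so no load can begin until the previous one has returned, and dividing the measured kernel time by the chain length estimates the per-access latency. The host driver lives in common.h and is not part of this excerpt, so the following is only a minimal illustrative sketch — the cyclic (i + 1) % BLOCK_SIZE chase pattern, the single-block launch, and the event-based timing are assumptions, not code taken from this dataset entry.

// Illustrative host-side driver sketch (not from the dataset entry).
// Assumes it is compiled together with the cache_kernel_* definitions,
// e.g. nvcc -rdc=true driver.cu kernels.cu, or pasted into the same .cu file.
#include <cuda_runtime.h>
#include <cstdio>

#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif

__global__ void cache_kernel_1 (int nThreads, int* out, int* chase);

int main() {
  const int nThreads = BLOCK_SIZE;

  // Assumed chase pattern: a simple cycle, so every read depends on the previous one.
  int h_chase[BLOCK_SIZE];
  for (int i = 0; i < BLOCK_SIZE; i++) {
    h_chase[i] = (i + 1) % BLOCK_SIZE;
  }

  int *d_chase = NULL, *d_out = NULL;
  cudaMalloc(&d_chase, BLOCK_SIZE * sizeof(int));
  cudaMalloc(&d_out, nThreads * sizeof(int));
  cudaMemcpy(d_chase, h_chase, BLOCK_SIZE * sizeof(int), cudaMemcpyHostToDevice);

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  // Single block, so the whole chase stays inside one SM's shared memory.
  cudaEventRecord(start);
  cache_kernel_1<<<1, BLOCK_SIZE>>>(nThreads, d_out, d_chase);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("cache_kernel_1: %f ms for 1 dependent shared-memory read per thread\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_chase);
  cudaFree(d_out);
  return 0;
}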
a5a6cb1d6c865650e43569afd3ccb1ae5689c161.cu
/* MIT License

Copyright 2020 Jee W. Choi, Marat Dukhan, and Xing Liu

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. */

#include "common.h"

#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif

__global__ void cache_kernel_1 (int nThreads, int* out, int* chase) {
  int tmp;
  uint tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int s[BLOCK_SIZE];

  // read from DRAM and write to SMem
  tmp = chase[threadIdx.x];
  s[threadIdx.x] = tmp;
  __syncthreads ();

  if(tid < nThreads) {
    // read from smem (pointer chasing)
    tmp = s[tmp];
    out[tid] = tmp;
  }
}

__global__ void cache_kernel_2 (int nThreads, int* out, int* chase) {
  int tmp;
  uint tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int s[BLOCK_SIZE];

  // read from DRAM and write to SMem
  tmp = chase[threadIdx.x];
  s[threadIdx.x] = tmp;

  if(tid < nThreads) {
    // read from smem (pointer chasing)
    tmp = s[tmp]; tmp = s[tmp];
    out[tid] = tmp;
  }
}

__global__ void cache_kernel_4 (int nThreads, int* out, int* chase) {
  int tmp;
  uint tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int s[BLOCK_SIZE];

  // read from DRAM and write to SMem
  tmp = chase[threadIdx.x];
  s[threadIdx.x] = tmp;

  if(tid < nThreads) {
    // read from smem (pointer chasing)
    tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp];
    out[tid] = tmp;
  }
}

__global__ void cache_kernel_8 (int nThreads, int* out, int* chase) {
  int tmp;
  uint tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int s[BLOCK_SIZE];

  // read from DRAM and write to SMem
  tmp = chase[threadIdx.x];
  s[threadIdx.x] = tmp;

  if(tid < nThreads) {
    // read from smem (pointer chasing)
    tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp];
    out[tid] = tmp;
  }
}

__global__ void cache_kernel_16 (int nThreads, int* out, int* chase) {
  int tmp;
  uint tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int s[BLOCK_SIZE];

  // read from DRAM and write to SMem
  tmp = chase[threadIdx.x];
  s[threadIdx.x] = tmp;

  if(tid < nThreads) {
    // read from smem (pointer chasing)
    tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp];
    tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp];
    out[tid] = tmp;
  }
}

__global__ void cache_kernel_32 (int nThreads, int* out, int* chase) {
  int tmp;
  uint tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int s[BLOCK_SIZE];

  // read from DRAM and write to SMem
  tmp = chase[threadIdx.x];
  s[threadIdx.x] = tmp;

  if(tid < nThreads) {
    // read from smem (pointer chasing)
    tmp = s[tmp]; tmp = s[tmp]; tmp =
s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; out[tid] = tmp; } } __global__ void cache_kernel_64 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; out[tid] = tmp; } } __global__ void cache_kernel_128 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = 
s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; out[tid] = tmp; } } __global__ void cache_kernel_256 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp 
= s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; out[tid] = tmp; } } __global__ void cache_kernel_512 (int nThreads, int* out, int* chase) { int tmp; uint tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s[BLOCK_SIZE]; // read from DRAM and write to SMem tmp = chase[threadIdx.x]; s[threadIdx.x] = tmp; if(tid < nThreads) { // read from smem (pointer chasing) tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; 
__global__ void cache_kernel_512 (int nThreads, int* out, int* chase)
{
    int tmp;
    uint tid = threadIdx.x + blockIdx.x * blockDim.x;

    __shared__ int s[BLOCK_SIZE];

    // read from DRAM and write to SMem
    tmp = chase[threadIdx.x];
    s[threadIdx.x] = tmp;

    if(tid < nThreads)
    {
        // read from smem (pointer chasing)
        #pragma unroll 512
        for(int i = 0; i < 512; i++)
        {
            tmp = s[tmp];
        }

        out[tid] = tmp;
    }
}
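// Note on using these kernels: every `tmp = s[tmp]` load depends on the
// previous one, so the chain cannot overlap within a thread. For a launch
// small enough that other warps do not hide the latency, the shared-memory
// latency per access can be estimated as
//     latency_per_access ~= measured_kernel_time / N
// where N is the unroll count encoded in the kernel name (256, 512, ...).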
__global__ void cache_kernel_1024 (int nThreads, int* out, int* chase)
{
    int tmp;
    uint tid = threadIdx.x + blockIdx.x * blockDim.x;

    __shared__ int s[BLOCK_SIZE];

    // read from DRAM and write to SMem
    tmp = chase[threadIdx.x];
    s[threadIdx.x] = tmp;

    if(tid < nThreads)
    {
        // read from smem (pointer chasing)
        #pragma unroll 1024
        for(int i = 0; i < 1024; i++)
        {
            tmp = s[tmp];
        }

        out[tid] = tmp;
    }
}
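/*
 * Illustrative timing sketch, assumed for clarity: one way to time a single
 * pointer-chasing kernel from the host with CUDA events. `BLOCK_SIZE` is the
 * block size used by the kernels above; `d_out` and `d_chase` are device
 * buffers prepared by the surrounding benchmark. The helper name is
 * hypothetical.
 */
static float time_cache_kernel_1024(int nThreads, int* d_out, int* d_chase)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int grid = (nThreads + BLOCK_SIZE - 1) / BLOCK_SIZE;

    cudaEventRecord(start);
    cache_kernel_1024<<<grid, BLOCK_SIZE>>>(nThreads, d_out, d_chase);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}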
__global__ void cache_kernel_2048 (int nThreads, int* out, int* chase)
{
    int tmp;
    uint tid = threadIdx.x + blockIdx.x * blockDim.x;

    __shared__ int s[BLOCK_SIZE];

    // read from DRAM and write to SMem
    tmp = chase[threadIdx.x];
    s[threadIdx.x] = tmp;

    if(tid < nThreads)
    {
        // read from smem (pointer chasing)
        #pragma unroll 2048
        for(int i = 0; i < 2048; i++)
        {
            tmp = s[tmp];
        }

        out[tid] = tmp;
    }
}

__global__ void cache_kernel_4096 (int nThreads, int* out, int* chase)
{
    int tmp;
    uint tid = threadIdx.x + blockIdx.x * blockDim.x;

    __shared__ int s[BLOCK_SIZE];

    // read from DRAM and write to SMem
    tmp = chase[threadIdx.x];
    s[threadIdx.x] = tmp;

    __syncthreads ();

    if(tid < nThreads)
    {
        /*
        tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp
= s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; 
tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = 
    tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp]; tmp = s[tmp];
    [... several thousand more identical "tmp = s[tmp];" statements in this manually unrolled chain ...]
    tmp = s[tmp]; */

    // read from smem (pointer chasing)
    #pragma unroll 2048
    for(int i = 0; i < 4096; i++)
    {
        tmp = s[tmp];
    }

    out[tid] = tmp;
  }
}
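The kernel above measures shared-memory load latency by pointer chasing: every tmp = s[tmp] load uses the value returned by the previous load as its address, so the hardware cannot overlap the accesses and the loop time divided by the iteration count approximates the per-load latency. For the walk to stay dependent and touch distinct locations, the array s has to be initialized on the host as a single permutation cycle. The sketch below is illustrative only and is not part of the benchmark source: the kernel name chase_kernel, the stride of 33, the block size of 256, and the host driver are all assumptions; only the 4096-step walk and the #pragma unroll 2048 loop mirror the code above, and timing is omitted.

// Illustrative sketch (assumed names and driver): building a pointer-chase
// chain and running a dependent walk through shared memory.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void chase_kernel(const int* chain, int* out, int len)
{
    extern __shared__ int s[];
    int tid = threadIdx.x;

    // Stage the chain into shared memory cooperatively.
    for (int i = tid; i < len; i += blockDim.x)
        s[i] = chain[i];
    __syncthreads();

    int tmp = tid % len;
    // Same shape as the benchmark loop above: a serially dependent walk.
    #pragma unroll 2048
    for (int i = 0; i < 4096; i++)
        tmp = s[tmp];

    out[tid] = tmp;   // keep the result live so the walk is not optimised away
}

int main()
{
    const int len = 4096;   // matches the 4096-step walk in the kernel
    const int stride = 33;  // odd, hence coprime with 4096, so the chain is one cycle

    int *chain, *out;
    cudaMallocManaged(&chain, len * sizeof(int));
    cudaMallocManaged(&out, 256 * sizeof(int));

    // chain[i] points stride elements ahead (mod len); following s[tmp]
    // repeatedly therefore visits every element before the walk repeats.
    for (int i = 0; i < len; i++)
        chain[i] = (i + stride) % len;

    chase_kernel<<<1, 256, len * sizeof(int)>>>(chain, out, len);
    cudaDeviceSynchronize();
    printf("out[0] = %d\n", out[0]);

    cudaFree(chain);
    cudaFree(out);
    return 0;
}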
248c3d34a8d2ad38e3a18f62f4e89eff6c29a118.hip
// !!! This is a file automatically generated by hipify!!!
/*
This file consists of CUDA code which is compiled with the CuMat program
and linked in with the output with clang.

Most of this is just setup for llvm with cuBLAS so that we can call the
functions and have them return CuMat data (row major).
*/
#include <cstring>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#include "../utils/headers.hpp"

// Device function
__global__ void CuMatLORMatrixDKernel(double* A, double* B, double* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (double) (A[index] || B[index]);
    }
}

extern "C" void CuMatLORMatrixD(HeaderD* matHeaderA, HeaderD* matHeaderB, HeaderD* matHeaderRes, long len){
    double* matA = matHeaderA->data;
    double* matB = matHeaderB->data;
    double* matRes = matHeaderRes->data;

    double *d_A, *d_B, *d_Res;
    size_t size = len*sizeof(long);

    // Allocate memory for CUDA
    hipMallocManaged(&d_A, size);
    hipMallocManaged(&d_B, size);
    hipMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    hipMemcpy(d_A, matA, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, matB, size, hipMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    hipLaunchKernelGGL(CuMatLORMatrixDKernel, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_Res, len);

    // Synchronise before copying
    hipDeviceSynchronize();

    // Copy the results out of device memory
    hipMemcpy(matRes, d_Res, size, hipMemcpyDeviceToHost);

    // Free up cuda malloc
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_Res);
}

// Device function
__global__ void CuMatLORMatrixIKernel(long* A, long* B, long* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (long) (A[index] || B[index]);
    }
}

extern "C" void CuMatLORMatrixI(HeaderI* matHeaderA, HeaderI* matHeaderB, HeaderI* matHeaderRes, long len){
    long* matA = matHeaderA->data;
    long* matB = matHeaderB->data;
    long* matRes = matHeaderRes->data;

    long *d_A, *d_B, *d_Res;
    size_t size = len*sizeof(long);

    // Allocate memory for CUDA
    hipMallocManaged(&d_A, size);
    hipMallocManaged(&d_B, size);
    hipMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    hipMemcpy(d_A, matA, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, matB, size, hipMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    hipLaunchKernelGGL(CuMatLORMatrixIKernel, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_Res, len);

    // Synchronise before copying
    hipDeviceSynchronize();

    // Copy the results out of device memory
    hipMemcpy(matRes, d_Res, size, hipMemcpyDeviceToHost);

    // Free up cuda malloc
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_Res);
}

/*
 * Logical AND Kernel functions
 */

// Device function
__global__ void CuMatLANDMatrixDKernel(double* A, double* B, double* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (double) (A[index] && B[index]);
    }
}

extern "C" void CuMatLANDMatrixD(HeaderD* matHeaderA, HeaderD* matHeaderB, HeaderD* matHeaderRes, long len){
    double* matA = matHeaderA->data;
    double* matB = matHeaderB->data;
    double* matRes = matHeaderRes->data;

    double *d_A, *d_B, *d_Res;
    size_t size = len*sizeof(long);

    // Allocate memory for CUDA
    hipMallocManaged(&d_A, size);
    hipMallocManaged(&d_B, size);
    hipMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    hipMemcpy(d_A, matA, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, matB, size, hipMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    hipLaunchKernelGGL(CuMatLANDMatrixDKernel, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_Res, len);

    // Synchronise before copying
    hipDeviceSynchronize();

    // Copy the results out of device memory
    hipMemcpy(matRes, d_Res, size, hipMemcpyDeviceToHost);

    // Free up cuda malloc
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_Res);
}

// Device function
__global__ void CuMatLANDMatrixIKernel(long* A, long* B, long* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (long) (A[index] && B[index]);
    }
}

extern "C" void CuMatLANDMatrixI(HeaderI* matHeaderA, HeaderI* matHeaderB, HeaderI* matHeaderRes, long len){
    long* matA = matHeaderA->data;
    long* matB = matHeaderB->data;
    long* matRes = matHeaderRes->data;

    long *d_A, *d_B, *d_Res;
    size_t size = len*sizeof(long);

    // Allocate memory for CUDA
    hipMallocManaged(&d_A, size);
    hipMallocManaged(&d_B, size);
    hipMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    hipMemcpy(d_A, matA, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, matB, size, hipMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    hipLaunchKernelGGL(CuMatLANDMatrixIKernel, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_Res, len);

    // Synchronise before copying
    hipDeviceSynchronize();

    // Copy the results out of device memory
    hipMemcpy(matRes, d_Res, size, hipMemcpyDeviceToHost);

    // Free up cuda malloc
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_Res);
}

/*
 * Bitwise functions
 */

// Device function
__global__ void CuMatBORMatrixDKernel(double* A, double* B, double* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (double) (((long)A[index]) | ((long)B[index]));
    }
}

extern "C" void CuMatBORMatrixD(HeaderD* matHeaderA, HeaderD* matHeaderB, HeaderD* matHeaderRes, long len){
    double* matA = matHeaderA->data;
    double* matB = matHeaderB->data;
    double* matRes = matHeaderRes->data;

    double *d_A, *d_B, *d_Res;
    size_t size = len*sizeof(long);

    // Allocate memory for CUDA
    hipMallocManaged(&d_A, size);
    hipMallocManaged(&d_B, size);
    hipMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    hipMemcpy(d_A, matA, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, matB, size, hipMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    hipLaunchKernelGGL(CuMatBORMatrixDKernel, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_Res, len);

    // Synchronise before copying
    hipDeviceSynchronize();

    // Copy the results out of device memory
    hipMemcpy(matRes, d_Res, size, hipMemcpyDeviceToHost);

    // Free up cuda malloc
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_Res);
}

// Device function
__global__ void CuMatBORMatrixIKernel(long* A, long* B, long* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (long) (A[index] | B[index]);
    }
}

extern "C" void CuMatBORMatrixI(HeaderI* matHeaderA, HeaderI* matHeaderB, HeaderI* matHeaderRes, long len){
    long* matA = matHeaderA->data;
    long* matB = matHeaderB->data;
    long* matRes = matHeaderRes->data;

    long *d_A, *d_B, *d_Res;
    size_t size = len*sizeof(long);

    // Allocate memory for CUDA
    hipMallocManaged(&d_A, size);
    hipMallocManaged(&d_B, size);
    hipMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    hipMemcpy(d_A, matA, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, matB, size, hipMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    hipLaunchKernelGGL(CuMatBORMatrixIKernel, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_Res, len);

    // Synchronise before copying
    hipDeviceSynchronize();

    // Copy the results out of device memory
    hipMemcpy(matRes, d_Res, size, hipMemcpyDeviceToHost);

    // Free up cuda malloc
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_Res);
}

/*
 * Bitwise AND Kernel functions
 */

// Device function
__global__ void CuMatBANDMatrixDKernel(double* A, double* B, double* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (double) (((long)A[index]) & ((long)B[index]));
    }
}

extern "C" void CuMatBANDMatrixD(HeaderD* matHeaderA, HeaderD* matHeaderB, HeaderD* matHeaderRes, long len){
    double* matA = matHeaderA->data;
    double* matB = matHeaderB->data;
    double* matRes = matHeaderRes->data;

    double *d_A, *d_B, *d_Res;
    size_t size = len*sizeof(long);

    // Allocate memory for CUDA
    hipMallocManaged(&d_A, size);
    hipMallocManaged(&d_B, size);
    hipMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    hipMemcpy(d_A, matA, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, matB, size, hipMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    hipLaunchKernelGGL(CuMatBANDMatrixDKernel, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_Res, len);

    // Synchronise before copying
    hipDeviceSynchronize();

    // Copy the results out of device memory
    hipMemcpy(matRes, d_Res, size, hipMemcpyDeviceToHost);

    // Free up cuda malloc
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_Res);
}

// Device function
__global__ void CuMatBANDMatrixIKernel(long* A, long* B, long* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (long) (A[index] & B[index]);
    }
}

extern "C" void CuMatBANDMatrixI(HeaderI* matHeaderA, HeaderI* matHeaderB, HeaderI* matHeaderRes, long len){
    long* matA = matHeaderA->data;
    long* matB = matHeaderB->data;
    long* matRes = matHeaderRes->data;

    long *d_A, *d_B, *d_Res;
    size_t size = len*sizeof(long);

    // Allocate memory for CUDA
    hipMallocManaged(&d_A, size);
    hipMallocManaged(&d_B, size);
    hipMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    hipMemcpy(d_A, matA, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, matB, size, hipMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    hipLaunchKernelGGL(CuMatBANDMatrixIKernel, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_Res, len);

    // Synchronise before copying
    hipDeviceSynchronize();

    // Copy the results out of device memory
    hipMemcpy(matRes, d_Res, size, hipMemcpyDeviceToHost);

    // Free up cuda malloc
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_Res);
}
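Each wrapper above picks its launch geometry with the ceiling-division idiom blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock, which rounds up so that every element is covered even when len is not a multiple of the block size: for len = 1000 and threadsPerBlock = 256 this gives (1000 + 255) / 256 = 4 blocks, i.e. 1024 threads, and the if(index < len) guard in each kernel makes the 24 surplus threads do nothing.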
248c3d34a8d2ad38e3a18f62f4e89eff6c29a118.cu
/* This file consists of CUDA code which is compiled with the CuMat program and
   linked into the output with clang. Most of this is just setup for LLVM with
   cuBLAS so that we can call the functions and have them return CuMat data
   (row major) */
#include <cstring>
#include <cublas_v2.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "../utils/headers.hpp"

// Device function
__global__ void CuMatLORMatrixDKernel(double* A, double* B, double* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (double) (A[index] || B[index]);
    }
}

extern "C" void CuMatLORMatrixD(HeaderD* matHeaderA, HeaderD* matHeaderB, HeaderD* matHeaderRes, long len){
    double* matA = matHeaderA->data;
    double* matB = matHeaderB->data;
    double* matRes = matHeaderRes->data;

    double *d_A, *d_B, *d_Res;
    size_t size = len * sizeof(double);

    // Allocate memory for CUDA
    cudaMallocManaged(&d_A, size);
    cudaMallocManaged(&d_B, size);
    cudaMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    cudaMemcpy(d_A, matA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, matB, size, cudaMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    CuMatLORMatrixDKernel<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_Res, len);

    // Synchronise before copying
    cudaDeviceSynchronize();

    // Copy the results out of device memory
    cudaMemcpy(matRes, d_Res, size, cudaMemcpyDeviceToHost);

    // Free the device allocations (pass the pointers themselves, not their addresses)
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Res);
}

// Device function
__global__ void CuMatLORMatrixIKernel(long* A, long* B, long* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (long) (A[index] || B[index]);
    }
}

extern "C" void CuMatLORMatrixI(HeaderI* matHeaderA, HeaderI* matHeaderB, HeaderI* matHeaderRes, long len){
    long* matA = matHeaderA->data;
    long* matB = matHeaderB->data;
    long* matRes = matHeaderRes->data;

    long *d_A, *d_B, *d_Res;
    size_t size = len * sizeof(long);

    // Allocate memory for CUDA
    cudaMallocManaged(&d_A, size);
    cudaMallocManaged(&d_B, size);
    cudaMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    cudaMemcpy(d_A, matA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, matB, size, cudaMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    CuMatLORMatrixIKernel<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_Res, len);

    // Synchronise before copying
    cudaDeviceSynchronize();

    // Copy the results out of device memory
    cudaMemcpy(matRes, d_Res, size, cudaMemcpyDeviceToHost);

    // Free the device allocations
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Res);
}

/*
 * Logical AND Kernel functions
 */

// Device function
__global__ void CuMatLANDMatrixDKernel(double* A, double* B, double* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (double) (A[index] && B[index]);
    }
}

extern "C" void CuMatLANDMatrixD(HeaderD* matHeaderA, HeaderD* matHeaderB, HeaderD* matHeaderRes, long len){
    double* matA = matHeaderA->data;
    double* matB = matHeaderB->data;
    double* matRes = matHeaderRes->data;

    double *d_A, *d_B, *d_Res;
    size_t size = len * sizeof(double);

    // Allocate memory for CUDA
    cudaMallocManaged(&d_A, size);
    cudaMallocManaged(&d_B, size);
    cudaMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    cudaMemcpy(d_A, matA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, matB, size, cudaMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    CuMatLANDMatrixDKernel<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_Res, len);

    // Synchronise before copying
    cudaDeviceSynchronize();

    // Copy the results out of device memory
    cudaMemcpy(matRes, d_Res, size, cudaMemcpyDeviceToHost);

    // Free the device allocations
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Res);
}

// Device function
__global__ void CuMatLANDMatrixIKernel(long* A, long* B, long* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (long) (A[index] && B[index]);
    }
}

extern "C" void CuMatLANDMatrixI(HeaderI* matHeaderA, HeaderI* matHeaderB, HeaderI* matHeaderRes, long len){
    long* matA = matHeaderA->data;
    long* matB = matHeaderB->data;
    long* matRes = matHeaderRes->data;

    long *d_A, *d_B, *d_Res;
    size_t size = len * sizeof(long);

    // Allocate memory for CUDA
    cudaMallocManaged(&d_A, size);
    cudaMallocManaged(&d_B, size);
    cudaMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    cudaMemcpy(d_A, matA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, matB, size, cudaMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    CuMatLANDMatrixIKernel<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_Res, len);

    // Synchronise before copying
    cudaDeviceSynchronize();

    // Copy the results out of device memory
    cudaMemcpy(matRes, d_Res, size, cudaMemcpyDeviceToHost);

    // Free the device allocations
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Res);
}

/*
 * Bitwise functions
 */

// Device function
__global__ void CuMatBORMatrixDKernel(double* A, double* B, double* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (double) (((long)A[index]) | ((long)B[index]));
    }
}

extern "C" void CuMatBORMatrixD(HeaderD* matHeaderA, HeaderD* matHeaderB, HeaderD* matHeaderRes, long len){
    double* matA = matHeaderA->data;
    double* matB = matHeaderB->data;
    double* matRes = matHeaderRes->data;

    double *d_A, *d_B, *d_Res;
    size_t size = len * sizeof(double);

    // Allocate memory for CUDA
    cudaMallocManaged(&d_A, size);
    cudaMallocManaged(&d_B, size);
    cudaMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    cudaMemcpy(d_A, matA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, matB, size, cudaMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    CuMatBORMatrixDKernel<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_Res, len);

    // Synchronise before copying
    cudaDeviceSynchronize();

    // Copy the results out of device memory
    cudaMemcpy(matRes, d_Res, size, cudaMemcpyDeviceToHost);

    // Free the device allocations
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Res);
}

// Device function
__global__ void CuMatBORMatrixIKernel(long* A, long* B, long* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (long) (A[index] | B[index]);
    }
}

extern "C" void CuMatBORMatrixI(HeaderI* matHeaderA, HeaderI* matHeaderB, HeaderI* matHeaderRes, long len){
    long* matA = matHeaderA->data;
    long* matB = matHeaderB->data;
    long* matRes = matHeaderRes->data;

    long *d_A, *d_B, *d_Res;
    size_t size = len * sizeof(long);

    // Allocate memory for CUDA
    cudaMallocManaged(&d_A, size);
    cudaMallocManaged(&d_B, size);
    cudaMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    cudaMemcpy(d_A, matA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, matB, size, cudaMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    CuMatBORMatrixIKernel<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_Res, len);

    // Synchronise before copying
    cudaDeviceSynchronize();

    // Copy the results out of device memory
    cudaMemcpy(matRes, d_Res, size, cudaMemcpyDeviceToHost);

    // Free the device allocations
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Res);
}

/*
 * Bitwise AND Kernel functions
 */

// Device function
__global__ void CuMatBANDMatrixDKernel(double* A, double* B, double* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (double) (((long)A[index]) & ((long)B[index]));
    }
}

extern "C" void CuMatBANDMatrixD(HeaderD* matHeaderA, HeaderD* matHeaderB, HeaderD* matHeaderRes, long len){
    double* matA = matHeaderA->data;
    double* matB = matHeaderB->data;
    double* matRes = matHeaderRes->data;

    double *d_A, *d_B, *d_Res;
    size_t size = len * sizeof(double);

    // Allocate memory for CUDA
    cudaMallocManaged(&d_A, size);
    cudaMallocManaged(&d_B, size);
    cudaMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    cudaMemcpy(d_A, matA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, matB, size, cudaMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    CuMatBANDMatrixDKernel<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_Res, len);

    // Synchronise before copying
    cudaDeviceSynchronize();

    // Copy the results out of device memory
    cudaMemcpy(matRes, d_Res, size, cudaMemcpyDeviceToHost);

    // Free the device allocations
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Res);
}

// Device function
__global__ void CuMatBANDMatrixIKernel(long* A, long* B, long* res, long len){
    long index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < len){
        res[index] = (long) (A[index] & B[index]);
    }
}

extern "C" void CuMatBANDMatrixI(HeaderI* matHeaderA, HeaderI* matHeaderB, HeaderI* matHeaderRes, long len){
    long* matA = matHeaderA->data;
    long* matB = matHeaderB->data;
    long* matRes = matHeaderRes->data;

    long *d_A, *d_B, *d_Res;
    size_t size = len * sizeof(long);

    // Allocate memory for CUDA
    cudaMallocManaged(&d_A, size);
    cudaMallocManaged(&d_B, size);
    cudaMallocManaged(&d_Res, size);

    // Copy over the matrices into device memory
    cudaMemcpy(d_A, matA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, matB, size, cudaMemcpyHostToDevice);

    // Set the number of threads per block and grid size
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;

    // Call the kernel
    CuMatBANDMatrixIKernel<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_Res, len);

    // Synchronise before copying
    cudaDeviceSynchronize();

    // Copy the results out of device memory
    cudaMemcpy(matRes, d_Res, size, cudaMemcpyDeviceToHost);

    // Free the device allocations
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Res);
}
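For reference, a minimal host-side driver for one of these entry points might look like the sketch below. The real HeaderD layout lives in ../utils/headers.hpp, which is not shown here, so the struct in the sketch is a hypothetical stand-in that only assumes the `data` member the wrappers above actually read.

// Hedged usage sketch; HeaderD here is a stand-in, not the real CuMat header type.
#include <cstdio>
#include <vector>

struct HeaderD { double* data; };  // assumption: only the `data` member matters to the wrapper

extern "C" void CuMatLORMatrixD(HeaderD*, HeaderD*, HeaderD*, long len);

int main() {
    const long len = 1024;
    std::vector<double> a(len, 0.0), b(len, 1.0), out(len, 0.0);
    HeaderD ha{a.data()}, hb{b.data()}, hres{out.data()};

    // Element-wise logical OR: out[i] becomes 1.0 wherever a[i] or b[i] is non-zero.
    CuMatLORMatrixD(&ha, &hb, &hres, len);

    std::printf("out[0] = %f\n", out[0]);  // expected 1.0 with the inputs above
    return 0;
}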
22a935f842578dc82b2e7b2a5427d69338b2e261.hip
// !!! This is a file automatically generated by hipify!!!
#include <cusp/complex.h>
#include <hipfft.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>

using namespace cusp;
using namespace thrust;

const unsigned int TILE_DIM = 32;
const unsigned int BLOCK_ROWS = 32;

__global__ void fftshift(complex<float>* d_idata, complex<float>* d_odata)
{
    __shared__ complex<float> tile[TILE_DIM][TILE_DIM + 1];
    int x = TILE_DIM * blockIdx.x + threadIdx.x;
    int y = TILE_DIM * blockIdx.y + threadIdx.y;
    int width = gridDim.x * TILE_DIM;

    tile[threadIdx.y][threadIdx.x] = d_idata[y*width + x];
    __syncthreads();

    if (blockIdx.x < width / (TILE_DIM * 2) && blockIdx.y < width / (TILE_DIM * 2)) {
        d_odata[(y + width / 2)*width + x + width / 2] = tile[threadIdx.y][threadIdx.x];
    }
    if (blockIdx.x < width / (TILE_DIM * 2) && blockIdx.y >= width / (TILE_DIM * 2)) {
        d_odata[(y - width / 2)*width + x + width / 2] = tile[threadIdx.y][threadIdx.x];
    }
    if (blockIdx.x >= width / (TILE_DIM * 2) && blockIdx.y < width / (TILE_DIM * 2)) {
        d_odata[(y + width / 2)*width + x - width / 2] = tile[threadIdx.y][threadIdx.x];
    }
    if (blockIdx.x >= width / (TILE_DIM * 2) && blockIdx.y >= width / (TILE_DIM * 2)) {
        d_odata[(y - width / 2)*width + x - width / 2] = tile[threadIdx.y][threadIdx.x];
    }
}

__global__ void ComplexScale(complex<float>* d_idata, complex<float>* d_odata, float scale,
                             unsigned int width, unsigned int height)
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col < width && row < height) {
        d_odata[row*width + col] = d_idata[row*width + col] * scale;
    }
}

extern "C" void pre_cuFft2D(std::complex<float>* h_kspace, std::complex<float>* h_image,
                            bool fft_forward, unsigned int width, unsigned int height)
{
    complex<float> *d_kspace, *d_image;
    device_vector<complex<float>> d_buffer(width * height);
    size_t lpitch;

    hipfftHandle plan;
    checkCudaErrors(hipfftPlan2d(&plan, height, width, HIPFFT_C2C));

    checkCudaErrors(hipMallocPitch((void **)&d_kspace, &lpitch, width * sizeof(complex<float>), height));
    checkCudaErrors(hipMallocPitch((void **)&d_image, &lpitch, width * sizeof(complex<float>), height));
    checkCudaErrors(hipMemcpy2D((void *)d_kspace, lpitch, (void *)h_kspace,
                                sizeof(float)*2*width, sizeof(float)*2*width, height,
                                hipMemcpyHostToDevice));

    dim3 dimBlock(width, 1, 1);
    dim3 dimGrid(1, height, 1);
    dim3 grid(width / TILE_DIM, height / TILE_DIM, 1);
    dim3 block(TILE_DIM, BLOCK_ROWS, 1);

    hipLaunchKernelGGL(fftshift, grid, block, 0, 0, d_kspace, raw_pointer_cast(d_buffer.data()));

    if (fft_forward) {
        checkCudaErrors(hipfftExecC2C(plan, raw_pointer_cast(d_buffer.data()), d_image, HIPFFT_FORWARD));
    } else {
        checkCudaErrors(hipfftExecC2C(plan, raw_pointer_cast(d_buffer.data()), d_image, HIPFFT_BACKWARD));
    }

    hipLaunchKernelGGL(fftshift, grid, block, 0, 0, d_image, raw_pointer_cast(d_buffer.data()));

    float scale = 1 / (float)std::sqrt(width * height);
    hipLaunchKernelGGL(ComplexScale, dimGrid, dimBlock, 0, 0,
                       raw_pointer_cast(d_buffer.data()), d_image, scale, width, height);

    // Copy back to the tightly packed host buffer: source pitch is the device pitch,
    // destination pitch is the host row width in bytes.
    checkCudaErrors(hipMemcpy2D((void *)h_image, sizeof(float)*2*width, (void *)d_image,
                                lpitch, sizeof(float)*2*width, height,
                                hipMemcpyDeviceToHost));

    checkCudaErrors(hipfftDestroy(plan));
    checkCudaErrors(hipFree(d_kspace));
    checkCudaErrors(hipFree(d_image));
}
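The fftshift kernel above performs a quadrant swap for square, even-sized images (that is what its index math assumes). A minimal CPU reference of the same mapping can be useful for spot-checking the GPU output on small inputs; this is a sketch, not part of the library.

#include <complex>
#include <vector>

// out[(y + h/2) % h][(x + w/2) % w] = in[y][x], i.e. the same quadrant swap as the kernel.
void fftshift_reference(const std::vector<std::complex<float>>& in,
                        std::vector<std::complex<float>>& out,
                        unsigned width, unsigned height) {
    for (unsigned y = 0; y < height; ++y) {
        for (unsigned x = 0; x < width; ++x) {
            unsigned ys = (y + height / 2) % height;  // swap top/bottom halves
            unsigned xs = (x + width / 2) % width;    // swap left/right halves
            out[ys * width + xs] = in[y * width + x];
        }
    }
}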
22a935f842578dc82b2e7b2a5427d69338b2e261.cu
#include <cusp/complex.h>
#include <cufft.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>

using namespace cusp;
using namespace thrust;

const unsigned int TILE_DIM = 32;
const unsigned int BLOCK_ROWS = 32;

__global__ void fftshift(complex<float>* d_idata, complex<float>* d_odata)
{
    __shared__ complex<float> tile[TILE_DIM][TILE_DIM + 1];
    int x = TILE_DIM * blockIdx.x + threadIdx.x;
    int y = TILE_DIM * blockIdx.y + threadIdx.y;
    int width = gridDim.x * TILE_DIM;

    tile[threadIdx.y][threadIdx.x] = d_idata[y*width + x];
    __syncthreads();

    if (blockIdx.x < width / (TILE_DIM * 2) && blockIdx.y < width / (TILE_DIM * 2)) {
        d_odata[(y + width / 2)*width + x + width / 2] = tile[threadIdx.y][threadIdx.x];
    }
    if (blockIdx.x < width / (TILE_DIM * 2) && blockIdx.y >= width / (TILE_DIM * 2)) {
        d_odata[(y - width / 2)*width + x + width / 2] = tile[threadIdx.y][threadIdx.x];
    }
    if (blockIdx.x >= width / (TILE_DIM * 2) && blockIdx.y < width / (TILE_DIM * 2)) {
        d_odata[(y + width / 2)*width + x - width / 2] = tile[threadIdx.y][threadIdx.x];
    }
    if (blockIdx.x >= width / (TILE_DIM * 2) && blockIdx.y >= width / (TILE_DIM * 2)) {
        d_odata[(y - width / 2)*width + x - width / 2] = tile[threadIdx.y][threadIdx.x];
    }
}

__global__ void ComplexScale(complex<float>* d_idata, complex<float>* d_odata, float scale,
                             unsigned int width, unsigned int height)
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col < width && row < height) {
        d_odata[row*width + col] = d_idata[row*width + col] * scale;
    }
}

extern "C" void pre_cuFft2D(std::complex<float>* h_kspace, std::complex<float>* h_image,
                            bool fft_forward, unsigned int width, unsigned int height)
{
    complex<float> *d_kspace, *d_image;
    device_vector<complex<float>> d_buffer(width * height);
    size_t lpitch;

    cufftHandle plan;
    checkCudaErrors(cufftPlan2d(&plan, height, width, CUFFT_C2C));

    checkCudaErrors(cudaMallocPitch((void **)&d_kspace, &lpitch, width * sizeof(complex<float>), height));
    checkCudaErrors(cudaMallocPitch((void **)&d_image, &lpitch, width * sizeof(complex<float>), height));
    checkCudaErrors(cudaMemcpy2D((void *)d_kspace, lpitch, (void *)h_kspace,
                                 sizeof(float)*2*width, sizeof(float)*2*width, height,
                                 cudaMemcpyHostToDevice));

    dim3 dimBlock(width, 1, 1);
    dim3 dimGrid(1, height, 1);
    dim3 grid(width / TILE_DIM, height / TILE_DIM, 1);
    dim3 block(TILE_DIM, BLOCK_ROWS, 1);

    fftshift<<<grid, block>>>(d_kspace, raw_pointer_cast(d_buffer.data()));

    if (fft_forward) {
        checkCudaErrors(cufftExecC2C(plan, raw_pointer_cast(d_buffer.data()), d_image, CUFFT_FORWARD));
    } else {
        checkCudaErrors(cufftExecC2C(plan, raw_pointer_cast(d_buffer.data()), d_image, CUFFT_INVERSE));
    }

    fftshift<<<grid, block>>>(d_image, raw_pointer_cast(d_buffer.data()));

    float scale = 1 / (float)std::sqrt(width * height);
    ComplexScale<<<dimGrid, dimBlock>>>(raw_pointer_cast(d_buffer.data()), d_image, scale, width, height);

    // Copy back to the tightly packed host buffer: source pitch is the device pitch,
    // destination pitch is the host row width in bytes.
    checkCudaErrors(cudaMemcpy2D((void *)h_image, sizeof(float)*2*width, (void *)d_image,
                                 lpitch, sizeof(float)*2*width, height,
                                 cudaMemcpyDeviceToHost));

    checkCudaErrors(cufftDestroy(plan));
    checkCudaErrors(cudaFree(d_kspace));
    checkCudaErrors(cudaFree(d_image));
}
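A hedged usage sketch for pre_cuFft2D follows: a forward transform of a centred impulse on a 256x256 grid, using row-major std::complex<float> buffers of size width*height. The expected output (flat spectrum of magnitude 1/sqrt(width*height) per sample) follows from the 1/sqrt(N) scaling applied inside the function; the sketch is illustrative, not part of the original source.

#include <complex>
#include <vector>

extern "C" void pre_cuFft2D(std::complex<float>* h_kspace, std::complex<float>* h_image,
                            bool fft_forward, unsigned int width, unsigned int height);

int main() {
    const unsigned int width = 256, height = 256;
    std::vector<std::complex<float>> kspace(width * height), image(width * height);
    kspace[(height / 2) * width + width / 2] = {1.0f, 0.0f};  // centred impulse

    pre_cuFft2D(kspace.data(), image.data(), /*fft_forward=*/true, width, height);
    // Every sample of `image` should now have magnitude 1/sqrt(width*height).
    return 0;
}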
a880ce55617e59dc9d6d7018073eb0f742983e88.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * University of Illinois Open Source License * Copyright 2015-2018 Luthey-Schulten Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, the Roberts Group, Johns Hopkins University, nor the names * of its contributors may be used to endorse or promote products derived from * this Software without specific prior written permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. * * Author(s): Mike Hallock */ #include <map> #include <string> #include <cstdlib> #include <sstream> #include "config.h" #if defined(MACOSX) #include <mach/mach_time.h> #elif defined(LINUX) #include <time.h> #endif #include "cuda/lm_cuda.h" #include "core/Math.h" #include "core/Print.h" #include "cme/CMESolver.h" #include "DiffusionModel.pb.h" #include "Lattice.pb.h" #include "SpeciesCounts.pb.h" #include "core/DataOutputQueue.h" #include "core/ResourceAllocator.h" #include "rdme/ByteLattice.h" #include "rdme/CudaByteLattice.h" #include "rdme/MpdRdmeSolver.h" #include "rng/RandomGenerator.h" #include "lptf/Profile.h" #include "rdme/MpdTestHarness.h" #include <hip/hiprtc.h> #define MPD_WORDS_PER_SITE 2 #define MPD_APRON_SIZE 1 using std::map; using lm::io::DiffusionModel; using lm::rdme::Lattice; using lm::rng::RandomGenerator; namespace lm { namespace rdme { __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) sanity_check(const unsigned int* L1, const unsigned int* L2); extern __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) precomp_reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList #ifdef MPD_GLOBAL_S_MATRIX , const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG, const float* qp0, const float* qp1, const float* qp2 #endif ); MpdTestHarness::MpdTestHarness() :MpdRdmeSolver() { } MpdTestHarness::~MpdTestHarness() { } void MpdTestHarness::generateTrajectory() { // Shadow the lattice member as a cuda lattice. 
CudaByteLattice * lattice = (CudaByteLattice *)this->lattice; // Synchronize the cuda memory. lattice->copyToGPU(); // Get the interval for writing species counts and lattices. // Get the simulation time limit. double maxTime=atof((*parameters)["maxTime"].c_str()); Print::printf(Print::INFO, "Running harness with %d species, %d reactions, %d site types for %e s with tau %e.", numberSpecies, numberReactions, numberSiteTypes, maxTime, tau); // Set the initial time. double time = 0.0; uint32_t timestep=1; total_orig = total_jit = 0.0f; hipEventCreate(&original_start); hipEventCreate(&original_end); hipEventCreate(&jit_start); hipEventCreate(&jit_end); // Loop until we have finished the simulation. while (time < maxTime) { // Run the next timestep. runTimestep(lattice, timestep++); // Update the time. time += tau; } timestep--; float avg_orig = total_orig / timestep; float avg_jit = total_jit / timestep; printf("FINAL Steps %d avg_orig %f avg_alt %f\n", timestep, avg_orig, avg_jit); } void MpdTestHarness::runTimestep(CudaByteLattice * lattice, uint32_t timestep) { // printf("*@ Timestep %d @\n", timestep); // Calculate some properties of the lattice. lattice_coord_t size = lattice->getSize(); const unsigned int latticeXSize = size.x; const unsigned int latticeYSize = size.y; const unsigned int latticeZSize = size.z; dim3 gridSize, threadBlockSize; // Execute the kernel for the x direction. calculateXLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((MpdRdmeSolver_x_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,0), (unsigned int*)cudaOverflowList))); lattice->swapSrcDest(); // Execute the kernel for the y direction. calculateYLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_Y_BLOCK_X_SIZE, TUNE_MPD_Y_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((MpdRdmeSolver_y_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,1), (unsigned int*)cudaOverflowList))); lattice->swapSrcDest(); // Execute the kernel for the z direction. calculateZLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_Z_BLOCK_X_SIZE, TUNE_MPD_Z_BLOCK_Z_SIZE, latticeXSize, latticeYSize, latticeZSize); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((MpdRdmeSolver_z_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,2), (unsigned int*)cudaOverflowList))); lattice->swapSrcDest(); // Execute the kernel for the reaction, this kernel updates the lattice in-place, so only the src pointer is passed. 
calculateReactionLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_REACTION_BLOCK_X_SIZE, TUNE_MPD_REACTION_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); // Copy SRC to DST so we can co-run CUDA_EXCEPTION_EXECUTE(hipMemcpyAsync(lattice->getGPUMemoryDest(), lattice->getGPUMemorySrc(), lattice->getParticleMemorySize(), hipMemcpyDeviceToDevice, cudaStream)); // Run original // Run static alternate test kernel hipEventRecord(original_start, cudaStream); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((MpdRdmeSolver_reaction_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList #ifdef MPD_GLOBAL_S_MATRIX , SG, RLG #endif ))); hipEventRecord(original_end, cudaStream); hipEventRecord(jit_start, cudaStream); // Defined in MpdRdmeSolver.cu to make sure we get same constants hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((precomp_reaction_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemoryDest(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList #ifdef MPD_GLOBAL_S_MATRIX , SG, RLG, propZeroOrder, propFirstOrder, propSecondOrder #endif ))); hipEventRecord(jit_end, cudaStream); // Check sanity hipLaunchKernelGGL(( sanity_check), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (unsigned int *)lattice->getGPUMemoryDest()); // Wait for the kernels to complete. CUDA_EXCEPTION_CHECK(hipStreamSynchronize(cudaStream)); float otime, jtime; hipEventElapsedTime(&otime, original_start, original_end); hipEventElapsedTime(&jtime, jit_start, jit_end); if (timestep % 5000 == 0) printf("TS %d Orignial time: %f ms, Alt time: %f ms\n", timestep, otime, jtime); total_orig += otime; total_jit += jtime; uint32_t overflowList[1+2*TUNE_MPD_MAX_PARTICLE_OVERFLOWS]; CUDA_EXCEPTION_CHECK(hipMemcpy(overflowList, cudaOverflowList, sizeof(uint32_t), hipMemcpyDeviceToHost)); uint numberExceptions = overflowList[0]; if (numberExceptions > 0) { Print::printf(Print::DEBUG, "%d overflows (not resolving)", numberExceptions); // Reset the overflow list. CUDA_EXCEPTION_CHECK(hipMemset(cudaOverflowList, 0, sizeof(uint32_t))); } } } }
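The timing in runTimestep above follows the standard event pattern: record an event before and after each kernel on the same stream, synchronise, then read the elapsed time. A stripped-down version of that pattern is sketched here against the CUDA runtime API (matching the .cu twin of this file below); the kernel and stream names are placeholders, not names from the harness.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void someKernel() {}  // placeholder for the kernels being timed

int main() {
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, stream);
    someKernel<<<1, 1, 0, stream>>>();
    cudaEventRecord(stop, stream);

    cudaStreamSynchronize(stream);      // both events have completed after this
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    std::printf("kernel time: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaStreamDestroy(stream);
    return 0;
}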
a880ce55617e59dc9d6d7018073eb0f742983e88.cu
/* * University of Illinois Open Source License * Copyright 2015-2018 Luthey-Schulten Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, the Roberts Group, Johns Hopkins University, nor the names * of its contributors may be used to endorse or promote products derived from * this Software without specific prior written permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. * * Author(s): Mike Hallock */ #include <map> #include <string> #include <cstdlib> #include <sstream> #include "config.h" #if defined(MACOSX) #include <mach/mach_time.h> #elif defined(LINUX) #include <time.h> #endif #include "cuda/lm_cuda.h" #include "core/Math.h" #include "core/Print.h" #include "cme/CMESolver.h" #include "DiffusionModel.pb.h" #include "Lattice.pb.h" #include "SpeciesCounts.pb.h" #include "core/DataOutputQueue.h" #include "core/ResourceAllocator.h" #include "rdme/ByteLattice.h" #include "rdme/CudaByteLattice.h" #include "rdme/MpdRdmeSolver.h" #include "rng/RandomGenerator.h" #include "lptf/Profile.h" #include "rdme/MpdTestHarness.h" #include <nvrtc.h> #define MPD_WORDS_PER_SITE 2 #define MPD_APRON_SIZE 1 using std::map; using lm::io::DiffusionModel; using lm::rdme::Lattice; using lm::rng::RandomGenerator; namespace lm { namespace rdme { __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) sanity_check(const unsigned int* L1, const unsigned int* L2); extern __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) precomp_reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList #ifdef MPD_GLOBAL_S_MATRIX , const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG, const float* qp0, const float* qp1, const float* qp2 #endif ); MpdTestHarness::MpdTestHarness() :MpdRdmeSolver() { } MpdTestHarness::~MpdTestHarness() { } void MpdTestHarness::generateTrajectory() { // Shadow the lattice member as a cuda lattice. 
CudaByteLattice * lattice = (CudaByteLattice *)this->lattice; // Synchronize the cuda memory. lattice->copyToGPU(); // Get the interval for writing species counts and lattices. // Get the simulation time limit. double maxTime=atof((*parameters)["maxTime"].c_str()); Print::printf(Print::INFO, "Running harness with %d species, %d reactions, %d site types for %e s with tau %e.", numberSpecies, numberReactions, numberSiteTypes, maxTime, tau); // Set the initial time. double time = 0.0; uint32_t timestep=1; total_orig = total_jit = 0.0f; cudaEventCreate(&original_start); cudaEventCreate(&original_end); cudaEventCreate(&jit_start); cudaEventCreate(&jit_end); // Loop until we have finished the simulation. while (time < maxTime) { // Run the next timestep. runTimestep(lattice, timestep++); // Update the time. time += tau; } timestep--; float avg_orig = total_orig / timestep; float avg_jit = total_jit / timestep; printf("FINAL Steps %d avg_orig %f avg_alt %f\n", timestep, avg_orig, avg_jit); } void MpdTestHarness::runTimestep(CudaByteLattice * lattice, uint32_t timestep) { // printf("*@ Timestep %d @\n", timestep); // Calculate some properties of the lattice. lattice_coord_t size = lattice->getSize(); const unsigned int latticeXSize = size.x; const unsigned int latticeYSize = size.y; const unsigned int latticeZSize = size.z; dim3 gridSize, threadBlockSize; // Execute the kernel for the x direction. calculateXLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize); CUDA_EXCEPTION_EXECUTE((MpdRdmeSolver_x_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,0), (unsigned int*)cudaOverflowList))); lattice->swapSrcDest(); // Execute the kernel for the y direction. calculateYLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_Y_BLOCK_X_SIZE, TUNE_MPD_Y_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); CUDA_EXCEPTION_EXECUTE((MpdRdmeSolver_y_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,1), (unsigned int*)cudaOverflowList))); lattice->swapSrcDest(); // Execute the kernel for the z direction. calculateZLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_Z_BLOCK_X_SIZE, TUNE_MPD_Z_BLOCK_Z_SIZE, latticeXSize, latticeYSize, latticeZSize); CUDA_EXCEPTION_EXECUTE((MpdRdmeSolver_z_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,2), (unsigned int*)cudaOverflowList))); lattice->swapSrcDest(); // Execute the kernel for the reaction, this kernel updates the lattice in-place, so only the src pointer is passed. 
calculateReactionLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_REACTION_BLOCK_X_SIZE, TUNE_MPD_REACTION_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); // Copy SRC to DST so we can co-run CUDA_EXCEPTION_EXECUTE(cudaMemcpyAsync(lattice->getGPUMemoryDest(), lattice->getGPUMemorySrc(), lattice->getParticleMemorySize(), cudaMemcpyDeviceToDevice, cudaStream)); // Run original // Run static alternate test kernel cudaEventRecord(original_start, cudaStream); CUDA_EXCEPTION_EXECUTE((MpdRdmeSolver_reaction_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList #ifdef MPD_GLOBAL_S_MATRIX , SG, RLG #endif ))); cudaEventRecord(original_end, cudaStream); cudaEventRecord(jit_start, cudaStream); // Defined in MpdRdmeSolver.cu to make sure we get same constants CUDA_EXCEPTION_EXECUTE((precomp_reaction_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemoryDest(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList #ifdef MPD_GLOBAL_S_MATRIX , SG, RLG, propZeroOrder, propFirstOrder, propSecondOrder #endif ))); cudaEventRecord(jit_end, cudaStream); // Check sanity sanity_check<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (unsigned int *)lattice->getGPUMemoryDest()); // Wait for the kernels to complete. CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(cudaStream)); float otime, jtime; cudaEventElapsedTime(&otime, original_start, original_end); cudaEventElapsedTime(&jtime, jit_start, jit_end); if (timestep % 5000 == 0) printf("TS %d Orignial time: %f ms, Alt time: %f ms\n", timestep, otime, jtime); total_orig += otime; total_jit += jtime; uint32_t overflowList[1+2*TUNE_MPD_MAX_PARTICLE_OVERFLOWS]; CUDA_EXCEPTION_CHECK(cudaMemcpy(overflowList, cudaOverflowList, sizeof(uint32_t), cudaMemcpyDeviceToHost)); uint numberExceptions = overflowList[0]; if (numberExceptions > 0) { Print::printf(Print::DEBUG, "%d overflows (not resolving)", numberExceptions); // Reset the overflow list. CUDA_EXCEPTION_CHECK(cudaMemset(cudaOverflowList, 0, sizeof(uint32_t))); } } } }
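The overflow handling at the end of runTimestep above reads back only the first word of cudaOverflowList (the exception count) and memsets it to zero for the next step. A self-contained sketch of that counter pattern, with a trivial placeholder kernel, looks roughly like this:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void bumpCounter(unsigned int* counter) {
    atomicAdd(counter, 1u);   // each thread records one "overflow"
}

int main() {
    unsigned int* d_counter;
    cudaMalloc(&d_counter, sizeof(unsigned int));
    cudaMemset(d_counter, 0, sizeof(unsigned int));

    bumpCounter<<<4, 64>>>(d_counter);

    unsigned int count = 0;
    cudaMemcpy(&count, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    std::printf("%u overflows\n", count);            // expect 256 here

    cudaMemset(d_counter, 0, sizeof(unsigned int));  // reset for the next timestep
    cudaFree(d_counter);
    return 0;
}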
ed135025f55fd8d684768bea61f7fb65cb62433b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <algorithm> #include <chrono> #include "fingerprint_structure.hpp" using namespace std; const int BLOCKSIZE = 36; // Constant weights const float w1 = 0.16f; const float w2 = 0.37f; const float w3 = 0.16f; const float w4 = 0.31f; __host__ __device__ unsigned char dperiod_to_byte(float period) { float fresult = period/period_unit; unsigned char result = (char)fresult; return result; } __host__ __device__ float dbyte_to_period(unsigned char c) { float result = period_unit*(int)c; return result; } __host__ __device__ unsigned char dfrequency_to_byte(float frequency) { if (frequency == 0) { return dperiod_to_byte(frequency); } else { return dperiod_to_byte(1.0f/frequency); } } __host__ __device__ float dbyte_to_frequency(unsigned char c) { float result = dbyte_to_period(c); if (result == 0) return result; else return 1/result; } __device__ float dbyte_to_coherence(unsigned char c) { float result = (float)c/coherence_unit; return result; } __device__ float dbyte_to_orientation(unsigned char c) { float result = orientation_unit*(int)c; return result; } __global__ void calculate_s1(fingerprint* db, fingerprint* fp, float* result, int* mapping) { __shared__ float ss, scos, ssin; int j = blockIdx.x; int i = threadIdx.x; // int idx = blockIdx.x * blockDim.x + threadIdx.x; if (i == 0) { ss = 0; scos = 0; ssin = 0; } __syncthreads(); // (db+j)->local_frequency[i] = dfrequency_t_byte(dbyte_to_frequency((db+j)->local_frequency[i])+0.1); float s = dbyte_to_coherence(fp->local_coherence[i])*dbyte_to_coherence((db+j)->local_coherence[i]); float d = M_PI/180.0f * 2 * (dbyte_to_orientation(fp->local_orientation[i])-dbyte_to_orientation((db+j)->local_orientation[i])); float tcos = s*cos(d); float tsin = s*sin(d); atomicAdd(&ss, s); atomicAdd(&scos, tcos); atomicAdd(&ssin, tsin); __syncthreads(); if (i == 0) { result[j] = sqrt(pow(scos,2)+pow(ssin,2))/ss; } __syncthreads(); // First core of a fingerprint check maximum from all core if (i == 0 && (db+j)->id%5 == 1) { int max_idx = j; for (int i=1 ; i<5 ; i++) { if ((db+j+1)->id%5 == 1) break; else { if (result[j+i] > result[max_idx]) { max_idx = j+i; } } } mapping[((db+j)->id-1)/5] = max_idx; } } __global__ void calculate_s2(fingerprint* db, fingerprint* fp, float* result) { __shared__ float s_addition, s_absdiff; int j = blockIdx.x; int i = threadIdx.x; // int idx = blockIdx.x*blockDim.x + threadIdx.x; float t_addition = dbyte_to_frequency(fp->local_frequency[i]) + dbyte_to_frequency((db+j)->local_frequency[i]); float t_absdiff = abs(dbyte_to_frequency(fp->local_frequency[i]) - dbyte_to_frequency((db+j)->local_frequency[i])); atomicAdd(&s_addition, t_addition); atomicAdd(&s_absdiff, t_absdiff); if (i == 0) { result[j] = 1 - (s_absdiff/s_addition); } } __global__ void calculate_s2_with_mapping(fingerprint* db, fingerprint* fp, float* result, int* mapping) { __shared__ float s_addition, s_absdiff; int j = mapping[blockIdx.x]; int i = threadIdx.x; // int idx = blockIdx.x*blockDim.x + threadIdx.x; float t_addition = dbyte_to_frequency(fp->local_frequency[i]) + dbyte_to_frequency((db+j)->local_frequency[i]); float t_absdiff = abs(dbyte_to_frequency(fp->local_frequency[i]) - dbyte_to_frequency((db+j)->local_frequency[i])); atomicAdd(&s_addition, t_addition); atomicAdd(&s_absdiff, t_absdiff); if (i == 0) { result[blockIdx.x] = 1 - (s_absdiff/s_addition); } } __global__ void calculate_s3(fingerprint* db, fingerprint* fp, float* 
result) { int j = blockIdx.x; result[j] = 1 - (abs(dbyte_to_frequency(fp->avg_frequency)-dbyte_to_frequency((db+j)->avg_frequency))/max(dbyte_to_frequency(fp->avg_frequency), dbyte_to_frequency((db+j)->avg_frequency))); } __global__ void calculate_s3_with_mapping(fingerprint* db, fingerprint* fp, float* result, int* mapping) { int j = mapping[blockIdx.x]; result[blockIdx.x] = 1 - (abs(dbyte_to_frequency(fp->avg_frequency)-dbyte_to_frequency((db+j)->avg_frequency))/max(dbyte_to_frequency(fp->avg_frequency), dbyte_to_frequency((db+j)->avg_frequency))); } __global__ void calculate_s4(fingerprint* db, fingerprint* fp, float* result) { int j = blockIdx.x; result[j] = 1-(abs(dbyte_to_orientation(fp->avg_orientation)-dbyte_to_orientation((db+j)->avg_orientation))/180.0f); } __global__ void calculate_s4_with_mapping(fingerprint* db, fingerprint* fp, float* result, int* mapping) { int j = mapping[blockIdx.x]; result[blockIdx.x] = 1-(abs(dbyte_to_orientation(fp->avg_orientation)-dbyte_to_orientation((db+j)->avg_orientation))/180.0f); } __global__ void calculate_s(float* s1, float* s2, float*s3, float* s4, float* result) { int i = threadIdx.x; result[i] = w1*s1[i] + w2*s2[i] + w3*s3[i] + w4*s4[i]; } __global__ void calculate_s_with_mapping(float* s1, float* s2, float*s3, float* s4, float* result, int* mapping) { int i = threadIdx.x; result[i] = w1*s1[mapping[i]] + w2*s2[i] + w3*s3[i] + w4*s4[i]; } __global__ void get_top_fingerprints(float* s, float* result, int* mapping) { int i = threadIdx.x; result[i] = s[mapping[i]]; } int main(int argc, char** argv) { if (argc < 3) { cerr << "Usage : ./parallel_indexing fingerprint-to-be-searched fingerprint-db\n"; return 0; } string fp_filename = argv[1]; string db_filename = argv[2]; cerr << "FP " << fp_filename << " DB " << db_filename << endl; // Read the fingerprint to be searched vector<struct fingerprint> fp; int count_fp = read_from_file(fp, fp_filename); vector<float> local_orie, local_cohe, local_freq; get_fingerprint_local_values(fp[0], local_orie, local_cohe, local_freq); float avg_o = get_fingerprint_average_orientation(fp[0]); float avg_f = get_fingerprint_average_frequency(fp[0]); // Read the database vector<struct fingerprint> db; int count_db = read_from_file(db, db_filename); cerr << "Fingerprint core database count : " << count_db << endl; cerr << "Last fingerprint ID : " << db[count_db-1].id << endl; int count_db_fingerprint = (db[count_db-1].id-1)/5+1; cerr << "Fingerprint database count : " << count_db_fingerprint << endl; auto timer_start = chrono::steady_clock::now(); // Test S1 fingerprint *d_fp, *d_db; float s1_result[count_db], s2_result[count_db], s3_result[count_db], s4_result[count_db]; float result[count_db]; float *d_result; hipMalloc((void **)&d_fp, sizeof(fingerprint)); hipMalloc((void **)&d_db, count_db*sizeof(fingerprint)); hipMalloc((void **)&d_result, count_db*sizeof(float)); //Mapping for block idx to fingerprint core idx int *d_mapping; hipMalloc((void **)&d_mapping, count_db_fingerprint*sizeof(int)); hipMemcpy(d_db, &db[0], count_db*sizeof(fingerprint), hipMemcpyHostToDevice); hipMemcpy(d_fp, &fp[0], sizeof(fingerprint), hipMemcpyHostToDevice); hipLaunchKernelGGL(( calculate_s1), dim3(count_db),dim3(BLOCKSIZE), 0, 0, d_db, d_fp, d_result, d_mapping); hipMemcpy(&s1_result[0], d_result, count_db*sizeof(float), hipMemcpyDeviceToHost); int mapping[count_db_fingerprint]; memset(mapping, 0, sizeof(mapping)); hipMemcpy(&mapping[0], d_mapping, count_db_fingerprint*sizeof(int), hipMemcpyDeviceToHost); // for (int i=0 ; i<count_db 
; i++) { // cout << i << " : ID " << db[i].id << endl; // cout << "result = " << s1_result[i] << endl; // } // Check mapping cout << "MAPPING\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << i << " " << mapping[i] << endl; } // Test S2 // Only calculate for 1 core per fingerprint // calculate_s2<<<count_db,BLOCKSIZE>>>(d_db, d_fp, d_result); hipLaunchKernelGGL(( calculate_s2_with_mapping), dim3(count_db_fingerprint),dim3(BLOCKSIZE), 0, 0, d_db, d_fp, d_result, d_mapping); hipMemcpy(&s2_result[0], d_result, count_db*sizeof(float), hipMemcpyDeviceToHost); // cout << "\n\nS2\n"; // for (int i=0 ; i<count_db ; i++) { // cout << i << " : ID " << db[i].id << endl; // cout << "result = " << s2_result[i] << endl; // } // Test S3 // calculate_s3<<<count_db,1>>>(d_db, d_fp, d_result); hipLaunchKernelGGL(( calculate_s3_with_mapping), dim3(count_db_fingerprint),dim3(1), 0, 0, d_db, d_fp, d_result,d_mapping); hipMemcpy(&s3_result[0], d_result, count_db*sizeof(float), hipMemcpyDeviceToHost); // cout << "\n\nS3\n"; // for (int i=0 ; i<count_db ; i++) { // cout << i << " : ID " << db[i].id << endl; // cout << "result = " << s3_result[i] << endl; // } // Test S4 // calculate_s4<<<count_db,1>>>(d_db, d_fp, d_result); hipLaunchKernelGGL(( calculate_s4_with_mapping), dim3(count_db_fingerprint),dim3(1), 0, 0, d_db, d_fp, d_result, d_mapping); hipMemcpy(&s4_result[0], d_result, count_db*sizeof(float), hipMemcpyDeviceToHost); // cout << "\n\nS4\n"; // for (int i=0 ; i<count_db ; i++) { // cout << i << " : ID " << db[i].id << endl; // cout << "result = " << s4_result[i] << endl; // } // Test S // Copy S1-S4 to device float *d_s1_result, *d_s2_result, *d_s3_result, *d_s4_result; hipMalloc((void **)&d_s1_result, count_db*sizeof(float)); hipMalloc((void **)&d_s2_result, count_db*sizeof(float)); hipMalloc((void **)&d_s3_result, count_db*sizeof(float)); hipMalloc((void **)&d_s4_result, count_db*sizeof(float)); hipMemcpy(d_s1_result, &s1_result[0], count_db*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_s2_result, &s2_result[0], count_db*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_s3_result, &s3_result[0], count_db*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_s4_result, &s4_result[0], count_db*sizeof(float), hipMemcpyHostToDevice); // calculate_s<<<1,count_db>>>(d_s1_result, d_s2_result, d_s3_result, d_s4_result, d_result); hipLaunchKernelGGL(( calculate_s_with_mapping), dim3(1),dim3(count_db_fingerprint), 0, 0, d_s1_result, d_s2_result, d_s3_result, d_s4_result, d_result, d_mapping); hipMemcpy(&result[0], d_result, count_db*sizeof(float), hipMemcpyDeviceToHost); cout << "\n\nS\n"; // for (int i=0 ; i<count_db ; i++) { /*for (int i=0 ; i<count_db_fingerprint ; i++) { cout << i << " : ID " << db[i].id << endl; cout << "result = " << result[i] << endl; }*/ float *d_final_result; /* This is for when not used with mapping */ // get_top_fingerprints<<<1,count_db_fingerprint>>>(d_result, d_final_result, d_mapping); // hipMemcpy(&result[0], d_final_result, count_db_fingerprint*sizeof(float), hipMemcpyDeviceToHost); cout << "\n\nFinal Result\n"; vector< pair<float, int> > best_matches; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << "result = " << result[i] << endl; best_matches.push_back(make_pair(result[i], db[mapping[i]].id)); } sort(best_matches.rbegin(), best_matches.rend()); cout << "\nBest match\n"; for (int i=0 ; i<best_matches.size() ; i++) { cout << "ID " << best_matches[i].second << "-"<< best_matches[i].second/5 <<"\t: " << best_matches[i].first << endl; } auto timer_end = 
chrono::steady_clock::now(); chrono::duration<double> diff = timer_end - timer_start; cout << "Time to get indexing result for " << count_db << " fingerprints in DB : " << diff.count() << endl; // DEBUG /*cout << "\nS1\n"; for (int i=0 ; i<count_db ; i++) { cout << s1_result[i] << endl; } cout << "\nS1\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << s1_result[mapping[i]] << endl; } cout << "\nS2\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << s2_result[i] << endl; } cout << "\nS3\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << s3_result[i] << endl; } cout << "\nS4\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << s4_result[i] << endl; } cout << "\nS\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << result[i] << endl; }*/ hipFree(d_fp); hipFree(d_db); hipFree(d_result); hipFree(d_mapping); hipFree(d_s1_result); hipFree(d_s2_result); hipFree(d_s3_result); hipFree(d_s4_result); hipFree(d_final_result); return 0; } // nvcc -o parallel_indexing parallel_indexing.cu fingerprint_structure.cpp -std=c++11
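One thing worth flagging in calculate_s2 / calculate_s2_with_mapping above: unlike calculate_s1, the shared accumulators are never zeroed and there is no __syncthreads() before thread 0 reads them, so the result depends on whatever happens to sit in shared memory and on thread timing. A corrected shape for that block-wide atomic reduction is sketched below; it reuses the fingerprint type and dbyte_to_frequency helper defined in this file and is a sketch, not a drop-in patch.

__global__ void calculate_s2_fixed(fingerprint* db, fingerprint* fp, float* result) {
    __shared__ float s_addition, s_absdiff;
    int j = blockIdx.x;
    int i = threadIdx.x;

    if (i == 0) {                 // zero the shared accumulators once per block
        s_addition = 0.0f;
        s_absdiff = 0.0f;
    }
    __syncthreads();

    float fa = dbyte_to_frequency(fp->local_frequency[i]);
    float fb = dbyte_to_frequency((db + j)->local_frequency[i]);
    atomicAdd(&s_addition, fa + fb);
    atomicAdd(&s_absdiff, fabsf(fa - fb));
    __syncthreads();              // all contributions are in before thread 0 reads

    if (i == 0) {
        result[j] = 1.0f - (s_absdiff / s_addition);
    }
}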
ed135025f55fd8d684768bea61f7fb65cb62433b.cu
#include <stdio.h> #include <iostream> #include <algorithm> #include <chrono> #include "fingerprint_structure.hpp" using namespace std; const int BLOCKSIZE = 36; // Constant weights const float w1 = 0.16f; const float w2 = 0.37f; const float w3 = 0.16f; const float w4 = 0.31f; __host__ __device__ unsigned char dperiod_to_byte(float period) { float fresult = period/period_unit; unsigned char result = (char)fresult; return result; } __host__ __device__ float dbyte_to_period(unsigned char c) { float result = period_unit*(int)c; return result; } __host__ __device__ unsigned char dfrequency_to_byte(float frequency) { if (frequency == 0) { return dperiod_to_byte(frequency); } else { return dperiod_to_byte(1.0f/frequency); } } __host__ __device__ float dbyte_to_frequency(unsigned char c) { float result = dbyte_to_period(c); if (result == 0) return result; else return 1/result; } __device__ float dbyte_to_coherence(unsigned char c) { float result = (float)c/coherence_unit; return result; } __device__ float dbyte_to_orientation(unsigned char c) { float result = orientation_unit*(int)c; return result; } __global__ void calculate_s1(fingerprint* db, fingerprint* fp, float* result, int* mapping) { __shared__ float ss, scos, ssin; int j = blockIdx.x; int i = threadIdx.x; // int idx = blockIdx.x * blockDim.x + threadIdx.x; if (i == 0) { ss = 0; scos = 0; ssin = 0; } __syncthreads(); // (db+j)->local_frequency[i] = dfrequency_t_byte(dbyte_to_frequency((db+j)->local_frequency[i])+0.1); float s = dbyte_to_coherence(fp->local_coherence[i])*dbyte_to_coherence((db+j)->local_coherence[i]); float d = M_PI/180.0f * 2 * (dbyte_to_orientation(fp->local_orientation[i])-dbyte_to_orientation((db+j)->local_orientation[i])); float tcos = s*cos(d); float tsin = s*sin(d); atomicAdd(&ss, s); atomicAdd(&scos, tcos); atomicAdd(&ssin, tsin); __syncthreads(); if (i == 0) { result[j] = sqrt(pow(scos,2)+pow(ssin,2))/ss; } __syncthreads(); // First core of a fingerprint check maximum from all core if (i == 0 && (db+j)->id%5 == 1) { int max_idx = j; for (int i=1 ; i<5 ; i++) { if ((db+j+1)->id%5 == 1) break; else { if (result[j+i] > result[max_idx]) { max_idx = j+i; } } } mapping[((db+j)->id-1)/5] = max_idx; } } __global__ void calculate_s2(fingerprint* db, fingerprint* fp, float* result) { __shared__ float s_addition, s_absdiff; int j = blockIdx.x; int i = threadIdx.x; // int idx = blockIdx.x*blockDim.x + threadIdx.x; float t_addition = dbyte_to_frequency(fp->local_frequency[i]) + dbyte_to_frequency((db+j)->local_frequency[i]); float t_absdiff = abs(dbyte_to_frequency(fp->local_frequency[i]) - dbyte_to_frequency((db+j)->local_frequency[i])); atomicAdd(&s_addition, t_addition); atomicAdd(&s_absdiff, t_absdiff); if (i == 0) { result[j] = 1 - (s_absdiff/s_addition); } } __global__ void calculate_s2_with_mapping(fingerprint* db, fingerprint* fp, float* result, int* mapping) { __shared__ float s_addition, s_absdiff; int j = mapping[blockIdx.x]; int i = threadIdx.x; // int idx = blockIdx.x*blockDim.x + threadIdx.x; float t_addition = dbyte_to_frequency(fp->local_frequency[i]) + dbyte_to_frequency((db+j)->local_frequency[i]); float t_absdiff = abs(dbyte_to_frequency(fp->local_frequency[i]) - dbyte_to_frequency((db+j)->local_frequency[i])); atomicAdd(&s_addition, t_addition); atomicAdd(&s_absdiff, t_absdiff); if (i == 0) { result[blockIdx.x] = 1 - (s_absdiff/s_addition); } } __global__ void calculate_s3(fingerprint* db, fingerprint* fp, float* result) { int j = blockIdx.x; result[j] = 1 - 
(abs(dbyte_to_frequency(fp->avg_frequency)-dbyte_to_frequency((db+j)->avg_frequency))/max(dbyte_to_frequency(fp->avg_frequency), dbyte_to_frequency((db+j)->avg_frequency))); } __global__ void calculate_s3_with_mapping(fingerprint* db, fingerprint* fp, float* result, int* mapping) { int j = mapping[blockIdx.x]; result[blockIdx.x] = 1 - (abs(dbyte_to_frequency(fp->avg_frequency)-dbyte_to_frequency((db+j)->avg_frequency))/max(dbyte_to_frequency(fp->avg_frequency), dbyte_to_frequency((db+j)->avg_frequency))); } __global__ void calculate_s4(fingerprint* db, fingerprint* fp, float* result) { int j = blockIdx.x; result[j] = 1-(abs(dbyte_to_orientation(fp->avg_orientation)-dbyte_to_orientation((db+j)->avg_orientation))/180.0f); } __global__ void calculate_s4_with_mapping(fingerprint* db, fingerprint* fp, float* result, int* mapping) { int j = mapping[blockIdx.x]; result[blockIdx.x] = 1-(abs(dbyte_to_orientation(fp->avg_orientation)-dbyte_to_orientation((db+j)->avg_orientation))/180.0f); } __global__ void calculate_s(float* s1, float* s2, float*s3, float* s4, float* result) { int i = threadIdx.x; result[i] = w1*s1[i] + w2*s2[i] + w3*s3[i] + w4*s4[i]; } __global__ void calculate_s_with_mapping(float* s1, float* s2, float*s3, float* s4, float* result, int* mapping) { int i = threadIdx.x; result[i] = w1*s1[mapping[i]] + w2*s2[i] + w3*s3[i] + w4*s4[i]; } __global__ void get_top_fingerprints(float* s, float* result, int* mapping) { int i = threadIdx.x; result[i] = s[mapping[i]]; } int main(int argc, char** argv) { if (argc < 3) { cerr << "Usage : ./parallel_indexing fingerprint-to-be-searched fingerprint-db\n"; return 0; } string fp_filename = argv[1]; string db_filename = argv[2]; cerr << "FP " << fp_filename << " DB " << db_filename << endl; // Read the fingerprint to be searched vector<struct fingerprint> fp; int count_fp = read_from_file(fp, fp_filename); vector<float> local_orie, local_cohe, local_freq; get_fingerprint_local_values(fp[0], local_orie, local_cohe, local_freq); float avg_o = get_fingerprint_average_orientation(fp[0]); float avg_f = get_fingerprint_average_frequency(fp[0]); // Read the database vector<struct fingerprint> db; int count_db = read_from_file(db, db_filename); cerr << "Fingerprint core database count : " << count_db << endl; cerr << "Last fingerprint ID : " << db[count_db-1].id << endl; int count_db_fingerprint = (db[count_db-1].id-1)/5+1; cerr << "Fingerprint database count : " << count_db_fingerprint << endl; auto timer_start = chrono::steady_clock::now(); // Test S1 fingerprint *d_fp, *d_db; float s1_result[count_db], s2_result[count_db], s3_result[count_db], s4_result[count_db]; float result[count_db]; float *d_result; cudaMalloc((void **)&d_fp, sizeof(fingerprint)); cudaMalloc((void **)&d_db, count_db*sizeof(fingerprint)); cudaMalloc((void **)&d_result, count_db*sizeof(float)); //Mapping for block idx to fingerprint core idx int *d_mapping; cudaMalloc((void **)&d_mapping, count_db_fingerprint*sizeof(int)); cudaMemcpy(d_db, &db[0], count_db*sizeof(fingerprint), cudaMemcpyHostToDevice); cudaMemcpy(d_fp, &fp[0], sizeof(fingerprint), cudaMemcpyHostToDevice); calculate_s1<<<count_db,BLOCKSIZE>>>(d_db, d_fp, d_result, d_mapping); cudaMemcpy(&s1_result[0], d_result, count_db*sizeof(float), cudaMemcpyDeviceToHost); int mapping[count_db_fingerprint]; memset(mapping, 0, sizeof(mapping)); cudaMemcpy(&mapping[0], d_mapping, count_db_fingerprint*sizeof(int), cudaMemcpyDeviceToHost); // for (int i=0 ; i<count_db ; i++) { // cout << i << " : ID " << db[i].id << endl; // cout << 
"result = " << s1_result[i] << endl; // } // Check mapping cout << "MAPPING\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << i << " " << mapping[i] << endl; } // Test S2 // Only calculate for 1 core per fingerprint // calculate_s2<<<count_db,BLOCKSIZE>>>(d_db, d_fp, d_result); calculate_s2_with_mapping<<<count_db_fingerprint,BLOCKSIZE>>>(d_db, d_fp, d_result, d_mapping); cudaMemcpy(&s2_result[0], d_result, count_db*sizeof(float), cudaMemcpyDeviceToHost); // cout << "\n\nS2\n"; // for (int i=0 ; i<count_db ; i++) { // cout << i << " : ID " << db[i].id << endl; // cout << "result = " << s2_result[i] << endl; // } // Test S3 // calculate_s3<<<count_db,1>>>(d_db, d_fp, d_result); calculate_s3_with_mapping<<<count_db_fingerprint,1>>>(d_db, d_fp, d_result,d_mapping); cudaMemcpy(&s3_result[0], d_result, count_db*sizeof(float), cudaMemcpyDeviceToHost); // cout << "\n\nS3\n"; // for (int i=0 ; i<count_db ; i++) { // cout << i << " : ID " << db[i].id << endl; // cout << "result = " << s3_result[i] << endl; // } // Test S4 // calculate_s4<<<count_db,1>>>(d_db, d_fp, d_result); calculate_s4_with_mapping<<<count_db_fingerprint,1>>>(d_db, d_fp, d_result, d_mapping); cudaMemcpy(&s4_result[0], d_result, count_db*sizeof(float), cudaMemcpyDeviceToHost); // cout << "\n\nS4\n"; // for (int i=0 ; i<count_db ; i++) { // cout << i << " : ID " << db[i].id << endl; // cout << "result = " << s4_result[i] << endl; // } // Test S // Copy S1-S4 to device float *d_s1_result, *d_s2_result, *d_s3_result, *d_s4_result; cudaMalloc((void **)&d_s1_result, count_db*sizeof(float)); cudaMalloc((void **)&d_s2_result, count_db*sizeof(float)); cudaMalloc((void **)&d_s3_result, count_db*sizeof(float)); cudaMalloc((void **)&d_s4_result, count_db*sizeof(float)); cudaMemcpy(d_s1_result, &s1_result[0], count_db*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_s2_result, &s2_result[0], count_db*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_s3_result, &s3_result[0], count_db*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_s4_result, &s4_result[0], count_db*sizeof(float), cudaMemcpyHostToDevice); // calculate_s<<<1,count_db>>>(d_s1_result, d_s2_result, d_s3_result, d_s4_result, d_result); calculate_s_with_mapping<<<1,count_db_fingerprint>>>(d_s1_result, d_s2_result, d_s3_result, d_s4_result, d_result, d_mapping); cudaMemcpy(&result[0], d_result, count_db*sizeof(float), cudaMemcpyDeviceToHost); cout << "\n\nS\n"; // for (int i=0 ; i<count_db ; i++) { /*for (int i=0 ; i<count_db_fingerprint ; i++) { cout << i << " : ID " << db[i].id << endl; cout << "result = " << result[i] << endl; }*/ float *d_final_result; /* This is for when not used with mapping */ // get_top_fingerprints<<<1,count_db_fingerprint>>>(d_result, d_final_result, d_mapping); // cudaMemcpy(&result[0], d_final_result, count_db_fingerprint*sizeof(float), cudaMemcpyDeviceToHost); cout << "\n\nFinal Result\n"; vector< pair<float, int> > best_matches; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << "result = " << result[i] << endl; best_matches.push_back(make_pair(result[i], db[mapping[i]].id)); } sort(best_matches.rbegin(), best_matches.rend()); cout << "\nBest match\n"; for (int i=0 ; i<best_matches.size() ; i++) { cout << "ID " << best_matches[i].second << "-"<< best_matches[i].second/5 <<"\t: " << best_matches[i].first << endl; } auto timer_end = chrono::steady_clock::now(); chrono::duration<double> diff = timer_end - timer_start; cout << "Time to get indexing result for " << count_db << " fingerprints in DB : " << diff.count() << endl; // 
DEBUG /*cout << "\nS1\n"; for (int i=0 ; i<count_db ; i++) { cout << s1_result[i] << endl; } cout << "\nS1\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << s1_result[mapping[i]] << endl; } cout << "\nS2\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << s2_result[i] << endl; } cout << "\nS3\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << s3_result[i] << endl; } cout << "\nS4\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << s4_result[i] << endl; } cout << "\nS\n"; for (int i=0 ; i<count_db_fingerprint ; i++) { cout << result[i] << endl; }*/ cudaFree(d_fp); cudaFree(d_db); cudaFree(d_result); cudaFree(d_mapping); cudaFree(d_s1_result); cudaFree(d_s2_result); cudaFree(d_s3_result); cudaFree(d_s4_result); cudaFree(d_final_result); return 0; } // nvcc -o parallel_indexing parallel_indexing.cu fingerprint_structure.cpp -std=c++11
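For sanity-checking the GPU results above on a single record, the S1 term can be recomputed on the CPU directly from the decoded per-cell values. The sketch below mirrors the arithmetic of calculate_s1 (coherence-weighted orientation agreement over the 36 local cells), operating on already-decoded coherence and orientation-in-degrees arrays rather than the byte-packed fields; it is a reference, not part of the original program.

#include <cmath>

// CPU reference for the per-core S1 term computed by calculate_s1 above.
float s1_reference(const float* cohA, const float* oriA,
                   const float* cohB, const float* oriB, int n /* 36 cells */) {
    const float kPi = 3.14159265358979f;
    float ss = 0.0f, scos = 0.0f, ssin = 0.0f;
    for (int i = 0; i < n; ++i) {
        float s = cohA[i] * cohB[i];                       // coherence weight
        float d = kPi / 180.0f * 2.0f * (oriA[i] - oriB[i]);  // doubled orientation difference
        ss   += s;
        scos += s * std::cos(d);
        ssin += s * std::sin(d);
    }
    return std::sqrt(scos * scos + ssin * ssin) / ss;
}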
3166d843a455d30c34ba364f08a5b7b4f2abcab2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // pbrt is Copyright(c) 1998-2020 Matt Pharr, Wenzel Jakob, and Greg Humphreys. // The pbrt source code is licensed under the Apache License, Version 2.0. // SPDX: Apache-2.0 #include <pbrt/pbrt.h> #include <pbrt/gpu/accel.h> #include <pbrt/gpu/optix.h> #include <pbrt/interaction.h> #include <pbrt/materials.h> #include <pbrt/media.h> #include <pbrt/shapes.h> #include <pbrt/textures.h> #include <pbrt/util/float.h> #include <pbrt/util/rng.h> #include <pbrt/util/transform.h> #include <pbrt/util/vecmath.h> #include <pbrt/util/color.cpp> // :-( #include <pbrt/util/colorspace.cpp> // :-( #include <pbrt/util/spectrum.cpp> // :-( #include <pbrt/util/transform.cpp> // :-( #include <optix_device.h> #include <utility> #ifdef PBRT_GPU_DBG #ifndef TO_STRING #define TO_STRING(x) TO_STRING2(x) #define TO_STRING2(x) #x #endif // !TO_STRING #define DBG(...) printf(__FILE__ ":" TO_STRING(__LINE__) ": " __VA_ARGS__) #else #define DBG(...) #endif // PBRT_GPU_DBG using namespace pbrt; extern "C" { extern __constant__ pbrt::RayIntersectParameters params; } /////////////////////////////////////////////////////////////////////////// // Utility functions // Payload management __device__ inline uint32_t packPointer0(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uptr >> 32; } __device__ inline uint32_t packPointer1(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uint32_t(uptr); } template <typename T> static __forceinline__ __device__ T *getPayload() { uint32_t p0 = optixGetPayload_0(), p1 = optixGetPayload_1(); const uint64_t uptr = (uint64_t(p0) << 32) | p1; return reinterpret_cast<T *>(uptr); } template <typename... Args> __device__ inline void Trace(OptixTraversableHandle traversable, Ray ray, Float tMin, Float tMax, OptixRayFlags flags, Args &&... payload) { optixTrace(traversable, make_float3(ray.o.x, ray.o.y, ray.o.z), make_float3(ray.d.x, ray.d.y, ray.d.z), tMin, tMax, ray.time, OptixVisibilityMask(255), flags, 0, /* ray type */ 1, /* number of ray types */ 0, /* missSBTIndex */ std::forward<Args>(payload)...); } /////////////////////////////////////////////////////////////////////////// // Closest hit struct ClosestHitContext { PBRT_GPU ClosestHitContext(MediumHandle rayMedium, bool shadowRay) : rayMedium(rayMedium), shadowRay(shadowRay) {} MediumHandle rayMedium; bool shadowRay; // out Point3fi piHit; Normal3f nHit; MaterialHandle material; MediumInterface mediumInterface; PBRT_GPU Ray SpawnRayTo(const Point3f &p) const { Interaction intr(piHit, nHit); intr.mediumInterface = &mediumInterface; return intr.SpawnRayTo(p); } }; extern "C" __global__ void __raygen__findClosest() { int rayIndex(optixGetLaunchIndex().x); if (rayIndex >= params.rayQueue->Size()) return; RayWorkItem r = (*params.rayQueue)[rayIndex]; Ray ray = r.ray; Float tMax = 1e30f; ClosestHitContext ctx(ray.medium, false); uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx); DBG("ray o %f %f %f dir %f %f %f tmax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); uint32_t missed = 0; Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1, missed); if (missed) { if (ray.medium) { DBG("Adding miss ray to mediumSampleQueue. 
" "ray %f %f %f d %f %f %f beta %f %f %f %f\n", r.ray.o.x, r.ray.o.y, r.ray.o.z, r.ray.d.x, r.ray.d.y, r.ray.d.z, r.beta[0], r.beta[1], r.beta[2], r.beta[3]); params.mediumSampleQueue->Push(r.ray, Infinity, r.lambda, r.beta, r.pdfUni, r.pdfNEE, rayIndex, r.pixelIndex, r.piPrev, r.nPrev, r.nsPrev, r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale); } else if (params.escapedRayQueue) { DBG("Adding ray to escapedRayQueue ray index %d pixel index %d\n", rayIndex, r.pixelIndex); params.escapedRayQueue->Push(EscapedRayWorkItem{ r.beta, r.pdfUni, r.pdfNEE, r.lambda, ray.o, ray.d, r.piPrev, r.nPrev, r.nsPrev, (int)r.isSpecularBounce, r.pixelIndex}); } } } extern "C" __global__ void __miss__noop() { optixSetPayload_2(1); } static __forceinline__ __device__ void ProcessClosestIntersection( SurfaceInteraction intr) { int rayIndex = optixGetLaunchIndex().x; MediumHandle rayMedium = getPayload<ClosestHitContext>()->rayMedium; if (intr.mediumInterface) getPayload<ClosestHitContext>()->mediumInterface = *intr.mediumInterface; else getPayload<ClosestHitContext>()->mediumInterface = MediumInterface(rayMedium); getPayload<ClosestHitContext>()->piHit = intr.pi; getPayload<ClosestHitContext>()->nHit = intr.n; getPayload<ClosestHitContext>()->material = intr.material; if (getPayload<ClosestHitContext>()->shadowRay) return; // We only have the ray queue (and it only makes sense to access) for // regular closest hit rays. RayWorkItem r = (*params.rayQueue)[rayIndex]; if (rayMedium) { assert(params.mediumSampleQueue); DBG("Enqueuing into medium sample queue\n"); params.mediumSampleQueue->Push( MediumSampleWorkItem{r.ray, optixGetRayTmax(), r.lambda, r.beta, r.pdfUni, r.pdfNEE, rayIndex, r.pixelIndex, r.piPrev, r.nPrev, r.nsPrev, r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale, intr.areaLight, intr.pi, intr.n, -r.ray.d, intr.uv, intr.material, intr.shading.n, intr.shading.dpdu, intr.shading.dpdv, intr.shading.dndu, intr.shading.dndv, getPayload<ClosestHitContext>()->mediumInterface}); return; } // FIXME: this is all basically duplicate code w/medium.cpp MaterialHandle material = intr.material; if (!material) { DBG("Enqueuing into medium transition queue: ray index %d pixel index %d \n", rayIndex, r.pixelIndex); Ray newRay = intr.SpawnRay(r.ray.d); params.mediumTransitionQueue->Push(MediumTransitionWorkItem{ newRay, r.lambda, r.beta, r.pdfUni, r.pdfNEE, r.piPrev, r.nPrev, r.nsPrev, r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale, r.pixelIndex}); return; } if (intr.areaLight) { DBG("Ray hit an area light: adding to hitAreaLightQueue ray index %d pixel index " "%d\n", rayIndex, r.pixelIndex); Ray ray = r.ray; // TODO: intr.wo == -ray.d? params.hitAreaLightQueue->Push(HitAreaLightWorkItem{ intr.areaLight, r.lambda, r.beta, r.pdfUni, r.pdfNEE, intr.p(), intr.n, intr.uv, intr.wo, r.piPrev, ray.d, ray.time, r.nPrev, r.nsPrev, (int)r.isSpecularBounce, r.pixelIndex}); } FloatTextureHandle displacement = material.GetDisplacement(); MaterialEvalQueue *q = (material.CanEvaluateTextures(BasicTextureEvaluator()) && (!displacement || BasicTextureEvaluator().CanEvaluate({displacement}, {}))) ? 
params.basicEvalMaterialQueue : params.universalEvalMaterialQueue; DBG("Enqueuing for material eval, mtl tag %d\n", material.Tag()); auto enqueue = [=](auto ptr) { using Material = typename std::remove_reference_t<decltype(*ptr)>; q->Push<Material>(MaterialEvalWorkItem<Material>{ ptr, r.lambda, r.beta, r.pdfUni, intr.pi, intr.n, intr.shading.n, intr.shading.dpdu, intr.shading.dpdv, intr.shading.dndu, intr.shading.dndv, intr.wo, intr.uv, intr.time, r.anyNonSpecularBounces, r.etaScale, getPayload<ClosestHitContext>()->mediumInterface, rayIndex, r.pixelIndex}); }; material.Dispatch(enqueue); DBG("Closest hit found intersection at t %f\n", optixGetRayTmax()); } /////////////////////////////////////////////////////////////////////////// // Triangles static __forceinline__ __device__ pstd::optional<SurfaceInteraction> getTriangleIntersection() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); float b1 = optixGetTriangleBarycentrics().x; float b2 = optixGetTriangleBarycentrics().y; float b0 = 1 - b1 - b2; float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); assert(optixGetTransformListSize() == 1); float worldFromObj[12], objFromWorld[12]; optixGetObjectToWorldTransformMatrix(worldFromObj); optixGetWorldToObjectTransformMatrix(objFromWorld); SquareMatrix<4> worldFromObjM(worldFromObj[0], worldFromObj[1], worldFromObj[2], worldFromObj[3], worldFromObj[4], worldFromObj[5], worldFromObj[6], worldFromObj[7], worldFromObj[8], worldFromObj[9], worldFromObj[10], worldFromObj[11], 0.f, 0.f, 0.f, 1.f); SquareMatrix<4> objFromWorldM(objFromWorld[0], objFromWorld[1], objFromWorld[2], objFromWorld[3], objFromWorld[4], objFromWorld[5], objFromWorld[6], objFromWorld[7], objFromWorld[8], objFromWorld[9], objFromWorld[10], objFromWorld[11], 0.f, 0.f, 0.f, 1.f); Transform worldFromInstance(worldFromObjM, objFromWorldM); return Triangle::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(), {b0, b1, b2}, optixGetRayTime(), wo, worldFromInstance); } static __forceinline__ __device__ bool alphaKilled(const TriangleMeshRecord &rec) { if (!rec.alphaTexture) return false; pstd::optional<SurfaceInteraction> intr = getTriangleIntersection(); if (!intr) return true; BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, *intr); return alpha == 0; } extern "C" __global__ void __closesthit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); // It's slightly dicey to assume intr is valid. But invalid would // presumably mean that OptiX returned a hit with a degenerate // triangle... 
SurfaceInteraction intr = *getTriangleIntersection(); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (!rec.areaLights.empty()) intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()]; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (alphaKilled(rec)) optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__shadowTriangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); if (alphaKilled(rec)) optixIgnoreIntersection(); } /////////////////////////////////////////////////////////////////////////// // Shadow rays extern "C" __global__ void __raygen__shadow() { int index = optixGetLaunchIndex().x; if (index >= params.shadowRayQueue->Size()) return; ShadowRayWorkItem sr = (*params.shadowRayQueue)[index]; uint32_t missed = 0; Trace(params.traversable, sr.ray, 1e-5f /* tMin */, sr.tMax, OPTIX_RAY_FLAG_NONE, missed); SampledSpectrum Ld; if (missed) Ld = sr.Ld / (sr.pdfUni + sr.pdfNEE).Average(); else Ld = SampledSpectrum(0.); params.shadowRayQueue->Ld[index] = Ld; } extern "C" __global__ void __miss__shadow() { optixSetPayload_0(1); } extern "C" __global__ void __raygen__shadow_Tr() { DBG("raygen sahadow tr %d\n", optixGetLaunchIndex().x); int index = optixGetLaunchIndex().x; if (index >= params.shadowRayQueue->Size()) return; ShadowRayWorkItem sr = (*params.shadowRayQueue)[index]; SampledWavelengths lambda = sr.lambda; SampledSpectrum Ld = sr.Ld; DBG("Initial Ld %f %f %f %f shadow ray index %d pixel index %d\n", Ld[0], Ld[1], Ld[2], Ld[3], index, sr.pixelIndex); SampledSpectrum pdfUni = sr.pdfUni, pdfNEE = sr.pdfNEE; Ray ray = sr.ray; Float tMax = sr.tMax; Point3f pLight = ray(tMax); RNG rng(Hash(ray.o), Hash(ray.d)); while (true) { ClosestHitContext ctx(ray.medium, true); uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx); DBG("Tracing shadow tr shadow ray index %d pixel index %d " "ray %f %f %f d %f %f %f tMax %f\n", index, sr.pixelIndex, ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); uint32_t missed = 0; Trace(params.traversable, ray, 1e-5f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1, missed); if (!missed && ctx.material) { DBG("Hit opaque. Bye\n"); // Hit opaque surface Ld = SampledSpectrum(0.f); break; } if (ray.medium) { DBG("Ray medium %p. Will sample tmaj...\n", ray.medium.ptr()); Float tEnd = missed ? tMax : (Distance(ray.o, Point3f(ctx.piHit)) / Length(ray.d)); ray.medium.SampleTmaj(ray, tEnd, rng, lambda, [&](const MediumSample &mediumSample) { if (!mediumSample.intr) // FIXME: include last Tmaj? 
return false; const SampledSpectrum &Tmaj = mediumSample.Tmaj; const MediumInteraction &intr = *mediumSample.intr; SampledSpectrum sigma_n = intr.sigma_n(); // ratio-tracking: only evaluate null scattering Ld *= Tmaj * sigma_n; pdfNEE *= Tmaj * intr.sigma_maj; pdfUni *= Tmaj * sigma_n; if (!Ld) return false; if (Ld.MaxComponentValue() > 0x1p24f || pdfNEE.MaxComponentValue() > 0x1p24f || pdfUni.MaxComponentValue() > 0x1p24f) { Ld *= 1.f / 0x1p24f; pdfNEE *= 1.f / 0x1p24f; pdfUni *= 1.f / 0x1p24f; } return true; }); } if (missed || !Ld) // done break; ray = ctx.SpawnRayTo(pLight); if (ray.d == Vector3f(0, 0, 0)) break; } Ld /= (pdfUni + pdfNEE).Average(); DBG("Setting final Ld for shadow ray index %d pixel index %d = as %f %f %f %f\n", index, sr.pixelIndex, Ld[0], Ld[1], Ld[2], Ld[3]); params.shadowRayQueue->Ld[index] = Ld; } extern "C" __global__ void __miss__shadow_Tr() { optixSetPayload_2(1); } ///////////////////////////////////////////////////////////////////////////////////// // Quadrics static __device__ inline SurfaceInteraction getQuadricIntersection( const QuadricIntersection &si) { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); Float time = optixGetRayTime(); SurfaceInteraction intr; if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>()) intr = sphere->InteractionFromIntersection(si, wo, time); else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>()) intr = cylinder->InteractionFromIntersection(si, wo, time); else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>()) intr = disk->InteractionFromIntersection(si, wo, time); else assert(!"unexpected quadric"); return intr; } extern "C" __global__ void __closesthit__quadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); QuadricIntersection qi; qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()), BitsToFloat(optixGetAttribute_2())); qi.phi = BitsToFloat(optixGetAttribute_3()); SurfaceInteraction intr = getQuadricIntersection(qi); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (rec.areaLight) intr.areaLight = rec.areaLight; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__shadowQuadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); } extern "C" __global__ void __intersection__quadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); float3 org = optixGetObjectRayOrigin(); float3 dir = optixGetObjectRayDirection(); Float tMax = optixGetRayTmax(); Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z)); pstd::optional<QuadricIntersection> isect; if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>()) isect = sphere->BasicIntersect(ray, tMax); else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>()) isect = cylinder->BasicIntersect(ray, tMax); else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>()) isect = disk->BasicIntersect(ray, tMax); if (!isect) return; if (rec.alphaTexture) { SurfaceInteraction intr = getQuadricIntersection(*isect); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha == 0) // No hit return; } optixReportIntersection(isect->tHit, 0 /* hit kind */, FloatToBits(isect->pObj.x), FloatToBits(isect->pObj.y), 
FloatToBits(isect->pObj.z), FloatToBits(isect->phi)); } /////////////////////////////////////////////////////////////////////////// // Bilinear patches static __forceinline__ __device__ SurfaceInteraction getBilinearPatchIntersection(Point2f uv) { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); return BilinearPatch::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(), uv, optixGetRayTime(), wo); } extern "C" __global__ void __closesthit__bilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1())); SurfaceInteraction intr = getBilinearPatchIntersection(uv); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (!rec.areaLights.empty()) intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()]; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__shadowBilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); } extern "C" __global__ void __intersection__bilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); float3 org = optixGetObjectRayOrigin(); float3 dir = optixGetObjectRayDirection(); Float tMax = optixGetRayTmax(); Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z)); int vertexIndex = 4 * optixGetPrimitiveIndex(); Point3f p00 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex]]; Point3f p10 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 1]]; Point3f p01 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 2]]; Point3f p11 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 3]]; pstd::optional<BilinearIntersection> isect = BilinearPatch::Intersect(ray, tMax, p00, p10, p01, p11); if (!isect) return; if (rec.alphaTexture) { SurfaceInteraction intr = getBilinearPatchIntersection(isect->uv); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha == 0) // No intersection return; } optixReportIntersection(isect->t, 0 /* hit kind */, FloatToBits(isect->uv[0]), FloatToBits(isect->uv[1])); } /////////////////////////////////////////////////////////////////////////// // Random hit (for subsurface scattering) struct RandomHitPayload { WeightedReservoirSampler<SubsurfaceInteraction> wrs; MaterialHandle material; }; extern "C" __global__ void __raygen__randomHit() { // Keep as uint32_t so can pass directly to optixTrace. uint32_t index = optixGetLaunchIndex().x; if (index >= params.subsurfaceScatterQueue->Size()) return; SubsurfaceScatterWorkItem s = (*params.subsurfaceScatterQueue)[index]; Ray ray(s.p0, s.p1 - s.p0); Float tMax = 1.f; RandomHitPayload payload; payload.wrs.Seed(Hash(s.p0, s.p1)); payload.material = s.material; uint32_t ptr0 = packPointer0(&payload), ptr1 = packPointer1(&payload); DBG("Randomhit raygen ray.o %f %f %f ray.d %f %f %f tMax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, ptr0, ptr1); if (payload.wrs.HasSample() && payload.wrs.WeightSum() > 0) { // TODO: latter check shouldn't be needed... 
const SubsurfaceInteraction &si = payload.wrs.GetSample(); DBG("optix si p %f %f %f n %f %f %f\n", si.p().x, si.p().y, si.p().z, si.n.x, si.n.y, si.n.z); params.subsurfaceScatterQueue->weight[index] = payload.wrs.WeightSum(); params.subsurfaceScatterQueue->ssi[index] = payload.wrs.GetSample(); } else params.subsurfaceScatterQueue->weight[index] = 0; } extern "C" __global__ void __anyhit__randomHitTriangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); RandomHitPayload *p = getPayload<RandomHitPayload>(); DBG("Anyhit triangle for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) p->wrs.Add([&] PBRT_CPU_GPU() { return *getTriangleIntersection(); }, 1.f); optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__randomHitBilinearPatch() { BilinearMeshRecord &rec = *(BilinearMeshRecord *)optixGetSbtDataPointer(); RandomHitPayload *p = getPayload<RandomHitPayload>(); DBG("Anyhit blp for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) p->wrs.Add( [&] PBRT_CPU_GPU() { Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1())); return getBilinearPatchIntersection(uv); }, 1.f); optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__randomHitQuadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); RandomHitPayload *p = getPayload<RandomHitPayload>(); DBG("Anyhit quadric for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) { p->wrs.Add( [&] PBRT_CPU_GPU() { QuadricIntersection qi; qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()), BitsToFloat(optixGetAttribute_2())); qi.phi = BitsToFloat(optixGetAttribute_3()); return getQuadricIntersection(qi); }, 1.f); } optixIgnoreIntersection(); }
3166d843a455d30c34ba364f08a5b7b4f2abcab2.cu
// pbrt is Copyright(c) 1998-2020 Matt Pharr, Wenzel Jakob, and Greg Humphreys. // The pbrt source code is licensed under the Apache License, Version 2.0. // SPDX: Apache-2.0 #include <pbrt/pbrt.h> #include <pbrt/gpu/accel.h> #include <pbrt/gpu/optix.h> #include <pbrt/interaction.h> #include <pbrt/materials.h> #include <pbrt/media.h> #include <pbrt/shapes.h> #include <pbrt/textures.h> #include <pbrt/util/float.h> #include <pbrt/util/rng.h> #include <pbrt/util/transform.h> #include <pbrt/util/vecmath.h> #include <pbrt/util/color.cpp> // :-( #include <pbrt/util/colorspace.cpp> // :-( #include <pbrt/util/spectrum.cpp> // :-( #include <pbrt/util/transform.cpp> // :-( #include <optix_device.h> #include <utility> #ifdef PBRT_GPU_DBG #ifndef TO_STRING #define TO_STRING(x) TO_STRING2(x) #define TO_STRING2(x) #x #endif // !TO_STRING #define DBG(...) printf(__FILE__ ":" TO_STRING(__LINE__) ": " __VA_ARGS__) #else #define DBG(...) #endif // PBRT_GPU_DBG using namespace pbrt; extern "C" { extern __constant__ pbrt::RayIntersectParameters params; } /////////////////////////////////////////////////////////////////////////// // Utility functions // Payload management __device__ inline uint32_t packPointer0(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uptr >> 32; } __device__ inline uint32_t packPointer1(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uint32_t(uptr); } template <typename T> static __forceinline__ __device__ T *getPayload() { uint32_t p0 = optixGetPayload_0(), p1 = optixGetPayload_1(); const uint64_t uptr = (uint64_t(p0) << 32) | p1; return reinterpret_cast<T *>(uptr); } template <typename... Args> __device__ inline void Trace(OptixTraversableHandle traversable, Ray ray, Float tMin, Float tMax, OptixRayFlags flags, Args &&... payload) { optixTrace(traversable, make_float3(ray.o.x, ray.o.y, ray.o.z), make_float3(ray.d.x, ray.d.y, ray.d.z), tMin, tMax, ray.time, OptixVisibilityMask(255), flags, 0, /* ray type */ 1, /* number of ray types */ 0, /* missSBTIndex */ std::forward<Args>(payload)...); } /////////////////////////////////////////////////////////////////////////// // Closest hit struct ClosestHitContext { PBRT_GPU ClosestHitContext(MediumHandle rayMedium, bool shadowRay) : rayMedium(rayMedium), shadowRay(shadowRay) {} MediumHandle rayMedium; bool shadowRay; // out Point3fi piHit; Normal3f nHit; MaterialHandle material; MediumInterface mediumInterface; PBRT_GPU Ray SpawnRayTo(const Point3f &p) const { Interaction intr(piHit, nHit); intr.mediumInterface = &mediumInterface; return intr.SpawnRayTo(p); } }; extern "C" __global__ void __raygen__findClosest() { int rayIndex(optixGetLaunchIndex().x); if (rayIndex >= params.rayQueue->Size()) return; RayWorkItem r = (*params.rayQueue)[rayIndex]; Ray ray = r.ray; Float tMax = 1e30f; ClosestHitContext ctx(ray.medium, false); uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx); DBG("ray o %f %f %f dir %f %f %f tmax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); uint32_t missed = 0; Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1, missed); if (missed) { if (ray.medium) { DBG("Adding miss ray to mediumSampleQueue. 
" "ray %f %f %f d %f %f %f beta %f %f %f %f\n", r.ray.o.x, r.ray.o.y, r.ray.o.z, r.ray.d.x, r.ray.d.y, r.ray.d.z, r.beta[0], r.beta[1], r.beta[2], r.beta[3]); params.mediumSampleQueue->Push(r.ray, Infinity, r.lambda, r.beta, r.pdfUni, r.pdfNEE, rayIndex, r.pixelIndex, r.piPrev, r.nPrev, r.nsPrev, r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale); } else if (params.escapedRayQueue) { DBG("Adding ray to escapedRayQueue ray index %d pixel index %d\n", rayIndex, r.pixelIndex); params.escapedRayQueue->Push(EscapedRayWorkItem{ r.beta, r.pdfUni, r.pdfNEE, r.lambda, ray.o, ray.d, r.piPrev, r.nPrev, r.nsPrev, (int)r.isSpecularBounce, r.pixelIndex}); } } } extern "C" __global__ void __miss__noop() { optixSetPayload_2(1); } static __forceinline__ __device__ void ProcessClosestIntersection( SurfaceInteraction intr) { int rayIndex = optixGetLaunchIndex().x; MediumHandle rayMedium = getPayload<ClosestHitContext>()->rayMedium; if (intr.mediumInterface) getPayload<ClosestHitContext>()->mediumInterface = *intr.mediumInterface; else getPayload<ClosestHitContext>()->mediumInterface = MediumInterface(rayMedium); getPayload<ClosestHitContext>()->piHit = intr.pi; getPayload<ClosestHitContext>()->nHit = intr.n; getPayload<ClosestHitContext>()->material = intr.material; if (getPayload<ClosestHitContext>()->shadowRay) return; // We only have the ray queue (and it only makes sense to access) for // regular closest hit rays. RayWorkItem r = (*params.rayQueue)[rayIndex]; if (rayMedium) { assert(params.mediumSampleQueue); DBG("Enqueuing into medium sample queue\n"); params.mediumSampleQueue->Push( MediumSampleWorkItem{r.ray, optixGetRayTmax(), r.lambda, r.beta, r.pdfUni, r.pdfNEE, rayIndex, r.pixelIndex, r.piPrev, r.nPrev, r.nsPrev, r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale, intr.areaLight, intr.pi, intr.n, -r.ray.d, intr.uv, intr.material, intr.shading.n, intr.shading.dpdu, intr.shading.dpdv, intr.shading.dndu, intr.shading.dndv, getPayload<ClosestHitContext>()->mediumInterface}); return; } // FIXME: this is all basically duplicate code w/medium.cpp MaterialHandle material = intr.material; if (!material) { DBG("Enqueuing into medium transition queue: ray index %d pixel index %d \n", rayIndex, r.pixelIndex); Ray newRay = intr.SpawnRay(r.ray.d); params.mediumTransitionQueue->Push(MediumTransitionWorkItem{ newRay, r.lambda, r.beta, r.pdfUni, r.pdfNEE, r.piPrev, r.nPrev, r.nsPrev, r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale, r.pixelIndex}); return; } if (intr.areaLight) { DBG("Ray hit an area light: adding to hitAreaLightQueue ray index %d pixel index " "%d\n", rayIndex, r.pixelIndex); Ray ray = r.ray; // TODO: intr.wo == -ray.d? params.hitAreaLightQueue->Push(HitAreaLightWorkItem{ intr.areaLight, r.lambda, r.beta, r.pdfUni, r.pdfNEE, intr.p(), intr.n, intr.uv, intr.wo, r.piPrev, ray.d, ray.time, r.nPrev, r.nsPrev, (int)r.isSpecularBounce, r.pixelIndex}); } FloatTextureHandle displacement = material.GetDisplacement(); MaterialEvalQueue *q = (material.CanEvaluateTextures(BasicTextureEvaluator()) && (!displacement || BasicTextureEvaluator().CanEvaluate({displacement}, {}))) ? 
params.basicEvalMaterialQueue : params.universalEvalMaterialQueue; DBG("Enqueuing for material eval, mtl tag %d\n", material.Tag()); auto enqueue = [=](auto ptr) { using Material = typename std::remove_reference_t<decltype(*ptr)>; q->Push<Material>(MaterialEvalWorkItem<Material>{ ptr, r.lambda, r.beta, r.pdfUni, intr.pi, intr.n, intr.shading.n, intr.shading.dpdu, intr.shading.dpdv, intr.shading.dndu, intr.shading.dndv, intr.wo, intr.uv, intr.time, r.anyNonSpecularBounces, r.etaScale, getPayload<ClosestHitContext>()->mediumInterface, rayIndex, r.pixelIndex}); }; material.Dispatch(enqueue); DBG("Closest hit found intersection at t %f\n", optixGetRayTmax()); } /////////////////////////////////////////////////////////////////////////// // Triangles static __forceinline__ __device__ pstd::optional<SurfaceInteraction> getTriangleIntersection() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); float b1 = optixGetTriangleBarycentrics().x; float b2 = optixGetTriangleBarycentrics().y; float b0 = 1 - b1 - b2; float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); assert(optixGetTransformListSize() == 1); float worldFromObj[12], objFromWorld[12]; optixGetObjectToWorldTransformMatrix(worldFromObj); optixGetWorldToObjectTransformMatrix(objFromWorld); SquareMatrix<4> worldFromObjM(worldFromObj[0], worldFromObj[1], worldFromObj[2], worldFromObj[3], worldFromObj[4], worldFromObj[5], worldFromObj[6], worldFromObj[7], worldFromObj[8], worldFromObj[9], worldFromObj[10], worldFromObj[11], 0.f, 0.f, 0.f, 1.f); SquareMatrix<4> objFromWorldM(objFromWorld[0], objFromWorld[1], objFromWorld[2], objFromWorld[3], objFromWorld[4], objFromWorld[5], objFromWorld[6], objFromWorld[7], objFromWorld[8], objFromWorld[9], objFromWorld[10], objFromWorld[11], 0.f, 0.f, 0.f, 1.f); Transform worldFromInstance(worldFromObjM, objFromWorldM); return Triangle::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(), {b0, b1, b2}, optixGetRayTime(), wo, worldFromInstance); } static __forceinline__ __device__ bool alphaKilled(const TriangleMeshRecord &rec) { if (!rec.alphaTexture) return false; pstd::optional<SurfaceInteraction> intr = getTriangleIntersection(); if (!intr) return true; BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, *intr); return alpha == 0; } extern "C" __global__ void __closesthit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); // It's slightly dicey to assume intr is valid. But invalid would // presumably mean that OptiX returned a hit with a degenerate // triangle... 
SurfaceInteraction intr = *getTriangleIntersection(); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (!rec.areaLights.empty()) intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()]; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (alphaKilled(rec)) optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__shadowTriangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); if (alphaKilled(rec)) optixIgnoreIntersection(); } /////////////////////////////////////////////////////////////////////////// // Shadow rays extern "C" __global__ void __raygen__shadow() { int index = optixGetLaunchIndex().x; if (index >= params.shadowRayQueue->Size()) return; ShadowRayWorkItem sr = (*params.shadowRayQueue)[index]; uint32_t missed = 0; Trace(params.traversable, sr.ray, 1e-5f /* tMin */, sr.tMax, OPTIX_RAY_FLAG_NONE, missed); SampledSpectrum Ld; if (missed) Ld = sr.Ld / (sr.pdfUni + sr.pdfNEE).Average(); else Ld = SampledSpectrum(0.); params.shadowRayQueue->Ld[index] = Ld; } extern "C" __global__ void __miss__shadow() { optixSetPayload_0(1); } extern "C" __global__ void __raygen__shadow_Tr() { DBG("raygen sahadow tr %d\n", optixGetLaunchIndex().x); int index = optixGetLaunchIndex().x; if (index >= params.shadowRayQueue->Size()) return; ShadowRayWorkItem sr = (*params.shadowRayQueue)[index]; SampledWavelengths lambda = sr.lambda; SampledSpectrum Ld = sr.Ld; DBG("Initial Ld %f %f %f %f shadow ray index %d pixel index %d\n", Ld[0], Ld[1], Ld[2], Ld[3], index, sr.pixelIndex); SampledSpectrum pdfUni = sr.pdfUni, pdfNEE = sr.pdfNEE; Ray ray = sr.ray; Float tMax = sr.tMax; Point3f pLight = ray(tMax); RNG rng(Hash(ray.o), Hash(ray.d)); while (true) { ClosestHitContext ctx(ray.medium, true); uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx); DBG("Tracing shadow tr shadow ray index %d pixel index %d " "ray %f %f %f d %f %f %f tMax %f\n", index, sr.pixelIndex, ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); uint32_t missed = 0; Trace(params.traversable, ray, 1e-5f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1, missed); if (!missed && ctx.material) { DBG("Hit opaque. Bye\n"); // Hit opaque surface Ld = SampledSpectrum(0.f); break; } if (ray.medium) { DBG("Ray medium %p. Will sample tmaj...\n", ray.medium.ptr()); Float tEnd = missed ? tMax : (Distance(ray.o, Point3f(ctx.piHit)) / Length(ray.d)); ray.medium.SampleTmaj(ray, tEnd, rng, lambda, [&](const MediumSample &mediumSample) { if (!mediumSample.intr) // FIXME: include last Tmaj? 
return false; const SampledSpectrum &Tmaj = mediumSample.Tmaj; const MediumInteraction &intr = *mediumSample.intr; SampledSpectrum sigma_n = intr.sigma_n(); // ratio-tracking: only evaluate null scattering Ld *= Tmaj * sigma_n; pdfNEE *= Tmaj * intr.sigma_maj; pdfUni *= Tmaj * sigma_n; if (!Ld) return false; if (Ld.MaxComponentValue() > 0x1p24f || pdfNEE.MaxComponentValue() > 0x1p24f || pdfUni.MaxComponentValue() > 0x1p24f) { Ld *= 1.f / 0x1p24f; pdfNEE *= 1.f / 0x1p24f; pdfUni *= 1.f / 0x1p24f; } return true; }); } if (missed || !Ld) // done break; ray = ctx.SpawnRayTo(pLight); if (ray.d == Vector3f(0, 0, 0)) break; } Ld /= (pdfUni + pdfNEE).Average(); DBG("Setting final Ld for shadow ray index %d pixel index %d = as %f %f %f %f\n", index, sr.pixelIndex, Ld[0], Ld[1], Ld[2], Ld[3]); params.shadowRayQueue->Ld[index] = Ld; } extern "C" __global__ void __miss__shadow_Tr() { optixSetPayload_2(1); } ///////////////////////////////////////////////////////////////////////////////////// // Quadrics static __device__ inline SurfaceInteraction getQuadricIntersection( const QuadricIntersection &si) { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); Float time = optixGetRayTime(); SurfaceInteraction intr; if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>()) intr = sphere->InteractionFromIntersection(si, wo, time); else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>()) intr = cylinder->InteractionFromIntersection(si, wo, time); else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>()) intr = disk->InteractionFromIntersection(si, wo, time); else assert(!"unexpected quadric"); return intr; } extern "C" __global__ void __closesthit__quadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); QuadricIntersection qi; qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()), BitsToFloat(optixGetAttribute_2())); qi.phi = BitsToFloat(optixGetAttribute_3()); SurfaceInteraction intr = getQuadricIntersection(qi); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (rec.areaLight) intr.areaLight = rec.areaLight; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__shadowQuadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); } extern "C" __global__ void __intersection__quadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); float3 org = optixGetObjectRayOrigin(); float3 dir = optixGetObjectRayDirection(); Float tMax = optixGetRayTmax(); Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z)); pstd::optional<QuadricIntersection> isect; if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>()) isect = sphere->BasicIntersect(ray, tMax); else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>()) isect = cylinder->BasicIntersect(ray, tMax); else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>()) isect = disk->BasicIntersect(ray, tMax); if (!isect) return; if (rec.alphaTexture) { SurfaceInteraction intr = getQuadricIntersection(*isect); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha == 0) // No hit return; } optixReportIntersection(isect->tHit, 0 /* hit kind */, FloatToBits(isect->pObj.x), FloatToBits(isect->pObj.y), 
FloatToBits(isect->pObj.z), FloatToBits(isect->phi)); } /////////////////////////////////////////////////////////////////////////// // Bilinear patches static __forceinline__ __device__ SurfaceInteraction getBilinearPatchIntersection(Point2f uv) { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); return BilinearPatch::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(), uv, optixGetRayTime(), wo); } extern "C" __global__ void __closesthit__bilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1())); SurfaceInteraction intr = getBilinearPatchIntersection(uv); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (!rec.areaLights.empty()) intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()]; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__shadowBilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); if (rec.material && rec.material.IsTransparent()) optixIgnoreIntersection(); } extern "C" __global__ void __intersection__bilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); float3 org = optixGetObjectRayOrigin(); float3 dir = optixGetObjectRayDirection(); Float tMax = optixGetRayTmax(); Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z)); int vertexIndex = 4 * optixGetPrimitiveIndex(); Point3f p00 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex]]; Point3f p10 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 1]]; Point3f p01 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 2]]; Point3f p11 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 3]]; pstd::optional<BilinearIntersection> isect = BilinearPatch::Intersect(ray, tMax, p00, p10, p01, p11); if (!isect) return; if (rec.alphaTexture) { SurfaceInteraction intr = getBilinearPatchIntersection(isect->uv); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha == 0) // No intersection return; } optixReportIntersection(isect->t, 0 /* hit kind */, FloatToBits(isect->uv[0]), FloatToBits(isect->uv[1])); } /////////////////////////////////////////////////////////////////////////// // Random hit (for subsurface scattering) struct RandomHitPayload { WeightedReservoirSampler<SubsurfaceInteraction> wrs; MaterialHandle material; }; extern "C" __global__ void __raygen__randomHit() { // Keep as uint32_t so can pass directly to optixTrace. uint32_t index = optixGetLaunchIndex().x; if (index >= params.subsurfaceScatterQueue->Size()) return; SubsurfaceScatterWorkItem s = (*params.subsurfaceScatterQueue)[index]; Ray ray(s.p0, s.p1 - s.p0); Float tMax = 1.f; RandomHitPayload payload; payload.wrs.Seed(Hash(s.p0, s.p1)); payload.material = s.material; uint32_t ptr0 = packPointer0(&payload), ptr1 = packPointer1(&payload); DBG("Randomhit raygen ray.o %f %f %f ray.d %f %f %f tMax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, ptr0, ptr1); if (payload.wrs.HasSample() && payload.wrs.WeightSum() > 0) { // TODO: latter check shouldn't be needed... 
const SubsurfaceInteraction &si = payload.wrs.GetSample(); DBG("optix si p %f %f %f n %f %f %f\n", si.p().x, si.p().y, si.p().z, si.n.x, si.n.y, si.n.z); params.subsurfaceScatterQueue->weight[index] = payload.wrs.WeightSum(); params.subsurfaceScatterQueue->ssi[index] = payload.wrs.GetSample(); } else params.subsurfaceScatterQueue->weight[index] = 0; } extern "C" __global__ void __anyhit__randomHitTriangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); RandomHitPayload *p = getPayload<RandomHitPayload>(); DBG("Anyhit triangle for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) p->wrs.Add([&] PBRT_CPU_GPU() { return *getTriangleIntersection(); }, 1.f); optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__randomHitBilinearPatch() { BilinearMeshRecord &rec = *(BilinearMeshRecord *)optixGetSbtDataPointer(); RandomHitPayload *p = getPayload<RandomHitPayload>(); DBG("Anyhit blp for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) p->wrs.Add( [&] PBRT_CPU_GPU() { Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1())); return getBilinearPatchIntersection(uv); }, 1.f); optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__randomHitQuadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); RandomHitPayload *p = getPayload<RandomHitPayload>(); DBG("Anyhit quadric for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); if (rec.material == p->material) { p->wrs.Add( [&] PBRT_CPU_GPU() { QuadricIntersection qi; qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()), BitsToFloat(optixGetAttribute_2())); qi.phi = BitsToFloat(optixGetAttribute_3()); return getQuadricIntersection(qi); }, 1.f); } optixIgnoreIntersection(); }
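In the pbrt pair above, the .hip and .cu bodies are essentially identical apart from the inserted hip_runtime include, because the OptiX device API is left untouched by hipify. One reusable technique in that file is the payload pointer packing done by packPointer0/packPointer1 and getPayload: a 64-bit pointer is split into two 32-bit payload registers and reassembled on the other side. The host-side sketch below illustrates only that round-trip; the function names are mine, not from the original file.

#include <cstdint>
#include <cassert>

// Split a 64-bit pointer into two 32-bit words (as the OptiX payload code
// above does) and reassemble it.
static uint32_t packHi(void *ptr) { return uint32_t(reinterpret_cast<uint64_t>(ptr) >> 32); }
static uint32_t packLo(void *ptr) { return uint32_t(reinterpret_cast<uint64_t>(ptr)); }
static void *unpack(uint32_t hi, uint32_t lo) {
    return reinterpret_cast<void *>((uint64_t(hi) << 32) | uint64_t(lo));
}

int main() {
    int x = 42;
    uint32_t hi = packHi(&x), lo = packLo(&x);
    assert(unpack(hi, lo) == &x);  // the pointer round-trips exactly
    return 0;
}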
b180efbd1932ecd85b4d6e9b27c8dfb0b6a72345.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // SDSC Summer Institute 2015 // Andreas Goetz ([email protected]) // CUDA program to add two vectors in parallel on the GPU // launch all kernels at once // #include<stdio.h> // define vector length and threads per block #define N (255*2047) #define TPB 512 // // CUDA device function that adds two integer vectors // __global__ void add(int *a, int *b, int *c, int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < n) c[tid] = a[tid] + b[tid]; } // // main program // int main(void){ int h_a[N], h_b[N], h_c[N]; int *d_a, *d_b, *d_c; int size = N * sizeof(int); int i, nblock, err; // allocate device memory hipMalloc((void **)&d_a, size); hipMalloc((void **)&d_b, size); hipMalloc((void **)&d_c, size); // initialize vectors for (i=0; i<N; i++){ h_a[i] = i+1; h_b[i] = i+1; } // copy input data to device hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice); // add vectors by launching a sufficient number of blocks of the add() kernel nblock = (N+TPB-1)/TPB; printf("\nLaunching vector addition kernel...\n"); printf("Vector length = %d\n",N); printf("Blocks = %d\n",nblock); printf("Threads per block = %d\n",TPB); printf("Kernel copies = %d\n",nblock*TPB); hipLaunchKernelGGL(( add), dim3(nblock),dim3(TPB), 0, 0, d_a, d_b, d_c, N); // copy results back to host hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost); // deallocate memory hipFree(d_a); hipFree(d_b); hipFree(d_c); // check results err = 0; for (i=0; i<N; i++){ if (h_c[i] != 2*(i+1)) err = 1; } if (err != 0){ printf("\n Error, %d elements do not match!\n\n", err); } else { printf("\n Success! All elements match.\n\n"); } return 0; }
b180efbd1932ecd85b4d6e9b27c8dfb0b6a72345.cu
// SDSC Summer Institute 2015 // Andreas Goetz ([email protected]) // CUDA program to add two vectors in parallel on the GPU // launch all kernels at once // #include<stdio.h> // define vector length and threads per block #define N (255*2047) #define TPB 512 // // CUDA device function that adds two integer vectors // __global__ void add(int *a, int *b, int *c, int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < n) c[tid] = a[tid] + b[tid]; } // // main program // int main(void){ int h_a[N], h_b[N], h_c[N]; int *d_a, *d_b, *d_c; int size = N * sizeof(int); int i, nblock, err; // allocate device memory cudaMalloc((void **)&d_a, size); cudaMalloc((void **)&d_b, size); cudaMalloc((void **)&d_c, size); // initialize vectors for (i=0; i<N; i++){ h_a[i] = i+1; h_b[i] = i+1; } // copy input data to device cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice); // add vectors by launching a sufficient number of blocks of the add() kernel nblock = (N+TPB-1)/TPB; printf("\nLaunching vector addition kernel...\n"); printf("Vector length = %d\n",N); printf("Blocks = %d\n",nblock); printf("Threads per block = %d\n",TPB); printf("Kernel copies = %d\n",nblock*TPB); add<<<nblock,TPB>>>(d_a, d_b, d_c, N); // copy results back to host cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost); // deallocate memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); // check results err = 0; for (i=0; i<N; i++){ if (h_c[i] != 2*(i+1)) err = 1; } if (err != 0){ printf("\n Error, %d elements do not match!\n\n", err); } else { printf("\n Success! All elements match.\n\n"); } return 0; }
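The vector-add pair above is the smallest example in this set of the source-level substitutions hipify performs: runtime calls are renamed one-for-one (cudaMalloc -> hipMalloc, cudaMemcpy -> hipMemcpy, cudaFree -> hipFree) and the triple-chevron launch becomes hipLaunchKernelGGL. The sketch below condenses that mapping into a standalone HIP program mirroring the add() kernel; the vector length of 1024 and block size of 256 are arbitrary choices for the example, not values from the original file.

#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void add(int *a, int *b, int *c, int n) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;  // same indexing as above
    if (tid < n) c[tid] = a[tid] + b[tid];
}

int main(void) {
    const int n = 1024;
    const int size = n * (int)sizeof(int);
    int h_a[1024], h_b[1024], h_c[1024];
    for (int i = 0; i < n; i++) { h_a[i] = i + 1; h_b[i] = i + 1; }

    int *d_a, *d_b, *d_c;
    hipMalloc((void **)&d_a, size);                        // cudaMalloc  -> hipMalloc
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);
    hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);      // cudaMemcpy* -> hipMemcpy*
    hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);

    // add<<<nblock, TPB>>>(...) is rewritten as an explicit launch call:
    hipLaunchKernelGGL(add, dim3((n + 255) / 256), dim3(256), 0, 0, d_a, d_b, d_c, n);

    hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);
    hipFree(d_a); hipFree(d_b); hipFree(d_c);              // cudaFree -> hipFree

    printf("c[0]=%d c[%d]=%d\n", h_c[0], n - 1, h_c[n - 1]);
    return 0;
}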
6cafc13184e3952243dc378abc3d952cf20e1562.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "tmwtypes.h" // Note: on GTX Titan Black, optimal number of threads/images is 64 (Ni = 64). // Set isevenX = 1 if kernel is even in X, iseven = 0 if odd. __global__ void superconv2(double *Mout, const double *M, const double *K, const int32_T Nmx, const int32_T Nmy, const int32_T Nkx, const int32_T Nky, const int32_T Nkxh, const int32_T Nkyh, const int32_T isevenX, const int32_T isevenY) { int32_T x = blockIdx.x; // row of output pixel int32_T y = blockIdx.y; // column of output pixel int32_T X = gridDim.x; // map output width in X int32_T Y = gridDim.y; // map output height in Y //int32_T Zk = blockDim.x; // number of 2D kernels (kernel depth) int32_T zk = threadIdx.x; // map number (3rd dimension of M array) int32_T mx = x + Nkxh; int32_T my = y + Nkyh; double res = 0; int32_T i, j; #pragma unroll 10 for (i=-Nkxh; i<=Nkxh-isevenX; i++) { #pragma unroll 10 for (j=-Nkyh; j<=Nkyh-isevenY; j++) { // loop only over 1st and 2nd dimensions res += K[Nkx*Nky*zk + (j+Nkyh)*Nkx + (i+Nkxh)] * M[Nmx*Nmy*zk + Nmx*(my+j) + (mx+i)]; } } // (Nm-Nk+1, Nm-Nk+1, Nkz, Ni) Mout[X*Y*zk + X*y + x] = res; }
6cafc13184e3952243dc378abc3d952cf20e1562.cu
#include "tmwtypes.h" // Note: on GTX Titan Black, optimal number of threads/images is 64 (Ni = 64). // Set isevenX = 1 if kernel is even in X, iseven = 0 if odd. __global__ void superconv2(double *Mout, const double *M, const double *K, const int32_T Nmx, const int32_T Nmy, const int32_T Nkx, const int32_T Nky, const int32_T Nkxh, const int32_T Nkyh, const int32_T isevenX, const int32_T isevenY) { int32_T x = blockIdx.x; // row of output pixel int32_T y = blockIdx.y; // column of output pixel int32_T X = gridDim.x; // map output width in X int32_T Y = gridDim.y; // map output height in Y //int32_T Zk = blockDim.x; // number of 2D kernels (kernel depth) int32_T zk = threadIdx.x; // map number (3rd dimension of M array) int32_T mx = x + Nkxh; int32_T my = y + Nkyh; double res = 0; int32_T i, j; #pragma unroll 10 for (i=-Nkxh; i<=Nkxh-isevenX; i++) { #pragma unroll 10 for (j=-Nkyh; j<=Nkyh-isevenY; j++) { // loop only over 1st and 2nd dimensions res += K[Nkx*Nky*zk + (j+Nkyh)*Nkx + (i+Nkxh)] * M[Nmx*Nmy*zk + Nmx*(my+j) + (mx+i)]; } } // (Nm-Nk+1, Nm-Nk+1, Nkz, Ni) Mout[X*Y*zk + X*y + x] = res; }
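For the superconv2 pair above, the only difference between the .hip and .cu files is the added hip_runtime include: kernel-only code with no runtime API calls needs no further translation. Since the kernel's value is entirely in its index arithmetic (one block per output pixel, one thread per kernel slice zk), here is a CPU reference sketch of the same per-slice sum, useful for spot-checking a single output element. The function name is mine, plain int stands in for int32_T, and the tiny main() uses made-up 3x3 data purely as a sanity check.

#include <stdio.h>

// CPU reference for one output element of superconv2; index arithmetic
// copied from the kernel above.
double superconv2_ref(const double *M, const double *K,
                      int Nmx, int Nmy, int Nkx, int Nky,
                      int Nkxh, int Nkyh, int isevenX, int isevenY,
                      int x, int y, int zk) {
    int mx = x + Nkxh, my = y + Nkyh;
    double res = 0.0;
    for (int i = -Nkxh; i <= Nkxh - isevenX; i++)
        for (int j = -Nkyh; j <= Nkyh - isevenY; j++)
            res += K[Nkx * Nky * zk + (j + Nkyh) * Nkx + (i + Nkxh)]
                 * M[Nmx * Nmy * zk + Nmx * (my + j) + (mx + i)];
    return res;
}

int main() {
    // 3x3 map, odd 3x3 kernel (iseven* = 0, Nk*h = 1), single slice.
    double M[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    double K[9] = {0, 0, 0, 0, 1, 0, 0, 0, 0};  // picks out the center sample
    // Output pixel (0,0) reads M at (mx,my) = (1,1), i.e. 5.0.
    printf("%f\n", superconv2_ref(M, K, 3, 3, 3, 3, 1, 1, 0, 0, 0, 0, 0));
    return 0;
}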
fa954b3a6b3b58582cc72cfef6a4eed2c4735fe9.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <string> #include <vector> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "../common/cutil_math.h" #include "../common/geometry.h" #include "../common/photon.h" #include "../cuda_common/cudaHelpers.h" #include "../cuda_common/gpuBvh.h" #include "../cuda_common/gpuScene.h" #include "../cuda_common/kd-tree.h" #define WIDTH 500 #define HEIGHT 500 #define NUM_PHOTONS (1<<20) #define MAX_DEPTH 10 enum ScatterType {DIFFUSE, SPECULAR, ABSORBED}; /* * Triangle2 and createScene are very temporary in order to hgave a simple scene to do photon tracing. * Delete these and use obj_parser when it is done */ struct Triangle2 { float3 a, b, c; Material material; Triangle toTriangle() { return Triangle::from3Points(a, b, c, material); } }; SceneInfo createScene(){ std::vector<Triangle> triangles;// = loadTriangles("models/bun_zipper.ply", Material{ {0,1,1}, {0.8f, 0.8f, 0.8f}, 2.5f, 0x0000 }); Scene scene; Triangle2 t; // Back wall t.a = { -1, 1, 1 }; t.b = { 1, 1, 1 }; t.c = { -1, -1, 1 }; t.material = { { 1, 1, 1 }, // color { 0, 0, 0 }, // reflectivity 2.5, // refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); t.a = { 1, 1, 1 }; t.b = { 1, -1, 1 }; t.c = { -1, -1, 1 }; t.material = { { 1, 1, 1 }, // color { 0, 0, 0 }, // reflectivity 2.5, // refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); // Front wall t.a = { -1, 1, -4 }; t.b = { 1, 1, -4 }; t.c = { -1, -1, -4 }; t.material = { { 0, 1, 0 }, // color { 0.0f, 0.0f, 0.0f }, // reflectivity 2.5, // refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); t.a = { 1, 1, -4 }; t.b = { 1, -1, -4 }; t.c = { -1, -1, -4 }; t.material = { { 0, 1, 0 }, // color { 0.0f, 0.0f, 0.0f }, // reflectivity 2.5, // refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); // Left wall t.a = { -1, -1, 1 }; t.b = { -1, 1, 1 }; t.c = { -1, -1, -4 }; t.material = { { 0, 0, 1 }, // color { 0.0f, 0.0f, 0.0f }, // reflectivity 2.5, // refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); t.a = { -1, 1, 1 }; t.b = { -1, 1, -4 }; t.c = { -1, -1, -4 }; t.material = { { 0, 0, 1 }, // color { 0.0f, 0.0f, 0.0f }, // reflectivity 2.5, // refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); // Right wall t.a = { 1, -1, 1 }; t.b = { 1, 1, 1 }; t.c = { 1, -1, -4 }; t.material = { { 1, 1, 0 }, // color { 0, 0.0f, 0.0f }, // reflectivity 2.5, // refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); t.a = { 1, 1, 1 }; t.b = { 1, 1, -4 }; t.c = { 1, -1, -4 }; t.material = { { 1, 1, 0 }, // color { 0.f, 0.0f, 0.0f }, // reflectivity 2.5, // refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); // Top wall t.a = { -1, 1, 1 }; t.b = { 1, 1, 1 }; t.c = { -1, 1, -4 }; t.material = { { 0, 1, 1 }, // color { 0.0f, 0.0f, 0.0f }, // reflectivity 2.5, // refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); t.a = { 1, 1, 1 }; t.b = { 1, 1, -4 }; t.c = { -1, 1, -4 }; t.material = { { 0, 1, 1 }, // color { 0.0f, 0.0f, 0.0f }, // reflectivity 2.5, // refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); // Bottom wall t.a = { -1, -1, 1 }; t.b = { 1, -1, 1 }; t.c = { -1, -1, -4 }; t.material = { { 1, 0, 1 }, // color { 0.0f, 0.0f, 0.0f }, // reflectivity 2.5, // 
refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); t.a = { 1, -1, 1 }; t.b = { 1, -1, -4 }; t.c = { -1, -1, -4 }; t.material = { { 1, 0, 1 }, // color { 0.0f, 0.0f, 0.0f }, // reflectivity 2.5, // refractive index (diamond) 0x0000 // type }; triangles.push_back(t.toTriangle()); scene.triangles = std::move(triangles); scene.lights.push_back(PointLightSource{ {0, 0.0f, 0}, {1000000, 1000000, 1000000}, }); return SceneInfo::fromScene(scene); } /* * CUDA kernel to trace photons. Geometry is defined by scene and output is stored in photonList. */ __global__ void getPhotonsKernel(SceneInfo scene, Photon* photonList) { uint idx = blockIdx.x*blockDim.x + threadIdx.x; //gpu thread index hiprandState_t randState; hiprand_init(idx, 0, 10, &randState); //the 10 is an offset which seems to fix banding but more investigation is needed float cosPhi = hiprand_uniform(&randState) * 2 - 1; float sinPhi = sqrtf(1 - cosPhi*cosPhi); float theta = hiprand_uniform(&randState) * 2 * M_PI; float3 origin = scene.lights[0].position; float3 direction = make_float3(sinPhi*cosf(theta), sinPhi*sinf(theta), cosPhi); Ray ray = { origin, direction }; float3 color = scene.lights[0].intensity; int count = 0; for (uint depth = MAX_DEPTH; depth > 0; --depth) { float t = 1e20; Triangle* tri = scene.triangleBvh.intersectRay(ray, t); if (tri) { Material& mat = tri->material; float3 diffuse = mat.color; float3 specular = mat.specularReflectivity; float d_avg = (diffuse.x + diffuse.y + diffuse.z) / 3; float s_avg = (specular.x + specular.y + specular.z) / 3; float xi = hiprand_uniform(&randState); ScatterType action; if (xi < d_avg) { action = DIFFUSE; } else if (xi < d_avg + s_avg) { action = SPECULAR; } else { action = ABSORBED; } if (t < 1e19) { ray.origin += direction*t; if ((action == DIFFUSE || action == ABSORBED)) { photonList[MAX_DEPTH*idx + count].pos = ray.origin; photonList[MAX_DEPTH*idx + count].power = color / NUM_PHOTONS; auto p = photonList[MAX_DEPTH*idx + count].power; printf("%f,%f,%f\n", p.x, p.y, p.z); ++count; } } else { // hit nothing break; } float3 normal = tri->normal; normal = normalize(normal); if (action == DIFFUSE) { cosPhi = hiprand_uniform(&randState); sinPhi = sqrtf(1 - cosPhi*cosPhi); theta = hiprand_uniform(&randState) * 2 * M_PI; float3 w = normal; float3 u = normalize(cross((fabs(w.x) > 0.0001 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = cross(w, u); direction = normalize(u*cosf(theta)*sinPhi + v*sinf(theta)*sinPhi + w*cosPhi); color *= diffuse; } else if (action == SPECULAR) { ray.dir = direction - 2 * normal*dot(normal, direction); color *= specular; } else {//absorbed break; } } else { break; } } for (uint i = 0; i < count; ++i) { photonList[MAX_DEPTH*idx + i].power /= count; } } /* * Simple functon to get some idea of if photon tracing is working correctly. Simply projects all * photons in photons to the screen. Also clips to [-1,1]^3. Saves a .ppm image to filename. 
*/ void writeTestToFile(std::vector<Photon> photons, std::string filename) { std::vector<int> sums(3 * WIDTH*HEIGHT); for (Photon photon : photons) { if (photon.pos.x >= -1 && photon.pos.y >= -1 && photon.pos.z >= -1 && photon.pos.x <= 1 && photon.pos.y <= 1 && photon.pos.z <= 1) { int x = photon.pos.x / 2 / (-photon.pos.z + 2)*WIDTH + WIDTH / 2; int y = -photon.pos.y / 2 / (-photon.pos.z + 2)*HEIGHT + HEIGHT / 2; uint idx = y*WIDTH + x; float theta = atan(sqrt(photon.pos.x*photon.pos.x + photon.pos.y*photon.pos.y) / (photon.pos.z + 2)); sums[3 * idx + 0] += (int)255 * photon.power.x; sums[3 * idx + 1] += (int)255 * photon.power.y; sums[3 * idx + 2] += (int)255 * photon.power.z; } } std::vector<char> pixels(3 * WIDTH*HEIGHT); for (uint i = 0; i < WIDTH*HEIGHT; ++i) { pixels[3 * i + 0] = clamp(sums[3 * i + 0], 0, 255); pixels[3 * i + 1] = clamp(sums[3 * i + 1], 0, 255); pixels[3 * i + 2] = clamp(sums[3 * i + 2], 0, 255); } std::ofstream file; file.open(filename, std::ios::out | std::ios::binary); if (!file.is_open()) { std::cerr << "Unable to save file" << std::endl; exit(1); } file << "P6\n" << WIDTH << " " << HEIGHT << "\n" << "255\n"; file.write(pixels.data(), pixels.size() * sizeof(char)); file.close(); } int main(){ std::vector<Photon> photonList_h(NUM_PHOTONS*MAX_DEPTH); Photon* photonList_d; checkCudaError(hipMalloc(&photonList_d, photonList_h.size()*sizeof(Photon))); SceneInfo scene = createScene(); hipLaunchKernelGGL(( getPhotonsKernel), dim3(NUM_PHOTONS/64), dim3(64), 0, 0, scene, photonList_d); checkCudaError(hipGetLastError()); hipDeviceSynchronize(); hipMemcpy(photonList_h.data(), photonList_d, photonList_h.size()*sizeof(Photon), hipMemcpyDeviceToHost); hipFree(photonList_d); writeTestToFile(photonList_h, "test.ppm"); //sortPhotons(photonList_h); #ifdef WIN32 std::cin.ignore(); #endif return 0; }
fa954b3a6b3b58582cc72cfef6a4eed2c4735fe9.cu
#include <iostream>
#include <fstream>
#include <string>
#include <vector>

#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>

#include "../common/cutil_math.h"
#include "../common/geometry.h"
#include "../common/photon.h"
#include "../cuda_common/cudaHelpers.h"
#include "../cuda_common/gpuBvh.h"
#include "../cuda_common/gpuScene.h"
#include "../cuda_common/kd-tree.h"

#define WIDTH 500
#define HEIGHT 500
#define NUM_PHOTONS (1<<20)
#define MAX_DEPTH 10

enum ScatterType {DIFFUSE, SPECULAR, ABSORBED};

/*
 * Triangle2 and createScene are very temporary in order to have a simple scene to do photon tracing.
 * Delete these and use obj_parser when it is done
 */
struct Triangle2 {
    float3 a, b, c;
    Material material;

    Triangle toTriangle() {
        return Triangle::from3Points(a, b, c, material);
    }
};

SceneInfo createScene(){
    std::vector<Triangle> triangles;// = loadTriangles("models/bun_zipper.ply", Material{ {0,1,1}, {0.8f, 0.8f, 0.8f}, 2.5f, 0x0000 });
    Scene scene;
    Triangle2 t;

    // Back wall
    t.a = { -1, 1, 1 };
    t.b = { 1, 1, 1 };
    t.c = { -1, -1, 1 };
    t.material = {
        { 1, 1, 1 }, // color
        { 0, 0, 0 }, // reflectivity
        2.5,         // refractive index (diamond)
        0x0000       // type
    };
    triangles.push_back(t.toTriangle());

    t.a = { 1, 1, 1 };
    t.b = { 1, -1, 1 };
    t.c = { -1, -1, 1 };
    t.material = {
        { 1, 1, 1 }, // color
        { 0, 0, 0 }, // reflectivity
        2.5,         // refractive index (diamond)
        0x0000       // type
    };
    triangles.push_back(t.toTriangle());

    // Front wall
    t.a = { -1, 1, -4 };
    t.b = { 1, 1, -4 };
    t.c = { -1, -1, -4 };
    t.material = {
        { 0, 1, 0 },          // color
        { 0.0f, 0.0f, 0.0f }, // reflectivity
        2.5,                  // refractive index (diamond)
        0x0000                // type
    };
    triangles.push_back(t.toTriangle());

    t.a = { 1, 1, -4 };
    t.b = { 1, -1, -4 };
    t.c = { -1, -1, -4 };
    t.material = {
        { 0, 1, 0 },          // color
        { 0.0f, 0.0f, 0.0f }, // reflectivity
        2.5,                  // refractive index (diamond)
        0x0000                // type
    };
    triangles.push_back(t.toTriangle());

    // Left wall
    t.a = { -1, -1, 1 };
    t.b = { -1, 1, 1 };
    t.c = { -1, -1, -4 };
    t.material = {
        { 0, 0, 1 },          // color
        { 0.0f, 0.0f, 0.0f }, // reflectivity
        2.5,                  // refractive index (diamond)
        0x0000                // type
    };
    triangles.push_back(t.toTriangle());

    t.a = { -1, 1, 1 };
    t.b = { -1, 1, -4 };
    t.c = { -1, -1, -4 };
    t.material = {
        { 0, 0, 1 },          // color
        { 0.0f, 0.0f, 0.0f }, // reflectivity
        2.5,                  // refractive index (diamond)
        0x0000                // type
    };
    triangles.push_back(t.toTriangle());

    // Right wall
    t.a = { 1, -1, 1 };
    t.b = { 1, 1, 1 };
    t.c = { 1, -1, -4 };
    t.material = {
        { 1, 1, 0 },       // color
        { 0, 0.0f, 0.0f }, // reflectivity
        2.5,               // refractive index (diamond)
        0x0000             // type
    };
    triangles.push_back(t.toTriangle());

    t.a = { 1, 1, 1 };
    t.b = { 1, 1, -4 };
    t.c = { 1, -1, -4 };
    t.material = {
        { 1, 1, 0 },         // color
        { 0.f, 0.0f, 0.0f }, // reflectivity
        2.5,                 // refractive index (diamond)
        0x0000               // type
    };
    triangles.push_back(t.toTriangle());

    // Top wall
    t.a = { -1, 1, 1 };
    t.b = { 1, 1, 1 };
    t.c = { -1, 1, -4 };
    t.material = {
        { 0, 1, 1 },          // color
        { 0.0f, 0.0f, 0.0f }, // reflectivity
        2.5,                  // refractive index (diamond)
        0x0000                // type
    };
    triangles.push_back(t.toTriangle());

    t.a = { 1, 1, 1 };
    t.b = { 1, 1, -4 };
    t.c = { -1, 1, -4 };
    t.material = {
        { 0, 1, 1 },          // color
        { 0.0f, 0.0f, 0.0f }, // reflectivity
        2.5,                  // refractive index (diamond)
        0x0000                // type
    };
    triangles.push_back(t.toTriangle());

    // Bottom wall
    t.a = { -1, -1, 1 };
    t.b = { 1, -1, 1 };
    t.c = { -1, -1, -4 };
    t.material = {
        { 1, 0, 1 },          // color
        { 0.0f, 0.0f, 0.0f }, // reflectivity
        2.5,                  // refractive index (diamond)
        0x0000                // type
    };
    triangles.push_back(t.toTriangle());

    t.a = { 1, -1, 1 };
    t.b = { 1, -1, -4 };
    t.c = { -1, -1, -4 };
    t.material = {
        { 1, 0, 1 },          // color
        { 0.0f, 0.0f, 0.0f }, // reflectivity
        2.5,                  // refractive index (diamond)
        0x0000                // type
    };
    triangles.push_back(t.toTriangle());

    scene.triangles = std::move(triangles);
    scene.lights.push_back(PointLightSource{
        {0, 0.0f, 0},
        {1000000, 1000000, 1000000},
    });

    return SceneInfo::fromScene(scene);
}

/*
 * CUDA kernel to trace photons. Geometry is defined by scene and output is stored in photonList.
 */
__global__ void getPhotonsKernel(SceneInfo scene, Photon* photonList) {
    uint idx = blockIdx.x*blockDim.x + threadIdx.x; //gpu thread index
    curandState randState;
    curand_init(idx, 0, 10, &randState); //the 10 is an offset which seems to fix banding but more investigation is needed

    float cosPhi = curand_uniform(&randState) * 2 - 1;
    float sinPhi = sqrtf(1 - cosPhi*cosPhi);
    float theta = curand_uniform(&randState) * 2 * M_PI;

    float3 origin = scene.lights[0].position;
    float3 direction = make_float3(sinPhi*cosf(theta), sinPhi*sinf(theta), cosPhi);
    Ray ray = { origin, direction };
    float3 color = scene.lights[0].intensity;

    int count = 0;
    for (uint depth = MAX_DEPTH; depth > 0; --depth) {
        float t = 1e20;
        Triangle* tri = scene.triangleBvh.intersectRay(ray, t);
        if (tri) {
            Material& mat = tri->material;
            float3 diffuse = mat.color;
            float3 specular = mat.specularReflectivity;
            float d_avg = (diffuse.x + diffuse.y + diffuse.z) / 3;
            float s_avg = (specular.x + specular.y + specular.z) / 3;
            float xi = curand_uniform(&randState);

            ScatterType action;
            if (xi < d_avg) {
                action = DIFFUSE;
            } else if (xi < d_avg + s_avg) {
                action = SPECULAR;
            } else {
                action = ABSORBED;
            }

            if (t < 1e19) {
                ray.origin += direction*t;
                if ((action == DIFFUSE || action == ABSORBED)) {
                    photonList[MAX_DEPTH*idx + count].pos = ray.origin;
                    photonList[MAX_DEPTH*idx + count].power = color / NUM_PHOTONS;
                    auto p = photonList[MAX_DEPTH*idx + count].power;
                    printf("%f,%f,%f\n", p.x, p.y, p.z);
                    ++count;
                }
            } else {
                // hit nothing
                break;
            }

            float3 normal = tri->normal;
            normal = normalize(normal);
            if (action == DIFFUSE) {
                cosPhi = curand_uniform(&randState);
                sinPhi = sqrtf(1 - cosPhi*cosPhi);
                theta = curand_uniform(&randState) * 2 * M_PI;

                float3 w = normal;
                float3 u = normalize(cross((fabs(w.x) > 0.0001 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
                float3 v = cross(w, u);
                direction = normalize(u*cosf(theta)*sinPhi + v*sinf(theta)*sinPhi + w*cosPhi);
                color *= diffuse;
            } else if (action == SPECULAR) {
                ray.dir = direction - 2 * normal*dot(normal, direction);
                color *= specular;
            } else {//absorbed
                break;
            }
        } else {
            break;
        }
    }

    for (uint i = 0; i < count; ++i) {
        photonList[MAX_DEPTH*idx + i].power /= count;
    }
}

/*
 * Simple function to get some idea of if photon tracing is working correctly. Simply projects all
 * photons in photons to the screen. Also clips to [-1,1]^3. Saves a .ppm image to filename.
 */
void writeTestToFile(std::vector<Photon> photons, std::string filename) {
    std::vector<int> sums(3 * WIDTH*HEIGHT);
    for (Photon photon : photons) {
        if (photon.pos.x >= -1 && photon.pos.y >= -1 && photon.pos.z >= -1 &&
            photon.pos.x <= 1 && photon.pos.y <= 1 && photon.pos.z <= 1) {
            int x = photon.pos.x / 2 / (-photon.pos.z + 2)*WIDTH + WIDTH / 2;
            int y = -photon.pos.y / 2 / (-photon.pos.z + 2)*HEIGHT + HEIGHT / 2;
            uint idx = y*WIDTH + x;
            float theta = atan(sqrt(photon.pos.x*photon.pos.x + photon.pos.y*photon.pos.y) / (photon.pos.z + 2));
            sums[3 * idx + 0] += (int)255 * photon.power.x;
            sums[3 * idx + 1] += (int)255 * photon.power.y;
            sums[3 * idx + 2] += (int)255 * photon.power.z;
        }
    }

    std::vector<char> pixels(3 * WIDTH*HEIGHT);
    for (uint i = 0; i < WIDTH*HEIGHT; ++i) {
        pixels[3 * i + 0] = clamp(sums[3 * i + 0], 0, 255);
        pixels[3 * i + 1] = clamp(sums[3 * i + 1], 0, 255);
        pixels[3 * i + 2] = clamp(sums[3 * i + 2], 0, 255);
    }

    std::ofstream file;
    file.open(filename, std::ios::out | std::ios::binary);
    if (!file.is_open()) {
        std::cerr << "Unable to save file" << std::endl;
        exit(1);
    }
    file << "P6\n" << WIDTH << " " << HEIGHT << "\n" << "255\n";
    file.write(pixels.data(), pixels.size() * sizeof(char));
    file.close();
}

int main(){
    std::vector<Photon> photonList_h(NUM_PHOTONS*MAX_DEPTH);
    Photon* photonList_d;
    checkCudaError(cudaMalloc(&photonList_d, photonList_h.size()*sizeof(Photon)));

    SceneInfo scene = createScene();

    getPhotonsKernel<<<NUM_PHOTONS/64, 64>>>(scene, photonList_d);
    checkCudaError(cudaGetLastError());
    cudaDeviceSynchronize();

    cudaMemcpy(photonList_h.data(), photonList_d, photonList_h.size()*sizeof(Photon), cudaMemcpyDeviceToHost);
    cudaFree(photonList_d);

    writeTestToFile(photonList_h, "test.ppm");
    //sortPhotons(photonList_h);

#ifdef WIN32
    std::cin.ignore();
#endif
    return 0;
}
1a900965ddcb42e402fd0d719d7e245a55b5a417.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Intel Corporation: © 2016 Intel Corporation

All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.

All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md


Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.
    * Neither the name of Intel Corporation nor the names of its contributors
      may be used to endorse or promote products derived from this software
      without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/

#include <vector>

#include "caffe/filler.hpp"
#include "caffe/layers/bias_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void BiasForward(const int n, const Dtype* in,
    const Dtype* bias, const int bias_dim, const int inner_dim,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int bias_index = (index / inner_dim) % bias_dim;
    out[index] = in[index] + bias[bias_index];
  }
}

template <typename Dtype>
void BiasLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* bias_data =
      ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(BiasForward<Dtype>, dim3(CAFFE_GET_BLOCKS(count)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, bottom_data, bias_data, bias_dim_, inner_dim_, top_data);
}

template <typename Dtype>
void BiasLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0] && bottom[0] != top[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_copy(bottom[0]->count(), top_diff, bottom_diff);
  }
  // in-place, we don't need to do anything with the data diff
  const bool bias_param = (bottom.size() == 1);
  if ((!bias_param && propagate_down[1]) ||
      (bias_param && this->param_propagate_down_[0])) {
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bias_diff = (bias_param ? this->blobs_[0].get() : bottom[1])
        ->mutable_gpu_diff();
    bool accum = bias_param;
    for (int n = 0; n < outer_dim_; ++n) {
      caffe_gpu_gemv(CblasNoTrans, bias_dim_, inner_dim_, Dtype(1),
          top_diff, bias_multiplier_.gpu_data(), Dtype(accum), bias_diff);
      top_diff += dim_;
      accum = true;
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(BiasLayer);

}  // namespace caffe
1a900965ddcb42e402fd0d719d7e245a55b5a417.cu
/*
All modification made by Intel Corporation: © 2016 Intel Corporation

All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.

All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md


Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.
    * Neither the name of Intel Corporation nor the names of its contributors
      may be used to endorse or promote products derived from this software
      without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/

#include <vector>

#include "caffe/filler.hpp"
#include "caffe/layers/bias_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void BiasForward(const int n, const Dtype* in,
    const Dtype* bias, const int bias_dim, const int inner_dim,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int bias_index = (index / inner_dim) % bias_dim;
    out[index] = in[index] + bias[bias_index];
  }
}

template <typename Dtype>
void BiasLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* bias_data =
      ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  BiasForward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, bias_data, bias_dim_, inner_dim_, top_data);
}

template <typename Dtype>
void BiasLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0] && bottom[0] != top[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_copy(bottom[0]->count(), top_diff, bottom_diff);
  }
  // in-place, we don't need to do anything with the data diff
  const bool bias_param = (bottom.size() == 1);
  if ((!bias_param && propagate_down[1]) ||
      (bias_param && this->param_propagate_down_[0])) {
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bias_diff = (bias_param ? this->blobs_[0].get() : bottom[1])
        ->mutable_gpu_diff();
    bool accum = bias_param;
    for (int n = 0; n < outer_dim_; ++n) {
      caffe_gpu_gemv(CblasNoTrans, bias_dim_, inner_dim_, Dtype(1),
          top_diff, bias_multiplier_.gpu_data(), Dtype(accum), bias_diff);
      top_diff += dim_;
      accum = true;
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(BiasLayer);

}  // namespace caffe
ac4ce72fd39af554193faaf59a2af43f0565f9fd.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <getopt.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <stdbool.h>
#include <sys/time.h>

#define NUM_TIMERS 8
#define EPS 1e-3
#define MS 1e+3

#define ABS_ERR(x, y) (fabsf((x) - (y)) > EPS)
#define REL_ERR(x, y) ((y) == 0 || fabsf((x) - (y)) / (y) > EPS)

#define BLOCK_SIZE 16

float *a1, *b1, *c1;

__global__ void cuda_mat_mul(float *a, float *b, float *c, int N, int M)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float sum = 0;
    if((col<N) && (row<N)){
        for(int i=0;i<M;i++){
            sum += a[row*M+i]*b[i*N+col];
        }
        c[row*N+col] = sum;
    }
}

void mat_mul(float *a, float *b, float *c, int N, int M)
{
    /* FIXME */
    hipMemcpy(a1, a, M*N*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(b1, b, N*M*sizeof(float), hipMemcpyHostToDevice);

    dim3 block(BLOCK_SIZE,BLOCK_SIZE);
    dim3 grid(N/block.x, N/block.y);

    hipLaunchKernelGGL(( cuda_mat_mul), dim3(grid),dim3(block), 0, 0, a1,b1,c1,N,M);
    hipDeviceSynchronize();

    hipMemcpy(c, c1, N*N*sizeof(float), hipMemcpyDeviceToHost);

    hipFree(a1);
    hipFree(b1);
    hipFree(c1);
}

void setup(int N, int M)
{
    /* FIXME */
    hipSetDevice(0);
    hipMalloc((void**)&a1,M*N*sizeof(float));
    hipMalloc((void**)&b1,N*M*sizeof(float));
    hipMalloc((void**)&c1,N*N*sizeof(float));
}

/////////////////////////////////////////////////////////////////////////////////
// main routine
/////////////////////////////////////////////////////////////////////////////////

static double start_time[NUM_TIMERS];

static double get_time()
{
    struct timeval tv;
    gettimeofday(&tv, 0);
    return tv.tv_sec + tv.tv_usec * 1e-6;
}

void timer_start(int i)
{
    start_time[i] = get_time();
}

double timer_stop(int i)
{
    return get_time() - start_time[i];
}

static bool print_matrix = false;
static bool validation = false;

static void check_mat_mul(float *a, float *b, float *c, int N, int M)
{
    bool is_valid = true;
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; ++j) {
            float ans = 0;
            for (int k = 0; k < M; ++k) {
                ans += a[i * M + k] * b[k * N + j];
            }
            float res = c[i * N + j];
            if (ABS_ERR(res, ans) && REL_ERR(res, ans)) {
                printf("c[%d][%d] : answer = %f, result = %f\n", i, j, ans, res);
                is_valid = false;
            }
        }
    }

    if (is_valid) {
        printf("Validation:\tSUCCESS\n");
    } else {
        printf("Validation:\tFAILED\n");
    }
}

static void rand_mat(float **m, size_t R, size_t C)
{
    if (m == NULL) {
        printf("Unable to allocate memory for matrix.\n");
        exit(EXIT_FAILURE);
    }

    *m = (float *) malloc(sizeof(float) * R * C);
    if (*m == NULL) {
        printf("Failed to allocate memory for matrix.\n");
        exit(EXIT_FAILURE);
    }

    for (int i = 0; i < R; i++) {
        for (int j = 0; j < C; j++) {
            (*m)[i * C + j] = (float) rand() / RAND_MAX - 0.5;
        }
    }
}

static void zero_mat(float **m, size_t R, size_t C)
{
    if (m == NULL) {
        printf("Unable to allocate memory for matrix.\n");
        exit(EXIT_FAILURE);
    }

    *m = (float *) malloc(sizeof(float) * R * C);
    if (*m == NULL) {
        printf("Failed to allocate memory for matrix.\n");
        exit(EXIT_FAILURE);
    }

    memset(*m, 0, sizeof(float) * R * C);
}

static void print_mat(float *m, size_t R, size_t C)
{
    for (int i = 0; i < R; i++) {
        for (int j = 0; j < C; j++) {
            printf("%+.3f ", m[i * C + j]);
        }
        printf("\n");
    }
}

static void print_help(const char* prog_name)
{
    printf(" Usage: %s NDIM MDIM [-pvh]\n", prog_name);
    printf(" OPTIONS\n");
    printf(" -p : print matrix.\n");
    printf(" -v : validate matrix multiplication.\n");
    printf(" -h : print this page.\n");
}

static void parse_opt(int argc, char *argv[])
{
    int opt;
    while ((opt = getopt(argc, argv, "pvh")) > 0) {
        switch(opt) {
            case 'p':
                print_matrix = true;
                break;
            case 'v':
                validation = true;
                break;
            case 'h':
            default:
                print_help(argv[0]);
                exit(EXIT_SUCCESS);
        }
    }
}

int main(int argc, char *argv[])
{
    //===============================================================
    // Command line parsing
    //===============================================================
    if (argc < 3) {
        print_help(argv[0]);
        exit(EXIT_FAILURE);
    }

    int N = atoi(argv[1]);
    int M = atoi(argv[2]);
    if (N < 1 || M < 1) {
        print_help(argv[0]);
        exit(EXIT_FAILURE);
    }
    parse_opt(argc, argv);

    printf("\nProblem size:\t%d x %d x %d\n\n", N, N, M);

    //===============================================================
    // Initialization
    //===============================================================
    float *a = NULL;
    float *b = NULL;
    float *c = NULL;

    printf(" Initializing ...\t");
    fflush(stdout);
    rand_mat(&a, N, M);
    rand_mat(&b, M, N);
    zero_mat(&c, N, N);
    setup(N, M);
    printf("done!\n");

    //===============================================================
    // Caculation
    //===============================================================
    printf(" Calculating ...\t");
    fflush(stdout);
    timer_start(0);
    mat_mul(a, b, c, N, M);
    double elapsed_time = timer_stop(0);
    printf("done!\n");

    //===============================================================
    // Print results and Validation
    //===============================================================
    if (print_matrix) {
        printf("MATRIX A:\n");
        print_mat(a, N, M);
        printf("MATRIX B:\n");
        print_mat(b, M, N);
        printf("MATRIX C:\n");
        print_mat(c, N, N);
    }
    if (validation) {
        printf(" Validation on.\n\n");
        check_mat_mul(a, b, c, N, M);
    } else {
        printf(" Validation off.\n\n");
    }

    printf("Elapsed time:\t%.3f sec (%.3f ms)\n\n", elapsed_time, elapsed_time * MS);
    return 0;
}
ac4ce72fd39af554193faaf59a2af43f0565f9fd.cu
#include <stdio.h>
#include <cuda.h>
#include <getopt.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <stdbool.h>
#include <sys/time.h>

#define NUM_TIMERS 8
#define EPS 1e-3
#define MS 1e+3

#define ABS_ERR(x, y) (fabsf((x) - (y)) > EPS)
#define REL_ERR(x, y) ((y) == 0 || fabsf((x) - (y)) / (y) > EPS)

#define BLOCK_SIZE 16

float *a1, *b1, *c1;

__global__ void cuda_mat_mul(float *a, float *b, float *c, int N, int M)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float sum = 0;
    if((col<N) && (row<N)){
        for(int i=0;i<M;i++){
            sum += a[row*M+i]*b[i*N+col];
        }
        c[row*N+col] = sum;
    }
}

void mat_mul(float *a, float *b, float *c, int N, int M)
{
    /* FIXME */
    cudaMemcpy(a1, a, M*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b1, b, N*M*sizeof(float), cudaMemcpyHostToDevice);

    dim3 block(BLOCK_SIZE,BLOCK_SIZE);
    dim3 grid(N/block.x, N/block.y);

    cuda_mat_mul<<<grid,block>>>(a1,b1,c1,N,M);
    cudaDeviceSynchronize();

    cudaMemcpy(c, c1, N*N*sizeof(float), cudaMemcpyDeviceToHost);

    cudaFree(a1);
    cudaFree(b1);
    cudaFree(c1);
}

void setup(int N, int M)
{
    /* FIXME */
    cudaSetDevice(0);
    cudaMalloc((void**)&a1,M*N*sizeof(float));
    cudaMalloc((void**)&b1,N*M*sizeof(float));
    cudaMalloc((void**)&c1,N*N*sizeof(float));
}

/////////////////////////////////////////////////////////////////////////////////
// main routine
/////////////////////////////////////////////////////////////////////////////////

static double start_time[NUM_TIMERS];

static double get_time()
{
    struct timeval tv;
    gettimeofday(&tv, 0);
    return tv.tv_sec + tv.tv_usec * 1e-6;
}

void timer_start(int i)
{
    start_time[i] = get_time();
}

double timer_stop(int i)
{
    return get_time() - start_time[i];
}

static bool print_matrix = false;
static bool validation = false;

static void check_mat_mul(float *a, float *b, float *c, int N, int M)
{
    bool is_valid = true;
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; ++j) {
            float ans = 0;
            for (int k = 0; k < M; ++k) {
                ans += a[i * M + k] * b[k * N + j];
            }
            float res = c[i * N + j];
            if (ABS_ERR(res, ans) && REL_ERR(res, ans)) {
                printf("c[%d][%d] : answer = %f, result = %f\n", i, j, ans, res);
                is_valid = false;
            }
        }
    }

    if (is_valid) {
        printf("Validation:\tSUCCESS\n");
    } else {
        printf("Validation:\tFAILED\n");
    }
}

static void rand_mat(float **m, size_t R, size_t C)
{
    if (m == NULL) {
        printf("Unable to allocate memory for matrix.\n");
        exit(EXIT_FAILURE);
    }

    *m = (float *) malloc(sizeof(float) * R * C);
    if (*m == NULL) {
        printf("Failed to allocate memory for matrix.\n");
        exit(EXIT_FAILURE);
    }

    for (int i = 0; i < R; i++) {
        for (int j = 0; j < C; j++) {
            (*m)[i * C + j] = (float) rand() / RAND_MAX - 0.5;
        }
    }
}

static void zero_mat(float **m, size_t R, size_t C)
{
    if (m == NULL) {
        printf("Unable to allocate memory for matrix.\n");
        exit(EXIT_FAILURE);
    }

    *m = (float *) malloc(sizeof(float) * R * C);
    if (*m == NULL) {
        printf("Failed to allocate memory for matrix.\n");
        exit(EXIT_FAILURE);
    }

    memset(*m, 0, sizeof(float) * R * C);
}

static void print_mat(float *m, size_t R, size_t C)
{
    for (int i = 0; i < R; i++) {
        for (int j = 0; j < C; j++) {
            printf("%+.3f ", m[i * C + j]);
        }
        printf("\n");
    }
}

static void print_help(const char* prog_name)
{
    printf(" Usage: %s NDIM MDIM [-pvh]\n", prog_name);
    printf(" OPTIONS\n");
    printf(" -p : print matrix.\n");
    printf(" -v : validate matrix multiplication.\n");
    printf(" -h : print this page.\n");
}

static void parse_opt(int argc, char *argv[])
{
    int opt;
    while ((opt = getopt(argc, argv, "pvh")) > 0) {
        switch(opt) {
            case 'p':
                print_matrix = true;
                break;
            case 'v':
                validation = true;
                break;
            case 'h':
            default:
                print_help(argv[0]);
                exit(EXIT_SUCCESS);
        }
    }
}

int main(int argc, char *argv[])
{
    //===============================================================
    // Command line parsing
    //===============================================================
    if (argc < 3) {
        print_help(argv[0]);
        exit(EXIT_FAILURE);
    }

    int N = atoi(argv[1]);
    int M = atoi(argv[2]);
    if (N < 1 || M < 1) {
        print_help(argv[0]);
        exit(EXIT_FAILURE);
    }
    parse_opt(argc, argv);

    printf("\nProblem size:\t%d x %d x %d\n\n", N, N, M);

    //===============================================================
    // Initialization
    //===============================================================
    float *a = NULL;
    float *b = NULL;
    float *c = NULL;

    printf(" Initializing ...\t");
    fflush(stdout);
    rand_mat(&a, N, M);
    rand_mat(&b, M, N);
    zero_mat(&c, N, N);
    setup(N, M);
    printf("done!\n");

    //===============================================================
    // Caculation
    //===============================================================
    printf(" Calculating ...\t");
    fflush(stdout);
    timer_start(0);
    mat_mul(a, b, c, N, M);
    double elapsed_time = timer_stop(0);
    printf("done!\n");

    //===============================================================
    // Print results and Validation
    //===============================================================
    if (print_matrix) {
        printf("MATRIX A:\n");
        print_mat(a, N, M);
        printf("MATRIX B:\n");
        print_mat(b, M, N);
        printf("MATRIX C:\n");
        print_mat(c, N, N);
    }
    if (validation) {
        printf(" Validation on.\n\n");
        check_mat_mul(a, b, c, N, M);
    } else {
        printf(" Validation off.\n\n");
    }

    printf("Elapsed time:\t%.3f sec (%.3f ms)\n\n", elapsed_time, elapsed_time * MS);
    return 0;
}
24405d3b922af21029ce13c89890b25de4805e91.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>

#define CSC(call) \
do { \
    hipError_t res = call; \
    if (res != hipSuccess) { \
        fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
                __FILE__, __LINE__, hipGetErrorString(res)); \
        exit(0); \
    } \
} while(0)

__global__ void kernel(double *vector, int n) {
    int offset = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += offset) {
        vector[i] *= vector[i] < 0 ? -1 : 1;
    }
}

int main() {
    int n;
    scanf("%d", &n);
    int size = n * sizeof(double);
    double *vector = (double *) malloc(size);
    for (int i = 0; i < n; ++i) {
        scanf("%lf", &vector[i]);
    }

    double *device_vector;
    CSC(hipMalloc(&device_vector, size));
    CSC(hipMemcpy(device_vector, vector, size, hipMemcpyHostToDevice));

    hipEvent_t start, end;
    CSC(hipEventCreate(&start));
    CSC(hipEventCreate(&end));
    CSC(hipEventRecord(start));

    hipLaunchKernelGGL(( kernel), dim3(1024), dim3(1024), 0, 0, device_vector, n);
    CSC(hipGetLastError());

    CSC(hipEventRecord(end));
    CSC(hipEventSynchronize(end));

    float time;
    CSC(hipEventElapsedTime(&time, start, end));
    CSC(hipEventDestroy(start));
    CSC(hipEventDestroy(end));

    printf("Time = %f ms\n", time);

    CSC(hipMemcpy(vector, device_vector, size, hipMemcpyDeviceToHost));
    CSC(hipFree(device_vector));

    /*
    for (int i = 0; i < n; ++i) {
        printf("%.10e ", vector[i]);
    }
    */
    printf("\n");

    free(vector);
    return 0;
}
24405d3b922af21029ce13c89890b25de4805e91.cu
#include <stdlib.h>
#include <stdio.h>

#define CSC(call) \
do { \
    cudaError_t res = call; \
    if (res != cudaSuccess) { \
        fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
                __FILE__, __LINE__, cudaGetErrorString(res)); \
        exit(0); \
    } \
} while(0)

__global__ void kernel(double *vector, int n) {
    int offset = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += offset) {
        vector[i] *= vector[i] < 0 ? -1 : 1;
    }
}

int main() {
    int n;
    scanf("%d", &n);
    int size = n * sizeof(double);
    double *vector = (double *) malloc(size);
    for (int i = 0; i < n; ++i) {
        scanf("%lf", &vector[i]);
    }

    double *device_vector;
    CSC(cudaMalloc(&device_vector, size));
    CSC(cudaMemcpy(device_vector, vector, size, cudaMemcpyHostToDevice));

    cudaEvent_t start, end;
    CSC(cudaEventCreate(&start));
    CSC(cudaEventCreate(&end));
    CSC(cudaEventRecord(start));

    kernel<<<1024, 1024>>>(device_vector, n);
    CSC(cudaGetLastError());

    CSC(cudaEventRecord(end));
    CSC(cudaEventSynchronize(end));

    float time;
    CSC(cudaEventElapsedTime(&time, start, end));
    CSC(cudaEventDestroy(start));
    CSC(cudaEventDestroy(end));

    printf("Time = %f ms\n", time);

    CSC(cudaMemcpy(vector, device_vector, size, cudaMemcpyDeviceToHost));
    CSC(cudaFree(device_vector));

    /*
    for (int i = 0; i < n; ++i) {
        printf("%.10e ", vector[i]);
    }
    */
    printf("\n");

    free(vector);
    return 0;
}
b97834e7cb204aa8091bf29ccfdaf1d21ca4dd1f.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include "cuStopwatch.cu"

// Compute sum of integers from 0 to n-1
__global__ void trianglenumber(uint64_t* res, uint64_t n) {
    uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;
    if(tid < n) {
        /*
            Here, the old operation res += tid; is not atomic.
            It is necessary to add a fence to avoid edge effects.
        */
        atomicAdd(res, tid);
    }
    return;
}

int main() {
    // Allocate memory
    uint64_t *res_host, *res_dev;
    hipHostMalloc((void**)&res_host, sizeof(uint64_t), hipHostMallocDefault);
    hipMalloc((void**)&res_dev, sizeof(uint64_t));

    // Perform computation
    cuStopwatch sw1;
    sw1.start();
    hipLaunchKernelGGL(( trianglenumber), dim3(1024), dim3(1024), 0, 0, res_dev, 1024*1024);
    hipMemcpyAsync(res_host, res_dev, sizeof(uint64_t), hipMemcpyDeviceToHost);
    printf("Computation time: %.4fms\n", sw1.stop());
    printf("Result: %I64u\n", *res_host);

    // Free memory
    hipFree(res_dev);
    hipHostFree(res_host);
    return 0;
}
b97834e7cb204aa8091bf29ccfdaf1d21ca4dd1f.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include "cuStopwatch.cu"

// Compute sum of integers from 0 to n-1
__global__ void trianglenumber(uint64_t* res, uint64_t n) {
    uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;
    if(tid < n) {
        /*
            Here, the old operation res += tid; is not atomic.
            It is necessary to add a fence to avoid edge effects.
        */
        atomicAdd(res, tid);
    }
    return;
}

int main() {
    // Allocate memory
    uint64_t *res_host, *res_dev;
    cudaHostAlloc((void**)&res_host, sizeof(uint64_t), cudaHostAllocDefault);
    cudaMalloc((void**)&res_dev, sizeof(uint64_t));

    // Perform computation
    cuStopwatch sw1;
    sw1.start();
    trianglenumber<<<1024, 1024>>>(res_dev, 1024*1024);
    cudaMemcpyAsync(res_host, res_dev, sizeof(uint64_t), cudaMemcpyDeviceToHost);
    printf("Computation time: %.4fms\n", sw1.stop());
    printf("Result: %I64u\n", *res_host);

    // Free memory
    cudaFree(res_dev);
    cudaFreeHost(res_host);
    return 0;
}
2472140e74ca1d97252849379558f9219e214e20.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

__global__ void mandelKernel(int* d_img, int maxIter, float stepX, float stepY, float lowerX, float lowerY) {
    // To avoid error caused by the floating number, use the following pseudo code
    //
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
    int thisX = blockDim.x*blockIdx.x+threadIdx.x; //0~1599
    int thisY = blockIdx.y; //0~1199
    int index = thisY*1600+thisX;
    float x = lowerX + thisX * stepX;//-2~1
    float y = lowerY + thisY * stepY;//-1~1
    //
    int i;
    float z_x = x;
    float z_y = y;
    for(i=0;i<maxIter;i++){
        if(z_x*z_x + z_y*z_y > 4.f) break;
        float new_x = z_x*z_x - z_y*z_y;
        float new_y = 2.f * z_x * z_y;
        z_x = x + new_x;
        z_y = y + new_y;
    }
    d_img[index] = i;
}

// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;
    //
    int width = resX;  //1600
    int height = resY; //1200
    int N = width*height;
    int *d_img,*h_img;
    //
    hipMalloc((void**)&d_img,N*sizeof(int));
    h_img = (int*)malloc(sizeof(int)*N);
    //
    dim3 blockSize(400);
    dim3 blockNum(4,1200);
    hipLaunchKernelGGL(( mandelKernel), dim3(blockNum),dim3(blockSize), 0, 0, d_img, maxIterations,stepX,stepY,lowerX,lowerY);
    //
    hipDeviceSynchronize();
    //
    hipMemcpy(img,d_img,N*sizeof(int),hipMemcpyDeviceToHost);
    //
}
2472140e74ca1d97252849379558f9219e214e20.cu
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

__global__ void mandelKernel(int* d_img, int maxIter, float stepX, float stepY, float lowerX, float lowerY) {
    // To avoid error caused by the floating number, use the following pseudo code
    //
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
    int thisX = blockDim.x*blockIdx.x+threadIdx.x; //0~1599
    int thisY = blockIdx.y; //0~1199
    int index = thisY*1600+thisX;
    float x = lowerX + thisX * stepX;//-2~1
    float y = lowerY + thisY * stepY;//-1~1
    //
    int i;
    float z_x = x;
    float z_y = y;
    for(i=0;i<maxIter;i++){
        if(z_x*z_x + z_y*z_y > 4.f) break;
        float new_x = z_x*z_x - z_y*z_y;
        float new_y = 2.f * z_x * z_y;
        z_x = x + new_x;
        z_y = y + new_y;
    }
    d_img[index] = i;
}

// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;
    //
    int width = resX;  //1600
    int height = resY; //1200
    int N = width*height;
    int *d_img,*h_img;
    //
    cudaMalloc((void**)&d_img,N*sizeof(int));
    h_img = (int*)malloc(sizeof(int)*N);
    //
    dim3 blockSize(400);
    dim3 blockNum(4,1200);
    mandelKernel<<<blockNum,blockSize>>>(d_img, maxIterations,stepX,stepY,lowerX,lowerY);
    //
    cudaDeviceSynchronize();
    //
    cudaMemcpy(img,d_img,N*sizeof(int),cudaMemcpyDeviceToHost);
    //
}
3755a606b5b26fa78866bae8e6e795eb948fff7e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2013, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "mem/oskar_mem_set_value_real_cuda.h" #include <hip/hip_runtime.h> /* Kernels. ================================================================ */ /* Single precision. */ __global__ void oskar_mem_set_value_real_cudak_r_f(const int num, float* data, const float value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i] = value; } __global__ void oskar_mem_set_value_real_cudak_c_f(const int num, float2* data, const float value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i] = make_float2(value, 0.0f); } __global__ void oskar_mem_set_value_real_cudak_m_f(const int num, float4c* data, const float value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i].a = make_float2(value, 0.0f); data[i].b = make_float2(0.0f, 0.0f); data[i].c = make_float2(0.0f, 0.0f); data[i].d = make_float2(value, 0.0f); } /* Double precision. */ __global__ void oskar_mem_set_value_real_cudak_r_d(const int num, double* data, const double value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i] = value; } __global__ void oskar_mem_set_value_real_cudak_c_d(const int num, double2* data, const double value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i] = make_double2(value, 0.0); } __global__ void oskar_mem_set_value_real_cudak_m_d(const int num, double4c* data, const double value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i].a = make_double2(value, 0.0); data[i].b = make_double2(0.0, 0.0); data[i].c = make_double2(0.0, 0.0); data[i].d = make_double2(value, 0.0); } #ifdef __cplusplus extern "C" { #endif /* Kernel wrappers. ======================================================== */ /* Single precision. 
*/ void oskar_mem_set_value_real_cuda_r_f(int num, float* data, float value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_r_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } void oskar_mem_set_value_real_cuda_c_f(int num, float2* data, float value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_c_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } void oskar_mem_set_value_real_cuda_m_f(int num, float4c* data, float value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_m_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } /* Double precision. */ void oskar_mem_set_value_real_cuda_r_d(int num, double* data, double value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_r_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } void oskar_mem_set_value_real_cuda_c_d(int num, double2* data, double value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_c_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } void oskar_mem_set_value_real_cuda_m_d(int num, double4c* data, double value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_m_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } #ifdef __cplusplus } #endif
3755a606b5b26fa78866bae8e6e795eb948fff7e.cu
/* * Copyright (c) 2013, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "mem/oskar_mem_set_value_real_cuda.h" #include <cuda_runtime.h> /* Kernels. ================================================================ */ /* Single precision. */ __global__ void oskar_mem_set_value_real_cudak_r_f(const int num, float* data, const float value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i] = value; } __global__ void oskar_mem_set_value_real_cudak_c_f(const int num, float2* data, const float value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i] = make_float2(value, 0.0f); } __global__ void oskar_mem_set_value_real_cudak_m_f(const int num, float4c* data, const float value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i].a = make_float2(value, 0.0f); data[i].b = make_float2(0.0f, 0.0f); data[i].c = make_float2(0.0f, 0.0f); data[i].d = make_float2(value, 0.0f); } /* Double precision. */ __global__ void oskar_mem_set_value_real_cudak_r_d(const int num, double* data, const double value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i] = value; } __global__ void oskar_mem_set_value_real_cudak_c_d(const int num, double2* data, const double value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i] = make_double2(value, 0.0); } __global__ void oskar_mem_set_value_real_cudak_m_d(const int num, double4c* data, const double value) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num) return; data[i].a = make_double2(value, 0.0); data[i].b = make_double2(0.0, 0.0); data[i].c = make_double2(0.0, 0.0); data[i].d = make_double2(value, 0.0); } #ifdef __cplusplus extern "C" { #endif /* Kernel wrappers. ======================================================== */ /* Single precision. 
*/ void oskar_mem_set_value_real_cuda_r_f(int num, float* data, float value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_r_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } void oskar_mem_set_value_real_cuda_c_f(int num, float2* data, float value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_c_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } void oskar_mem_set_value_real_cuda_m_f(int num, float4c* data, float value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_m_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } /* Double precision. */ void oskar_mem_set_value_real_cuda_r_d(int num, double* data, double value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_r_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } void oskar_mem_set_value_real_cuda_c_d(int num, double2* data, double value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_c_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } void oskar_mem_set_value_real_cuda_m_d(int num, double4c* data, double value) { int num_blocks, num_threads = 256; num_blocks = (num + num_threads - 1) / num_threads; oskar_mem_set_value_real_cudak_m_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (num, data, value); } #ifdef __cplusplus } #endif
4db5a24912b17aef5cb3eb397848b9c83a5345c1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dag_tracer.h" #include "cuda_error_check.h" #include "memory.h" #include "tracer.h" #include "dags/basic_dag/basic_dag.h" #include "dags/hash_dag/hash_dag.h" #include "dags/hash_dag/hash_dag_colors.h" DAGTracer::DAGTracer(bool headLess) : headLess(headLess) { if (headLess) { const auto setupArray = [](auto& array, auto& buffer, auto x, auto y, auto z, auto w) { hipChannelFormatDesc desc = hipCreateChannelDesc(x, y, z, w, hipChannelFormatKindUnsigned); CUDA_CHECKED_CALL hipMallocArray(&array, &desc, imageWidth, imageHeight, hipArraySurfaceLoadStore); buffer.create_surface(array); }; setupArray(pathArray, pathsBuffer, 32, 32, 32, 32); setupArray(colorsArray, colorsBuffer, 8, 8, 8, 8); } else { const auto setupImage = [](auto& buffer, auto& image, GLint formatA, GLenum formatB, GLenum formatC) { glGenTextures(1, &image); glBindTexture(GL_TEXTURE_2D, image); glTexImage2D(GL_TEXTURE_2D, 0, formatA, (int32)imageWidth, (int32)imageHeight, 0, formatB, formatC, nullptr); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glBindTexture(GL_TEXTURE_2D, 0); buffer.register_resource(image); }; setupImage(pathsBuffer, pathsImage, GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT); setupImage(colorsBuffer, colorsImage, GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE); pathCache = Memory::malloc<uint3>("path cache", sizeof(uint3), EMemoryType::GPU_Managed); } hipEventCreate(&eventBeg); hipEventCreate(&eventEnd); } DAGTracer::~DAGTracer() { if (headLess) { pathsBuffer.destroy_surface(); colorsBuffer.destroy_surface(); hipFreeArray(pathArray); hipFreeArray(colorsArray); } else { pathsBuffer.unregister_resource(); colorsBuffer.unregister_resource(); glDeleteTextures(1, &pathsImage); glDeleteTextures(1, &colorsImage); Memory::free(pathCache); } hipEventDestroy(eventBeg); hipEventDestroy(eventEnd); } inline Tracer::TracePathsParams get_trace_params( const CameraView& camera, uint32 levels, const DAGInfo& dagInfo) { const double3 position = make_double3(camera.position); const double3 direction = make_double3(camera.forward()); const double3 up = make_double3(camera.up()); const double3 right = make_double3(camera.right()); const double3 boundsMin = make_double3(dagInfo.boundsAABBMin); const double3 boundsMax = make_double3(dagInfo.boundsAABBMax); const double fov = camera.fov / 2.0 * (double(M_PI) / 180.); const double aspect_ratio = double(imageWidth) / double(imageHeight); const double3 X = right * sin(fov) * aspect_ratio; const double3 Y = up * sin(fov); const double3 Z = direction * cos(fov); const double3 bottomLeft = position + Z - Y - X; const double3 bottomRight = position + Z - Y + X; const double3 topLeft = position + Z + Y - X; const double3 translation = -boundsMin; const double3 scale = make_double3(double(1 << levels)) / (boundsMax - boundsMin); const double3 finalPosition = (position + translation) * scale; const double3 finalBottomLeft = (bottomLeft + translation) * scale; const double3 finalTopLeft = (topLeft + translation) * scale; const double3 finalBottomRight = (bottomRight + translation) * scale; const double3 dx = (finalBottomRight - finalBottomLeft) * (1.0 / imageWidth); const double3 dy = (finalTopLeft - finalBottomLeft) * (1.0 / imageHeight); Tracer::TracePathsParams params; params.cameraPosition = finalPosition; params.rayMin = finalBottomLeft; params.rayDDx = dx; params.rayDDy = dy; return params; } template<typename TDAG> 
float DAGTracer::resolve_paths(const CameraView& camera, const DAGInfo& dagInfo, const TDAG& dag) { PROFILE_FUNCTION(); const dim3 block_dim = dim3(4, 64); const dim3 grid_dim = dim3(imageWidth / block_dim.x + 1, imageHeight / block_dim.y + 1); if (!headLess) pathsBuffer.map_surface(); auto traceParams = get_trace_params(camera, dag.levels, dagInfo); traceParams.pathsSurface = pathsBuffer.cudaSurface; CUDA_CHECK_ERROR(); hipEventRecord(eventBeg); hipLaunchKernelGGL(( Tracer::trace_paths) , dim3(grid_dim), dim3(block_dim), 0, 0, traceParams, dag); hipEventRecord(eventEnd); hipEventSynchronize(eventEnd); CUDA_CHECK_ERROR(); float elapsed; hipEventElapsedTime(&elapsed, eventBeg, eventEnd); CUDA_CHECK_ERROR(); if (!headLess) pathsBuffer.unmap_surface(); return elapsed; } template<typename TDAG, typename TDAGColors> float DAGTracer::resolve_colors(const TDAG& dag, const TDAGColors& colors, EDebugColors debugColors, uint32 debugColorsIndexLevel, ToolInfo toolInfo) { PROFILE_FUNCTION(); colors.check_ready_for_rt(); const dim3 block_dim = dim3(4, 64); const dim3 grid_dim = dim3(imageWidth / block_dim.x + 1, imageHeight / block_dim.y + 1); if (!headLess) pathsBuffer.map_surface(); if (!headLess) colorsBuffer.map_surface(); Tracer::TraceColorsParams traceParams; traceParams.debugColors = debugColors; traceParams.debugColorsIndexLevel = debugColorsIndexLevel; traceParams.toolInfo = toolInfo; traceParams.pathsSurface = pathsBuffer.cudaSurface; traceParams.colorsSurface = colorsBuffer.cudaSurface; CUDA_CHECK_ERROR(); hipEventRecord(eventBeg); hipLaunchKernelGGL(( Tracer::trace_colors), dim3(grid_dim), dim3(block_dim), 0, 0, traceParams, dag, colors); hipEventRecord(eventEnd); hipEventSynchronize(eventEnd); float elapsed; hipEventElapsedTime(&elapsed, eventBeg, eventEnd); CUDA_CHECK_ERROR(); if (!headLess) pathsBuffer.unmap_surface(); if (!headLess) colorsBuffer.unmap_surface(); return elapsed; } template<typename TDAG> float DAGTracer::resolve_shadows(const CameraView& camera, const DAGInfo& dagInfo, const TDAG& dag, float shadowBias, float fogDensity) { PROFILE_FUNCTION(); const dim3 block_dim = dim3(4, 64); const dim3 grid_dim = dim3(imageWidth / block_dim.x + 1, imageHeight / block_dim.y + 1); if (!headLess) pathsBuffer.map_surface(); if (!headLess) colorsBuffer.map_surface(); const auto pathParams = get_trace_params(camera, dag.levels, dagInfo); Tracer::TraceShadowsParams traceParams{ pathParams.cameraPosition, pathParams.rayMin, pathParams.rayDDx, pathParams.rayDDy, shadowBias, fogDensity, pathsBuffer.cudaSurface, colorsBuffer.cudaSurface }; CUDA_CHECK_ERROR(); hipEventRecord(eventBeg); hipLaunchKernelGGL(( Tracer::trace_shadows) , dim3(grid_dim), dim3(block_dim), 0, 0, traceParams, dag); hipEventRecord(eventEnd); hipEventSynchronize(eventEnd); float elapsed; hipEventElapsedTime(&elapsed, eventBeg, eventEnd); CUDA_CHECK_ERROR(); if (!headLess) pathsBuffer.unmap_surface(); if (!headLess) colorsBuffer.unmap_surface(); return elapsed; } template float DAGTracer::resolve_paths<BasicDAG>(const CameraView&, const DAGInfo&, const BasicDAG&); template float DAGTracer::resolve_paths<HashDAG >(const CameraView&, const DAGInfo&, const HashDAG &); template float DAGTracer::resolve_shadows<BasicDAG>(const CameraView&, const DAGInfo&, const BasicDAG&, float, float); template float DAGTracer::resolve_shadows<HashDAG >(const CameraView&, const DAGInfo&, const HashDAG &, float, float); #define COLORS_IMPL(Dag, Colors)\ template float DAGTracer::resolve_colors<Dag, Colors>(const Dag&, const Colors&, EDebugColors, 
uint32, ToolInfo); COLORS_IMPL(BasicDAG, BasicDAGUncompressedColors) COLORS_IMPL(BasicDAG, BasicDAGCompressedColors) COLORS_IMPL(BasicDAG, BasicDAGColorErrors) COLORS_IMPL(HashDAG, HashDAGColors) __global__ void read_path(uint32 x, uint32 y, hipSurfaceObject_t surface, uint3* output) { *output = make_uint3(surf2Dread<uint4>(surface, x * sizeof(uint4), y)); } uint3 DAGTracer::get_path(uint32 posX, uint32 posY) { PROFILE_FUNCTION(); if (headLess) return {}; check(posX < imageWidth); check(posY < imageHeight); pathsBuffer.map_surface(); CUDA_CHECK_ERROR(); hipLaunchKernelGGL(( read_path), dim3(1),dim3(1), 0, 0, posX, posY, pathsBuffer.cudaSurface, pathCache); CUDA_CHECK_ERROR(); if (!headLess) pathsBuffer.unmap_surface(); return *pathCache; }
4db5a24912b17aef5cb3eb397848b9c83a5345c1.cu
#include "dag_tracer.h" #include "cuda_error_check.h" #include "memory.h" #include "tracer.h" #include "dags/basic_dag/basic_dag.h" #include "dags/hash_dag/hash_dag.h" #include "dags/hash_dag/hash_dag_colors.h" DAGTracer::DAGTracer(bool headLess) : headLess(headLess) { if (headLess) { const auto setupArray = [](auto& array, auto& buffer, auto x, auto y, auto z, auto w) { cudaChannelFormatDesc desc = cudaCreateChannelDesc(x, y, z, w, cudaChannelFormatKindUnsigned); CUDA_CHECKED_CALL cudaMallocArray(&array, &desc, imageWidth, imageHeight, cudaArraySurfaceLoadStore); buffer.create_surface(array); }; setupArray(pathArray, pathsBuffer, 32, 32, 32, 32); setupArray(colorsArray, colorsBuffer, 8, 8, 8, 8); } else { const auto setupImage = [](auto& buffer, auto& image, GLint formatA, GLenum formatB, GLenum formatC) { glGenTextures(1, &image); glBindTexture(GL_TEXTURE_2D, image); glTexImage2D(GL_TEXTURE_2D, 0, formatA, (int32)imageWidth, (int32)imageHeight, 0, formatB, formatC, nullptr); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glBindTexture(GL_TEXTURE_2D, 0); buffer.register_resource(image); }; setupImage(pathsBuffer, pathsImage, GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT); setupImage(colorsBuffer, colorsImage, GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE); pathCache = Memory::malloc<uint3>("path cache", sizeof(uint3), EMemoryType::GPU_Managed); } cudaEventCreate(&eventBeg); cudaEventCreate(&eventEnd); } DAGTracer::~DAGTracer() { if (headLess) { pathsBuffer.destroy_surface(); colorsBuffer.destroy_surface(); cudaFreeArray(pathArray); cudaFreeArray(colorsArray); } else { pathsBuffer.unregister_resource(); colorsBuffer.unregister_resource(); glDeleteTextures(1, &pathsImage); glDeleteTextures(1, &colorsImage); Memory::free(pathCache); } cudaEventDestroy(eventBeg); cudaEventDestroy(eventEnd); } inline Tracer::TracePathsParams get_trace_params( const CameraView& camera, uint32 levels, const DAGInfo& dagInfo) { const double3 position = make_double3(camera.position); const double3 direction = make_double3(camera.forward()); const double3 up = make_double3(camera.up()); const double3 right = make_double3(camera.right()); const double3 boundsMin = make_double3(dagInfo.boundsAABBMin); const double3 boundsMax = make_double3(dagInfo.boundsAABBMax); const double fov = camera.fov / 2.0 * (double(M_PI) / 180.); const double aspect_ratio = double(imageWidth) / double(imageHeight); const double3 X = right * sin(fov) * aspect_ratio; const double3 Y = up * sin(fov); const double3 Z = direction * cos(fov); const double3 bottomLeft = position + Z - Y - X; const double3 bottomRight = position + Z - Y + X; const double3 topLeft = position + Z + Y - X; const double3 translation = -boundsMin; const double3 scale = make_double3(double(1 << levels)) / (boundsMax - boundsMin); const double3 finalPosition = (position + translation) * scale; const double3 finalBottomLeft = (bottomLeft + translation) * scale; const double3 finalTopLeft = (topLeft + translation) * scale; const double3 finalBottomRight = (bottomRight + translation) * scale; const double3 dx = (finalBottomRight - finalBottomLeft) * (1.0 / imageWidth); const double3 dy = (finalTopLeft - finalBottomLeft) * (1.0 / imageHeight); Tracer::TracePathsParams params; params.cameraPosition = finalPosition; params.rayMin = finalBottomLeft; params.rayDDx = dx; params.rayDDy = dy; return params; } template<typename TDAG> float DAGTracer::resolve_paths(const CameraView& camera, const DAGInfo& 
dagInfo, const TDAG& dag) { PROFILE_FUNCTION(); const dim3 block_dim = dim3(4, 64); const dim3 grid_dim = dim3(imageWidth / block_dim.x + 1, imageHeight / block_dim.y + 1); if (!headLess) pathsBuffer.map_surface(); auto traceParams = get_trace_params(camera, dag.levels, dagInfo); traceParams.pathsSurface = pathsBuffer.cudaSurface; CUDA_CHECK_ERROR(); cudaEventRecord(eventBeg); Tracer::trace_paths <<<grid_dim, block_dim>>> (traceParams, dag); cudaEventRecord(eventEnd); cudaEventSynchronize(eventEnd); CUDA_CHECK_ERROR(); float elapsed; cudaEventElapsedTime(&elapsed, eventBeg, eventEnd); CUDA_CHECK_ERROR(); if (!headLess) pathsBuffer.unmap_surface(); return elapsed; } template<typename TDAG, typename TDAGColors> float DAGTracer::resolve_colors(const TDAG& dag, const TDAGColors& colors, EDebugColors debugColors, uint32 debugColorsIndexLevel, ToolInfo toolInfo) { PROFILE_FUNCTION(); colors.check_ready_for_rt(); const dim3 block_dim = dim3(4, 64); const dim3 grid_dim = dim3(imageWidth / block_dim.x + 1, imageHeight / block_dim.y + 1); if (!headLess) pathsBuffer.map_surface(); if (!headLess) colorsBuffer.map_surface(); Tracer::TraceColorsParams traceParams; traceParams.debugColors = debugColors; traceParams.debugColorsIndexLevel = debugColorsIndexLevel; traceParams.toolInfo = toolInfo; traceParams.pathsSurface = pathsBuffer.cudaSurface; traceParams.colorsSurface = colorsBuffer.cudaSurface; CUDA_CHECK_ERROR(); cudaEventRecord(eventBeg); Tracer::trace_colors<<<grid_dim, block_dim>>>(traceParams, dag, colors); cudaEventRecord(eventEnd); cudaEventSynchronize(eventEnd); float elapsed; cudaEventElapsedTime(&elapsed, eventBeg, eventEnd); CUDA_CHECK_ERROR(); if (!headLess) pathsBuffer.unmap_surface(); if (!headLess) colorsBuffer.unmap_surface(); return elapsed; } template<typename TDAG> float DAGTracer::resolve_shadows(const CameraView& camera, const DAGInfo& dagInfo, const TDAG& dag, float shadowBias, float fogDensity) { PROFILE_FUNCTION(); const dim3 block_dim = dim3(4, 64); const dim3 grid_dim = dim3(imageWidth / block_dim.x + 1, imageHeight / block_dim.y + 1); if (!headLess) pathsBuffer.map_surface(); if (!headLess) colorsBuffer.map_surface(); const auto pathParams = get_trace_params(camera, dag.levels, dagInfo); Tracer::TraceShadowsParams traceParams{ pathParams.cameraPosition, pathParams.rayMin, pathParams.rayDDx, pathParams.rayDDy, shadowBias, fogDensity, pathsBuffer.cudaSurface, colorsBuffer.cudaSurface }; CUDA_CHECK_ERROR(); cudaEventRecord(eventBeg); Tracer::trace_shadows <<<grid_dim, block_dim>>> (traceParams, dag); cudaEventRecord(eventEnd); cudaEventSynchronize(eventEnd); float elapsed; cudaEventElapsedTime(&elapsed, eventBeg, eventEnd); CUDA_CHECK_ERROR(); if (!headLess) pathsBuffer.unmap_surface(); if (!headLess) colorsBuffer.unmap_surface(); return elapsed; } template float DAGTracer::resolve_paths<BasicDAG>(const CameraView&, const DAGInfo&, const BasicDAG&); template float DAGTracer::resolve_paths<HashDAG >(const CameraView&, const DAGInfo&, const HashDAG &); template float DAGTracer::resolve_shadows<BasicDAG>(const CameraView&, const DAGInfo&, const BasicDAG&, float, float); template float DAGTracer::resolve_shadows<HashDAG >(const CameraView&, const DAGInfo&, const HashDAG &, float, float); #define COLORS_IMPL(Dag, Colors)\ template float DAGTracer::resolve_colors<Dag, Colors>(const Dag&, const Colors&, EDebugColors, uint32, ToolInfo); COLORS_IMPL(BasicDAG, BasicDAGUncompressedColors) COLORS_IMPL(BasicDAG, BasicDAGCompressedColors) COLORS_IMPL(BasicDAG, BasicDAGColorErrors) 
COLORS_IMPL(HashDAG, HashDAGColors) __global__ void read_path(uint32 x, uint32 y, cudaSurfaceObject_t surface, uint3* output) { *output = make_uint3(surf2Dread<uint4>(surface, x * sizeof(uint4), y)); } uint3 DAGTracer::get_path(uint32 posX, uint32 posY) { PROFILE_FUNCTION(); if (headLess) return {}; check(posX < imageWidth); check(posY < imageHeight); pathsBuffer.map_surface(); CUDA_CHECK_ERROR(); read_path<<<1,1>>>(posX, posY, pathsBuffer.cudaSurface, pathCache); CUDA_CHECK_ERROR(); if (!headLess) pathsBuffer.unmap_surface(); return *pathCache; }
a09ea539f755816726a01a5689cf7d735f6070fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <unistd.h> #include <sys/time.h> using namespace std; // Shorthand for formatting and printing usage options to stderr #define fpe(msg) fprintf(stderr, "\t%s\n", msg); // Shorthand for handling CUDA errors. #define HANDLE_ERROR(err) ( HandleError( err, __FILE__, __LINE__ ) ) /** * DEFINED VALUES HERE */ #define TILE_WIDTH 64 #define TILE_HEIGHT 8 #define TILE_DEPTH 1 #define TILE_AGE 2 #define PER_THREAD_X 2 #define PER_THREAD_Y 2 #define PER_THREAD_Z 1 /***************** * CUDA Utilites * *****************/ void HandleError(hipError_t err, const char *file, int line) { // // Handle and report on CUDA errors. // if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), file, line); exit(EXIT_FAILURE); } } void checkCUDAError(const char *msg, bool exitOnError) { // // Check cuda error and print result if appropriate. // hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); if (exitOnError) { exit(-1); } } } void cleanupCuda(void) { // // Clean up CUDA resources. // // // Explicitly cleans up all runtime-related resources associated with the // calling host thread. // HANDLE_ERROR( hipDeviceReset() ); } /********************* * End CUDA Utilites * *********************/ struct Args { bool debug; bool sequential; bool blocked; bool overlapped; // Data attributes int size, dimensions, alloc_size; int xSize, ySize, zSize; int xBlockSize, yBlockSize, zBlockSize, tBlockSize; // Run attributes int grid_size, block_count, thread_count, iterations; }; void usage(char *prog_name, string msg) { if (msg.size() > 0) { fputs(msg.c_str(), stderr); } fprintf(stderr, "%s\n", prog_name); fprintf(stderr, "Options are:\n"); fpe("-n<size> Set data size (default: 1024)"); fpe("-d<dims> Set number of data dimensions (1, 2, or 3) (default: 2)"); fpe("-g<size> Set grid size"); fpe("-b<num> Set block count"); fpe("-t<num> Set thread count"); fpe("-i<iter> Number of iterations to perform (default: 1000)"); fpe("-x<size> X Dimension"); fpe("-y<size> Y Dimension"); fpe("-z<size> Z Dimension"); fpe("-T<size> T Dimension"); fpe("-S Execute sequential, CPU version"); fpe("-B Execute blocked sequential, CPU version"); fpe("-O Execute sequential overlapped tiling, CPU version"); fpe("-D Print debug info"); fpe("-h Print usage info (this message)"); exit(EXIT_FAILURE); } Args parse_arguments(int argc, char *argv[]) { Args args = Args(); args.debug = false; args.sequential = false; args.blocked = false; args.overlapped = false; args.size = 1024; args.dimensions = 2; args.xSize = args.ySize = args.zSize = 1; args.xBlockSize = args.yBlockSize = args.zBlockSize = 1; args.grid_size = 1; args.block_count = -1; args.thread_count = -1; args.iterations = 1000; int opt; // Parse args while ((opt = getopt(argc, argv, "n:d:g:b:t:i:x:y:z:T:hSBOD")) != -1) { switch (opt) { case 'D': args.debug = true; break; case 'S': args.sequential = true; break; case 'B': args.blocked = true; break; case 'O': args.overlapped = true; break; case 'n': args.size = atoi(optarg); break; case 'd': args.dimensions = atoi(optarg); break; case 'g': args.grid_size = atoi(optarg); break; case 'b': args.block_count = atoi(optarg); break; case 't': args.thread_count = atoi(optarg); break; case 'i': args.iterations = atoi(optarg); break; case 'x': args.xBlockSize = atoi(optarg); break; case 'X': args.xSize = atoi(optarg); break; case 
'y': args.yBlockSize = atoi(optarg); break; case 'Y': args.ySize = atoi(optarg); break; case 'z': args.zBlockSize = atoi(optarg); break; case 'Z': args.zSize = atoi(optarg); break; case 'T': args.tBlockSize = atoi(optarg); break; case 'h': usage(argv[0], ""); break; default: usage(argv[0], "Unrecognized option\n"); } } // check sizes if (args.size <= 0) { cout << "Data size must be larger than 0" << endl; exit(EXIT_FAILURE); } if (args.dimensions <= 0 || args.dimensions >= 4) { cerr << "Data must be 1, 2, or 3 dimensions" << endl; exit(EXIT_FAILURE); } // Calculations if (args.dimensions == 1) { args.alloc_size = args.size; } else if (args.dimensions == 2) { args.alloc_size = args.size * args.size; } else { args.alloc_size = args.size * args.size * args.size; } if (args.thread_count > 0) { args.block_count = args.alloc_size / args.thread_count; } else if (args.block_count > 0) { args.thread_count = args.alloc_size / args.block_count; } else { args.thread_count = 16; args.block_count = args.alloc_size / args.thread_count; } return args; } typedef struct { int dimensions; int height; int width; int depth; float *elements; } Matrix; Matrix initialize_matrix(int dimensions, int width, int height = 1, int depth = 1) { Matrix data; if (dimensions == 1 && width > 1) { data.width = width; data.height = 1; data.depth = 1; data.elements = (float *) malloc(width * sizeof(float)); data.elements[0] = 1.0; data.elements[width - 1] = 1.0; } else if (dimensions == 2 && width > 1 && height > 1) { data.width = width; data.height = height; data.depth = 1; data.elements = (float *) malloc(width * height * sizeof(float)); for (int y = 0; y < height; y += height - 1) { for (int x = 0; x < width; x++) { data.elements[y * width + x] = 1.0; } } for (int y = 0; y < height; y++) { for (int x = 0; x < width; x += width - 1) { data.elements[y * width + x] = 1.0; } } } else if (dimensions == 3 && width > 1 && height > 1 && depth > 1) { data.width = width; data.height = height; data.depth = depth; data.elements = (float *) malloc(width * height * depth * sizeof(float)); for (int z = 0; z < depth; z++) { // X = 0 & N planes for (int y = 0; y < height; y++) { for (int x = 0; x < width; x += width - 1) { data.elements[z * width * height + y * width + x] = 1.0; } } // Y = 0 & N planes for (int y = 0; y < height; y += height - 1) { for (int x = 0; x < width; x++) { data.elements[z * width * height + y * width + x] = 1.0; } } } // Z = 0 & N planes for (int z = 0; z < depth; z += depth - 1) { for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++) { data.elements[z * width * height + y * width + x] = 1.0; } } } } else { fprintf(stderr, "Improper dimension or size."); exit(1); } return data; } /**************** * CUDA KERNELS * ****************/ #define BLOCK_DIM_X TILE_WIDTH/PER_THREAD_X #define BLOCK_DIM_Y TILE_HEIGHT/PER_THREAD_Y #define BLOCK_DIM_Z TILE_DEPTH/PER_THREAD_Z // ceil integer division, have to use the BLOCK_DIM_ definitions rather than the defines themselves or it won't work #define PER_THREAD_OVERLAPPED_COUNT_X (TILE_AGE + TILE_WIDTH/PER_THREAD_X - 1) / (TILE_WIDTH/PER_THREAD_X) #define PER_THREAD_OVERLAPPED_COUNT_Y (TILE_AGE + TILE_HEIGHT/PER_THREAD_Y - 1) / (TILE_HEIGHT/PER_THREAD_Y) #define PER_THREAD_OVERLAPPED_COUNT_Z (TILE_AGE + TILE_DEPTH/PER_THREAD_Z - 1) / (TILE_DEPTH/PER_THREAD_Z) #define PER_THREAD_COMBINED_ITERATIONS_X (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X) #define PER_THREAD_COMBINED_ITERATIONS_Y (PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + 
PER_THREAD_OVERLAPPED_COUNT_Y) #define PER_THREAD_COMBINED_ITERATIONS_Z (PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z + PER_THREAD_OVERLAPPED_COUNT_Z) __global__ void jacobi1d(Matrix data, Matrix result) { int threadCol = threadIdx.x; int blockCol = blockIdx.x; int globalX[PER_THREAD_COMBINED_ITERATIONS_X]; int sharedX[PER_THREAD_COMBINED_ITERATIONS_X]; // Shared and local data arrays __shared__ float shared[2][(TILE_AGE + TILE_WIDTH + TILE_AGE)]; int sharedXMax = TILE_AGE + TILE_WIDTH + TILE_AGE - 1; int tCurr = 0; int tPrev = 1; // Some useful bits of info int globalBlockStart = blockCol * TILE_WIDTH; // Use >= comparison int globalBlockReadStart = max(0, globalBlockStart - TILE_AGE); // Use <= comparison int globalBlockReadEnd = min(data.width - 1, globalBlockStart + TILE_WIDTH + TILE_AGE); // Indexes in overlapped region left of the block #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + threadCol - (PER_THREAD_OVERLAPPED_COUNT_X - x) * BLOCK_DIM_X; int globX = globalBlockStart + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { // Locations inside the block int sharX = TILE_AGE + threadCol + BLOCK_DIM_X * (x - PER_THREAD_OVERLAPPED_COUNT_X); int globX = globalBlockStart + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + TILE_WIDTH + threadCol + BLOCK_DIM_X * (x - (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X)); int globX = globalBlockStart + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } __syncthreads(); /** * Global Memory: * * Block 0 Block 1 Block 2 Block 3 Block 4 * | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | * * If we're block 2, we need: * * Block 0 Block 1 Block 2 Block 3 Block 4 * | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | * | this | * * And for a tile age of AGE we also need: * * Block 0 Block 1 Block 2 Block 3 Block 4 * | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | * | this | | this | * * So what we end up with is * * Block 0 Block 1 Block 2 Block 3 Block 4 * | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | * | AGE | TLSIZE | AGE | * * TILE_AGE + TILE_SIZE + TILE_AGE */ // Read the block data itself into shared memory, this will always coalesce nicely #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { shared[0][sharedX[x]] = data.elements[globalX[x]]; } // Read the left overlapped data into shared memory #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { // Left hand side data int globX = globalX[x]; if (globX >= globalBlockReadStart && globX <= globalBlockReadEnd) { shared[0][sharedX[x]] = data.elements[globX]; } } // Read the right overlapped data into shared memory #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { 
// Left hand side data int globX = globalX[x]; if (globX >= globalBlockReadStart && globX <= globalBlockReadEnd) { shared[0][sharedX[x]] = data.elements[globX]; } } /* * Calculate Values */ #pragma unroll for (int t = 1; t <= TILE_AGE; t++) { int tmp = tCurr; tCurr = tPrev; tPrev = tmp; __syncthreads(); int iterationCalculateStart = max(globalBlockStart - TILE_AGE + t - 1, 0); int iterationCalculateEnd = min(globalBlockStart + TILE_WIDTH + TILE_AGE - t, data.width - 1); // First let's do the block itself, since that always plays nicely #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > iterationCalculateStart && globX < iterationCalculateEnd) { shared[tCurr][sharX] = (shared[tPrev][sharX] + shared[tPrev][sharX - 1] + shared[tPrev][sharX + 1]) / 3; } else if (sharX >= 0){ shared[tCurr][sharX] = shared[tPrev][sharX]; } } // Now the left overlapped regions #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > iterationCalculateStart && globX < iterationCalculateEnd) { shared[tCurr][sharX] = (shared[tPrev][sharX - 1] + shared[tPrev][sharX] + shared[tPrev][sharX + 1]) / 3; } else if (sharX >= 0){ shared[tCurr][sharX] = shared[tPrev][sharX]; } } // And the right overlapped regions #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > iterationCalculateStart && globX < iterationCalculateEnd) { shared[tCurr][sharX] = (shared[tPrev][sharX - 1] + shared[tPrev][sharX] + shared[tPrev][sharX + 1]) / 3; } else if (sharX >= 0){ shared[tCurr][sharX] = shared[tPrev][sharX]; } } } __syncthreads(); #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { result.elements[globalX[x]] = shared[tCurr][sharedX[x]]; } } __global__ void jacobi2d(Matrix data, Matrix result) { int threadRow = threadIdx.y; int threadCol = threadIdx.x; int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Indexes so we don't have to recompute them. 
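    // With the compile-time settings at the top of this file, the index bookkeeping
    // below works out to the following concrete numbers (derived from the #defines):
    //   BLOCK_DIM_X = TILE_WIDTH  / PER_THREAD_X = 64 / 2 = 32
    //   BLOCK_DIM_Y = TILE_HEIGHT / PER_THREAD_Y =  8 / 2 =  4
    //   PER_THREAD_OVERLAPPED_COUNT_X = ceil(TILE_AGE / BLOCK_DIM_X) = ceil(2 / 32) = 1
    //   PER_THREAD_OVERLAPPED_COUNT_Y = ceil(TILE_AGE / BLOCK_DIM_Y) = ceil(2 /  4) = 1
    //   PER_THREAD_COMBINED_ITERATIONS_X = 1 + 2 + 1 = 4 x-slots per thread (halo + tile + halo)
    //   PER_THREAD_COMBINED_ITERATIONS_Y = 1 + 2 + 1 = 4 y-slots per thread (halo + tile + halo)
    //   shared[2][2 + 8 + 2][2 + 64 + 2] = 2 * 12 * 68 floats = 6528 bytes of shared memory per block
    // so each 32x4 thread block advances a 64x8 output tile (plus its TILE_AGE-wide halo) by TILE_AGE time steps per launch.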
int globalIndex[PER_THREAD_COMBINED_ITERATIONS_Y][PER_THREAD_COMBINED_ITERATIONS_X]; int globalX[PER_THREAD_COMBINED_ITERATIONS_X]; int globalY[PER_THREAD_COMBINED_ITERATIONS_Y]; int sharedX[PER_THREAD_COMBINED_ITERATIONS_X]; int sharedY[PER_THREAD_COMBINED_ITERATIONS_Y]; // Shared and local data arrays __shared__ float shared[2][TILE_AGE + TILE_HEIGHT + TILE_AGE][TILE_AGE + TILE_WIDTH + TILE_AGE]; int sharedXMax = TILE_AGE + TILE_WIDTH + TILE_AGE - 1; int sharedYMax = TILE_AGE + TILE_HEIGHT + TILE_AGE - 1; int tCurr = 0; int tPrev = 1; // Some useful bits of info int globalBlockStartX = blockCol * TILE_WIDTH; int globalBlockStartY = blockRow * TILE_HEIGHT; // Use >= comparison int globalBlockReadStartX = max(0, globalBlockStartX - TILE_AGE); int globalBlockReadStartY = max(0, globalBlockStartY - TILE_AGE); // Use <= comparison int globalBlockReadEndX = min(data.width - 1, globalBlockStartX + TILE_WIDTH + TILE_AGE); int globalBlockReadEndY = min(data.height - 1, globalBlockStartY + TILE_HEIGHT + TILE_AGE); /* * Calculate indexes into the global and shared arrays */ // X Indexes // Overlapped region to the left of the block #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + threadCol - (PER_THREAD_OVERLAPPED_COUNT_X - x) * BLOCK_DIM_X; int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { // Locations inside the block int sharX = TILE_AGE + threadCol + BLOCK_DIM_X * (x - PER_THREAD_OVERLAPPED_COUNT_X); int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + TILE_WIDTH + threadCol + BLOCK_DIM_X * (x - (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X)); int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } // Y Indexes // Overlapped region below block #pragma unroll for (int y = 0; y < PER_THREAD_OVERLAPPED_COUNT_Y; y++) { // Offset by TILE_AGE to make sure it's within the range since we're going back by TILE_AGE int sharY = TILE_AGE + threadRow - (PER_THREAD_OVERLAPPED_COUNT_Y - y) * BLOCK_DIM_Y; int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Main block #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) { int sharY = TILE_AGE + threadRow + BLOCK_DIM_Y * (y - PER_THREAD_OVERLAPPED_COUNT_Y); int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Above block #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + 
PER_THREAD_OVERLAPPED_COUNT_Y; y++) { int sharY = TILE_AGE + TILE_HEIGHT + threadRow + BLOCK_DIM_Y * (y - (PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y)); int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Global absolute index #pragma unroll for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) { #pragma unroll for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) { globalIndex[y][x] = globalX[x] + globalY[y] * data.width; } } /* * Copy into shared memory */ // TODO: Break into main block and overlapped regions blocks so the main block can at least be coalesced #pragma unroll for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) { #pragma unroll for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) { /* * We want to be doing block-contiguous reads, e.g. for 2x2 block dimension, 2 per thread for x and y * we want the read pattern to look like: * * 11|22 * 11|22 * ----- * 33|44 * 33|44 * * Optimizing the width for reads is the responsibility of the calling code. */ if (globalX[x] >= 0 && globalX[x] < data.width && globalY[y] >= 0 && globalY[y] < data.height) { shared[0][sharedY[y]][sharedX[x]] = data.elements[globalIndex[y][x]]; } } } /* * Calculate Values */ // TODO Brevity and clarity might be better than this mismatched thing after all #pragma unroll for (int t = 1; t <= TILE_AGE; t++) { int tmp = tCurr; tCurr = tPrev; tPrev = tmp; __syncthreads(); int calculateStartX = max(globalBlockStartX - TILE_AGE + t - 1, 0); int calculateEndX = min(globalBlockStartX + TILE_WIDTH + TILE_AGE - t, data.width - 1); int calculateStartY = max(globalBlockStartY - TILE_AGE + t - 1, 0); int calculateEndY = min(globalBlockStartY + TILE_HEIGHT + TILE_AGE - t, data.height - 1); #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) { int globY = globalY[y]; int sharY = sharedY[y]; // First the main block since that's nicely laid out #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { // Calculate new value shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // Now the left overlapped regions #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // And the right overlapped regions #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < 
calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } } // Now the overlapped region below the block #pragma unroll for (int y = 0; y < PER_THREAD_OVERLAPPED_COUNT_Y; y++) { int globY = globalY[y]; int sharY = sharedY[y]; #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { // Calculate new value shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // Now the left and below overlapped region #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // And the right and below overlapped region #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } } // Overlapped region above the block #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + PER_THREAD_OVERLAPPED_COUNT_Y; y++) { int globY = globalY[y]; int sharY = sharedY[y]; #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { // Calculate new value shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // Now the left and below overlapped region #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( 
shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // And the right and below overlapped region #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } } } __syncthreads(); #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) { int sharY = sharedY[y]; int globY = globalY[y]; #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int sharX = sharedX[x]; int globX = globalX[x]; if (globX >= 0 && globX < data.width && globY >= 0 && globY < data.height) { result.elements[globalIndex[y][x]] = shared[tCurr][sharY][sharX]; } } } } __global__ void jacobi3d(Matrix data, Matrix result) { int threadCol = threadIdx.x; int threadRow = threadIdx.y; int threadDep = threadIdx.z; int blockCol = blockIdx.x; int blockRow = blockIdx.y; int blockDep = blockIdx.z; // Indexes so we don't have to recompute them. int globalIndex[PER_THREAD_COMBINED_ITERATIONS_Z][PER_THREAD_COMBINED_ITERATIONS_Y][PER_THREAD_COMBINED_ITERATIONS_X]; int globalX[PER_THREAD_COMBINED_ITERATIONS_X]; int globalY[PER_THREAD_COMBINED_ITERATIONS_Y]; int globalZ[PER_THREAD_COMBINED_ITERATIONS_Z]; int sharedX[PER_THREAD_COMBINED_ITERATIONS_X]; int sharedY[PER_THREAD_COMBINED_ITERATIONS_Y]; int sharedZ[PER_THREAD_COMBINED_ITERATIONS_Z]; // Shared and local data arrays __shared__ float shared[2][TILE_AGE + TILE_DEPTH + TILE_AGE][TILE_AGE + TILE_HEIGHT + TILE_AGE][TILE_AGE + TILE_WIDTH + TILE_AGE]; int sharedXMax = TILE_AGE + TILE_WIDTH + TILE_AGE - 1; int sharedYMax = TILE_AGE + TILE_HEIGHT + TILE_AGE - 1; int sharedZMax = TILE_AGE + TILE_DEPTH + TILE_AGE - 1; int tCurr = 0; int tPrev = 1; // Some useful bits of info int globalBlockStartX = blockCol * TILE_WIDTH; int globalBlockStartY = blockRow * TILE_HEIGHT; int globalBlockStartZ = blockDep * TILE_DEPTH; // Use >= comparison int globalBlockReadStartX = max(0, globalBlockStartX - TILE_AGE); int globalBlockReadStartY = max(0, globalBlockStartY - TILE_AGE); int globalBlockReadStartZ = max(0, globalBlockStartZ - TILE_AGE); // Use <= comparison int globalBlockReadEndX = min(data.width - 1, globalBlockStartX + TILE_WIDTH + TILE_AGE); int globalBlockReadEndY = min(data.height - 1, globalBlockStartY + TILE_HEIGHT + TILE_AGE); int globalBlockReadEndZ = min(data.depth - 1, globalBlockStartZ + TILE_DEPTH + TILE_AGE); /* * Calculate indexes into the global and shared arrays */ // Overlapped region to the left of the block #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + threadCol - (PER_THREAD_OVERLAPPED_COUNT_X - x) * BLOCK_DIM_X; int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { 
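    // This x-slot falls outside the shared tile (halo included) or outside the global grid:
    // store -1 so the copy, compute, and write-back loops below skip it.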
sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { // Locations inside the block int sharX = TILE_AGE + threadCol + BLOCK_DIM_X * (x - PER_THREAD_OVERLAPPED_COUNT_X); int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + TILE_WIDTH + threadCol + BLOCK_DIM_X * (x - (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X)); int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } // Y Indexes // Overlapped region below block #pragma unroll for (int y = 0; y < PER_THREAD_OVERLAPPED_COUNT_Y; y++) { // Offset by TILE_AGE to make sure it's within the range since we're going back by TILE_AGE int sharY = TILE_AGE + threadRow - (PER_THREAD_OVERLAPPED_COUNT_Y - y) * BLOCK_DIM_Y; int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Main block #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) { int sharY = TILE_AGE + threadRow + BLOCK_DIM_Y * (y - PER_THREAD_OVERLAPPED_COUNT_Y); int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Above block #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + PER_THREAD_OVERLAPPED_COUNT_Y; y++) { int sharY = TILE_AGE + TILE_HEIGHT + threadRow + BLOCK_DIM_Y * (y - (PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y)); int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Z Indexes // Overlapped region in front of block #pragma unroll for (int z = 0; z < PER_THREAD_OVERLAPPED_COUNT_Z; z++) { // Offset by TILE_AGE to make sure it's within the range since we're going back by TILE_AGE int sharZ = TILE_AGE + threadDep - (PER_THREAD_OVERLAPPED_COUNT_Z - z) * BLOCK_DIM_Z; // Remove the offset for the global index int globZ = globalBlockStartZ + sharZ - TILE_AGE; if (sharZ < 0 || sharZ > sharedZMax || globZ < 0 || globZ > data.depth - 1) { sharedZ[z] = -1; globalZ[z] = -1; } else { sharedZ[z] = sharZ; globalZ[z] = globZ; } } // Main block #pragma unroll for (int z = PER_THREAD_OVERLAPPED_COUNT_Z; z < PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z; z++) { int sharZ = TILE_AGE + threadDep + BLOCK_DIM_Z * (z - PER_THREAD_OVERLAPPED_COUNT_Z); int globZ = globalBlockStartZ + sharZ - TILE_AGE; if (sharZ < 0 || sharZ > sharedZMax || globZ < 0 || globZ > data.depth - 1) { sharedZ[z] = -1; globalZ[z] = -1; } else { sharedZ[z] = sharZ; globalZ[z] = globZ; } } // Overlapped region behind block #pragma unroll for (int z = 
PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z; z < PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z + PER_THREAD_OVERLAPPED_COUNT_Z; z++) { int sharZ = TILE_AGE + TILE_DEPTH + threadDep + BLOCK_DIM_Z * (z - (PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z)); int globZ = globalBlockStartZ + sharZ - TILE_AGE; if (sharZ < 0 || sharZ > sharedZMax || globZ < 0 || globZ > data.depth - 1) { sharedZ[z] = -1; globalZ[z] = -1; } else { sharedZ[z] = sharZ; globalZ[z] = globZ; } } // Global absolute index #pragma unroll for (int z = 0; z < PER_THREAD_COMBINED_ITERATIONS_Z; z++) { int zTemp = globalZ[z] * data.width * data.height; #pragma unroll for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) { int yTemp = globalY[y] * data.width; #pragma unroll for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) { globalIndex[z][y][x] = globalX[x] + yTemp + zTemp; } } } /* * Copy into shared memory */ #pragma unroll for (int z = 0; z < PER_THREAD_COMBINED_ITERATIONS_Z; z++) { #pragma unroll for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) { #pragma unroll for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) { if (globalX[x] >= 0 && globalX[x] < data.width && globalY[y] >= 0 && globalY[y] < data.height && globalZ[z] >= 0 && globalZ[z] < data.depth) { shared[0][sharedZ[z]][sharedY[y]][sharedX[x]] = data.elements[globalIndex[z][y][x]]; } } } } #pragma unroll for (int t = 1; t <= TILE_AGE; t++) { int tmp = tCurr; tCurr = tPrev; tPrev = tmp; __syncthreads(); int calculateStartX = max(globalBlockStartX - TILE_AGE + t - 1, 0); int calculateEndX = min(globalBlockStartX + TILE_WIDTH + TILE_AGE - t, data.width - 1); int calculateStartY = max(globalBlockStartY - TILE_AGE + t - 1, 0); int calculateEndY = min(globalBlockStartY + TILE_HEIGHT + TILE_AGE - t, data.height - 1); int calculateStartZ = max(globalBlockStartZ - TILE_AGE + t - 1, 0); int calculateEndZ = min(globalBlockStartZ + TILE_DEPTH + TILE_AGE - t, data.depth - 1); #pragma unroll for (int z = 0; z < PER_THREAD_COMBINED_ITERATIONS_Z; z++) { int globZ = globalZ[z]; int sharZ = sharedZ[z]; #pragma unroll for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) { int globY = globalY[y]; int sharY = sharedY[y]; #pragma unroll for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY && globZ > calculateStartZ && globZ < calculateEndZ) { shared[tCurr][sharZ][sharY][sharX] = ( shared[tPrev][sharZ][sharY][sharX] + shared[tPrev][sharZ][sharY][sharX - 1] + shared[tPrev][sharZ][sharY][sharX + 1] + shared[tPrev][sharZ][sharY - 1][sharX] + shared[tPrev][sharZ][sharY + 1][sharX] + shared[tPrev][sharZ - 1][sharY][sharX] + shared[tPrev][sharZ + 1][sharY][sharX] ) / 7; } else if (sharX >= 0 && sharY >= 0 && sharZ >= 0) { shared[tCurr][sharZ][sharY][sharX] = shared[tPrev][sharZ][sharY][sharX]; } } } } } __syncthreads(); #pragma unroll for (int z = PER_THREAD_OVERLAPPED_COUNT_Z; z < PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z; z++) { int sharZ = sharedZ[z]; int globZ = globalZ[z]; #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) { int sharY = sharedY[y]; int globY = globalY[y]; #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int sharX = sharedX[x]; int globX = globalX[x]; if (globX >= 0 && globY >= 0 && globZ >= 0) { result.elements[globalIndex[z][y][x]] = 
shared[tCurr][sharZ][sharY][sharX]; } } } } } /******************** * END CUDA KERNELS * ********************/ Matrix initialize_device(Matrix A, bool copyToDevice) { Matrix deviceA; deviceA.width = A.width; deviceA.height = A.height; deviceA.depth = A.depth; deviceA.dimensions = A.dimensions; size_t sizeA = A.width * A.height * A.depth * sizeof(float); HANDLE_ERROR(hipMalloc((void **) &deviceA.elements, sizeA)); if (copyToDevice) { HANDLE_ERROR(hipMemcpy(deviceA.elements, A.elements, sizeA, hipMemcpyHostToDevice)); } return deviceA; } void callKernel(Args args, Matrix A, Matrix B) { Matrix deviceA, deviceB; deviceA = initialize_device(A, true); deviceB = initialize_device(B, false); if (args.dimensions == 1) { dim3 blocks(max(args.size / TILE_WIDTH, 1)); dim3 threads(max(TILE_WIDTH / PER_THREAD_X, 1)); for (int t = 0; t < args.iterations / TILE_AGE; t++) { hipLaunchKernelGGL(( jacobi1d), dim3(blocks), dim3(threads), 0, 0, deviceA, deviceB); // checkCUDAError("jacobi1d", true); swap(deviceA, deviceB); } } else if (args.dimensions == 2) { dim3 blocks(max(args.size / TILE_WIDTH, 1), max(args.size / TILE_HEIGHT, 1)); dim3 threads(max(TILE_WIDTH / PER_THREAD_X, 1), max(TILE_HEIGHT / PER_THREAD_Y, 1)); for (int t = 0; t < args.iterations / TILE_AGE; t++) { hipLaunchKernelGGL(( jacobi2d), dim3(blocks), dim3(threads), 0, 0, deviceA, deviceB); // checkCUDAError("jacobi2d", true); swap(deviceA, deviceB); } } else { dim3 blocks(max(args.size / TILE_WIDTH, 1), max(args.size / TILE_HEIGHT, 1), max(args.size / TILE_DEPTH, 1)); dim3 threads(max(TILE_WIDTH / PER_THREAD_X, 1), max(TILE_HEIGHT / PER_THREAD_Y, 1), max(TILE_DEPTH / PER_THREAD_Z, 1)); for (int t = 0; t < args.iterations / TILE_AGE; t++) { hipLaunchKernelGGL(( jacobi3d), dim3(blocks), dim3(threads), 0, 0, deviceA, deviceB); // checkCUDAError("jacobi3d", true); swap(deviceA, deviceB); } } HANDLE_ERROR(hipMemcpy(B.elements, deviceA.elements, A.width * A.height * A.depth * sizeof(float), hipMemcpyDeviceToHost)); } // Data output void print_data(float *data, int size, int dimensions) { // if (size > 32) { // cerr << "Data too big to print\n" << endl; // return; // } if (dimensions == 1) { for (int x = 0; x < size; x++) { printf("%.3f ", data[x]); } } else if (dimensions == 2) { for (int y = 0; y < size; y++) { for (int x = 0; x < size; x++) { printf("%.3f ", data[y * size + x]); } cout << endl; } } else if (dimensions == 3) { for (int z = 0; z < size; z++) { for (int y = 0; y < size; y++) { for (int x = 0; x < size; x++) { printf("%.3f ", data[z * size * size + y * size + x]); } cout << endl; } cout << endl; } } cout << endl << endl; } // Main int main(int argc, char *argv[]) { Args args = parse_arguments(argc, argv); Matrix A, B; A = initialize_matrix(args.dimensions, args.size, args.size, args.size); B = initialize_matrix(args.dimensions, args.size, args.size, args.size); float runtime; struct timeval start, end; gettimeofday(&start, NULL); callKernel(args, A, B); gettimeofday(&end, NULL); runtime = ((end.tv_sec - start.tv_sec) * 1000.0) + ((end.tv_usec - start.tv_usec) / 1000.0); printf("Processing Time: %4.4f milliseconds\n", runtime); if (args.debug) { print_data(B.elements, args.size, args.dimensions); } }
a09ea539f755816726a01a5689cf7d735f6070fd.cu
#include <stdio.h> #include <iostream> #include <unistd.h> #include <sys/time.h> using namespace std; // Shorthand for formatting and printing usage options to stderr #define fpe(msg) fprintf(stderr, "\t%s\n", msg); // Shorthand for handling CUDA errors. #define HANDLE_ERROR(err) ( HandleError( err, __FILE__, __LINE__ ) ) /** * DEFINED VALUES HERE */ #define TILE_WIDTH 64 #define TILE_HEIGHT 8 #define TILE_DEPTH 1 #define TILE_AGE 2 #define PER_THREAD_X 2 #define PER_THREAD_Y 2 #define PER_THREAD_Z 1 /***************** * CUDA Utilites * *****************/ void HandleError(cudaError_t err, const char *file, int line) { // // Handle and report on CUDA errors. // if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line); exit(EXIT_FAILURE); } } void checkCUDAError(const char *msg, bool exitOnError) { // // Check cuda error and print result if appropriate. // cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); if (exitOnError) { exit(-1); } } } void cleanupCuda(void) { // // Clean up CUDA resources. // // // Explicitly cleans up all runtime-related resources associated with the // calling host thread. // HANDLE_ERROR( cudaThreadExit() ); } /********************* * End CUDA Utilites * *********************/ struct Args { bool debug; bool sequential; bool blocked; bool overlapped; // Data attributes int size, dimensions, alloc_size; int xSize, ySize, zSize; int xBlockSize, yBlockSize, zBlockSize, tBlockSize; // Run attributes int grid_size, block_count, thread_count, iterations; }; void usage(char *prog_name, string msg) { if (msg.size() > 0) { fputs(msg.c_str(), stderr); } fprintf(stderr, "%s\n", prog_name); fprintf(stderr, "Options are:\n"); fpe("-n<size> Set data size (default: 1024)"); fpe("-d<dims> Set number of data dimensions (1, 2, or 3) (default: 2)"); fpe("-g<size> Set grid size"); fpe("-b<num> Set block count"); fpe("-t<num> Set thread count"); fpe("-i<iter> Number of iterations to perform (default: 1000)"); fpe("-x<size> X Dimension"); fpe("-y<size> Y Dimension"); fpe("-z<size> Z Dimension"); fpe("-T<size> T Dimension"); fpe("-S Execute sequential, CPU version"); fpe("-B Execute blocked sequential, CPU version"); fpe("-O Execute sequential overlapped tiling, CPU version"); fpe("-D Print debug info"); fpe("-h Print usage info (this message)"); exit(EXIT_FAILURE); } Args parse_arguments(int argc, char *argv[]) { Args args = Args(); args.debug = false; args.sequential = false; args.blocked = false; args.overlapped = false; args.size = 1024; args.dimensions = 2; args.xSize = args.ySize = args.zSize = 1; args.xBlockSize = args.yBlockSize = args.zBlockSize = 1; args.grid_size = 1; args.block_count = -1; args.thread_count = -1; args.iterations = 1000; int opt; // Parse args while ((opt = getopt(argc, argv, "n:d:g:b:t:i:x:y:z:T:hSBOD")) != -1) { switch (opt) { case 'D': args.debug = true; break; case 'S': args.sequential = true; break; case 'B': args.blocked = true; break; case 'O': args.overlapped = true; break; case 'n': args.size = atoi(optarg); break; case 'd': args.dimensions = atoi(optarg); break; case 'g': args.grid_size = atoi(optarg); break; case 'b': args.block_count = atoi(optarg); break; case 't': args.thread_count = atoi(optarg); break; case 'i': args.iterations = atoi(optarg); break; case 'x': args.xBlockSize = atoi(optarg); break; case 'X': args.xSize = atoi(optarg); break; case 'y': args.yBlockSize = atoi(optarg); break; case 'Y': args.ySize = atoi(optarg); 
break; case 'z': args.zBlockSize = atoi(optarg); break; case 'Z': args.zSize = atoi(optarg); break; case 'T': args.tBlockSize = atoi(optarg); break; case 'h': usage(argv[0], ""); break; default: usage(argv[0], "Unrecognized option\n"); } } // check sizes if (args.size <= 0) { cout << "Data size must be larger than 0" << endl; exit(EXIT_FAILURE); } if (args.dimensions <= 0 || args.dimensions >= 4) { cerr << "Data must be 1, 2, or 3 dimensions" << endl; exit(EXIT_FAILURE); } // Calculations if (args.dimensions == 1) { args.alloc_size = args.size; } else if (args.dimensions == 2) { args.alloc_size = args.size * args.size; } else { args.alloc_size = args.size * args.size * args.size; } if (args.thread_count > 0) { args.block_count = args.alloc_size / args.thread_count; } else if (args.block_count > 0) { args.thread_count = args.alloc_size / args.block_count; } else { args.thread_count = 16; args.block_count = args.alloc_size / args.thread_count; } return args; } typedef struct { int dimensions; int height; int width; int depth; float *elements; } Matrix; Matrix initialize_matrix(int dimensions, int width, int height = 1, int depth = 1) { Matrix data; if (dimensions == 1 && width > 1) { data.width = width; data.height = 1; data.depth = 1; data.elements = (float *) malloc(width * sizeof(float)); data.elements[0] = 1.0; data.elements[width - 1] = 1.0; } else if (dimensions == 2 && width > 1 && height > 1) { data.width = width; data.height = height; data.depth = 1; data.elements = (float *) malloc(width * height * sizeof(float)); for (int y = 0; y < height; y += height - 1) { for (int x = 0; x < width; x++) { data.elements[y * width + x] = 1.0; } } for (int y = 0; y < height; y++) { for (int x = 0; x < width; x += width - 1) { data.elements[y * width + x] = 1.0; } } } else if (dimensions == 3 && width > 1 && height > 1 && depth > 1) { data.width = width; data.height = height; data.depth = depth; data.elements = (float *) malloc(width * height * depth * sizeof(float)); for (int z = 0; z < depth; z++) { // X = 0 & N planes for (int y = 0; y < height; y++) { for (int x = 0; x < width; x += width - 1) { data.elements[z * width * height + y * width + x] = 1.0; } } // Y = 0 & N planes for (int y = 0; y < height; y += height - 1) { for (int x = 0; x < width; x++) { data.elements[z * width * height + y * width + x] = 1.0; } } } // Z = 0 & N planes for (int z = 0; z < depth; z += depth - 1) { for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++) { data.elements[z * width * height + y * width + x] = 1.0; } } } } else { fprintf(stderr, "Improper dimension or size."); exit(1); } return data; } /**************** * CUDA KERNELS * ****************/ #define BLOCK_DIM_X TILE_WIDTH/PER_THREAD_X #define BLOCK_DIM_Y TILE_HEIGHT/PER_THREAD_Y #define BLOCK_DIM_Z TILE_DEPTH/PER_THREAD_Z // ceil integer division, have to use the BLOCK_DIM_ definitions rather than the defines themselves or it won't work #define PER_THREAD_OVERLAPPED_COUNT_X (TILE_AGE + TILE_WIDTH/PER_THREAD_X - 1) / (TILE_WIDTH/PER_THREAD_X) #define PER_THREAD_OVERLAPPED_COUNT_Y (TILE_AGE + TILE_HEIGHT/PER_THREAD_Y - 1) / (TILE_HEIGHT/PER_THREAD_Y) #define PER_THREAD_OVERLAPPED_COUNT_Z (TILE_AGE + TILE_DEPTH/PER_THREAD_Z - 1) / (TILE_DEPTH/PER_THREAD_Z) #define PER_THREAD_COMBINED_ITERATIONS_X (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X) #define PER_THREAD_COMBINED_ITERATIONS_Y (PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + PER_THREAD_OVERLAPPED_COUNT_Y) #define PER_THREAD_COMBINED_ITERATIONS_Z 
(PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z + PER_THREAD_OVERLAPPED_COUNT_Z) __global__ void jacobi1d(Matrix data, Matrix result) { int threadCol = threadIdx.x; int blockCol = blockIdx.x; int globalX[PER_THREAD_COMBINED_ITERATIONS_X]; int sharedX[PER_THREAD_COMBINED_ITERATIONS_X]; // Shared and local data arrays __shared__ float shared[2][(TILE_AGE + TILE_WIDTH + TILE_AGE)]; int sharedXMax = TILE_AGE + TILE_WIDTH + TILE_AGE - 1; int tCurr = 0; int tPrev = 1; // Some useful bits of info int globalBlockStart = blockCol * TILE_WIDTH; // Use >= comparison int globalBlockReadStart = max(0, globalBlockStart - TILE_AGE); // Use <= comparison int globalBlockReadEnd = min(data.width - 1, globalBlockStart + TILE_WIDTH + TILE_AGE); // Indexes in overlapped region left of the block #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + threadCol - (PER_THREAD_OVERLAPPED_COUNT_X - x) * BLOCK_DIM_X; int globX = globalBlockStart + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { // Locations inside the block int sharX = TILE_AGE + threadCol + BLOCK_DIM_X * (x - PER_THREAD_OVERLAPPED_COUNT_X); int globX = globalBlockStart + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + TILE_WIDTH + threadCol + BLOCK_DIM_X * (x - (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X)); int globX = globalBlockStart + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } __syncthreads(); /** * Global Memory: * * Block 0 Block 1 Block 2 Block 3 Block 4 * | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | * * If we're block 2, we need: * * Block 0 Block 1 Block 2 Block 3 Block 4 * | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | * | this | * * And for a tile age of AGE we also need: * * Block 0 Block 1 Block 2 Block 3 Block 4 * | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | * | this | | this | * * So what we end up with is * * Block 0 Block 1 Block 2 Block 3 Block 4 * | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | * | AGE | TLSIZE | AGE | * * TILE_AGE + TILE_SIZE + TILE_AGE */ // Read the block data itself into shared memory, this will always coalesce nicely #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { shared[0][sharedX[x]] = data.elements[globalX[x]]; } // Read the left overlapped data into shared memory #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { // Left hand side data int globX = globalX[x]; if (globX >= globalBlockReadStart && globX <= globalBlockReadEnd) { shared[0][sharedX[x]] = data.elements[globX]; } } // Read the right overlapped data into shared memory #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { // Left hand side data int globX = globalX[x]; if (globX >= 
globalBlockReadStart && globX <= globalBlockReadEnd) { shared[0][sharedX[x]] = data.elements[globX]; } } /* * Calculate Values */ #pragma unroll for (int t = 1; t <= TILE_AGE; t++) { int tmp = tCurr; tCurr = tPrev; tPrev = tmp; __syncthreads(); int iterationCalculateStart = max(globalBlockStart - TILE_AGE + t - 1, 0); int iterationCalculateEnd = min(globalBlockStart + TILE_WIDTH + TILE_AGE - t, data.width - 1); // First let's do the block itself, since that always plays nicely #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > iterationCalculateStart && globX < iterationCalculateEnd) { shared[tCurr][sharX] = (shared[tPrev][sharX] + shared[tPrev][sharX - 1] + shared[tPrev][sharX + 1]) / 3; } else if (sharX >= 0){ shared[tCurr][sharX] = shared[tPrev][sharX]; } } // Now the left overlapped regions #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > iterationCalculateStart && globX < iterationCalculateEnd) { shared[tCurr][sharX] = (shared[tPrev][sharX - 1] + shared[tPrev][sharX] + shared[tPrev][sharX + 1]) / 3; } else if (sharX >= 0){ shared[tCurr][sharX] = shared[tPrev][sharX]; } } // And the right overlapped regions #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > iterationCalculateStart && globX < iterationCalculateEnd) { shared[tCurr][sharX] = (shared[tPrev][sharX - 1] + shared[tPrev][sharX] + shared[tPrev][sharX + 1]) / 3; } else if (sharX >= 0){ shared[tCurr][sharX] = shared[tPrev][sharX]; } } } __syncthreads(); #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { result.elements[globalX[x]] = shared[tCurr][sharedX[x]]; } } __global__ void jacobi2d(Matrix data, Matrix result) { int threadRow = threadIdx.y; int threadCol = threadIdx.x; int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Indexes so we don't have to recompute them. 
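    // With the compile-time settings at the top of this file, the index bookkeeping
    // below works out to the following concrete numbers (derived from the #defines):
    //   BLOCK_DIM_X = TILE_WIDTH  / PER_THREAD_X = 64 / 2 = 32
    //   BLOCK_DIM_Y = TILE_HEIGHT / PER_THREAD_Y =  8 / 2 =  4
    //   PER_THREAD_OVERLAPPED_COUNT_X = ceil(TILE_AGE / BLOCK_DIM_X) = ceil(2 / 32) = 1
    //   PER_THREAD_OVERLAPPED_COUNT_Y = ceil(TILE_AGE / BLOCK_DIM_Y) = ceil(2 /  4) = 1
    //   PER_THREAD_COMBINED_ITERATIONS_X = 1 + 2 + 1 = 4 x-slots per thread (halo + tile + halo)
    //   PER_THREAD_COMBINED_ITERATIONS_Y = 1 + 2 + 1 = 4 y-slots per thread (halo + tile + halo)
    //   shared[2][2 + 8 + 2][2 + 64 + 2] = 2 * 12 * 68 floats = 6528 bytes of shared memory per block
    // so each 32x4 thread block advances a 64x8 output tile (plus its TILE_AGE-wide halo) by TILE_AGE time steps per launch.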
int globalIndex[PER_THREAD_COMBINED_ITERATIONS_Y][PER_THREAD_COMBINED_ITERATIONS_X]; int globalX[PER_THREAD_COMBINED_ITERATIONS_X]; int globalY[PER_THREAD_COMBINED_ITERATIONS_Y]; int sharedX[PER_THREAD_COMBINED_ITERATIONS_X]; int sharedY[PER_THREAD_COMBINED_ITERATIONS_Y]; // Shared and local data arrays __shared__ float shared[2][TILE_AGE + TILE_HEIGHT + TILE_AGE][TILE_AGE + TILE_WIDTH + TILE_AGE]; int sharedXMax = TILE_AGE + TILE_WIDTH + TILE_AGE - 1; int sharedYMax = TILE_AGE + TILE_HEIGHT + TILE_AGE - 1; int tCurr = 0; int tPrev = 1; // Some useful bits of info int globalBlockStartX = blockCol * TILE_WIDTH; int globalBlockStartY = blockRow * TILE_HEIGHT; // Use >= comparison int globalBlockReadStartX = max(0, globalBlockStartX - TILE_AGE); int globalBlockReadStartY = max(0, globalBlockStartY - TILE_AGE); // Use <= comparison int globalBlockReadEndX = min(data.width - 1, globalBlockStartX + TILE_WIDTH + TILE_AGE); int globalBlockReadEndY = min(data.height - 1, globalBlockStartY + TILE_HEIGHT + TILE_AGE); /* * Calculate indexes into the global and shared arrays */ // X Indexes // Overlapped region to the left of the block #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + threadCol - (PER_THREAD_OVERLAPPED_COUNT_X - x) * BLOCK_DIM_X; int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { // Locations inside the block int sharX = TILE_AGE + threadCol + BLOCK_DIM_X * (x - PER_THREAD_OVERLAPPED_COUNT_X); int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + TILE_WIDTH + threadCol + BLOCK_DIM_X * (x - (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X)); int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } // Y Indexes // Overlapped region below block #pragma unroll for (int y = 0; y < PER_THREAD_OVERLAPPED_COUNT_Y; y++) { // Offset by TILE_AGE to make sure it's within the range since we're going back by TILE_AGE int sharY = TILE_AGE + threadRow - (PER_THREAD_OVERLAPPED_COUNT_Y - y) * BLOCK_DIM_Y; int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Main block #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) { int sharY = TILE_AGE + threadRow + BLOCK_DIM_Y * (y - PER_THREAD_OVERLAPPED_COUNT_Y); int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Above block #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + 
PER_THREAD_OVERLAPPED_COUNT_Y; y++) { int sharY = TILE_AGE + TILE_HEIGHT + threadRow + BLOCK_DIM_Y * (y - (PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y)); int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Global absolute index #pragma unroll for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) { #pragma unroll for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) { globalIndex[y][x] = globalX[x] + globalY[y] * data.width; } } /* * Copy into shared memory */ // TODO: Break into main block and overlapped regions blocks so the main block can at least be coalesced #pragma unroll for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) { #pragma unroll for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) { /* * We want to be doing block-contiguous reads, e.g. for 2x2 block dimension, 2 per thread for x and y * we want the read pattern to look like: * * 11|22 * 11|22 * ----- * 33|44 * 33|44 * * Optimizing the width for reads is the responsibility of the calling code. */ if (globalX[x] >= 0 && globalX[x] < data.width && globalY[y] >= 0 && globalY[y] < data.height) { shared[0][sharedY[y]][sharedX[x]] = data.elements[globalIndex[y][x]]; } } } /* * Calculate Values */ // TODO Brevity and clarity might be better than this mismatched thing after all #pragma unroll for (int t = 1; t <= TILE_AGE; t++) { int tmp = tCurr; tCurr = tPrev; tPrev = tmp; __syncthreads(); int calculateStartX = max(globalBlockStartX - TILE_AGE + t - 1, 0); int calculateEndX = min(globalBlockStartX + TILE_WIDTH + TILE_AGE - t, data.width - 1); int calculateStartY = max(globalBlockStartY - TILE_AGE + t - 1, 0); int calculateEndY = min(globalBlockStartY + TILE_HEIGHT + TILE_AGE - t, data.height - 1); #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) { int globY = globalY[y]; int sharY = sharedY[y]; // First the main block since that's nicely laid out #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { // Calculate new value shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // Now the left overlapped regions #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // And the right overlapped regions #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < 
calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } } // Now the overlapped region below the block #pragma unroll for (int y = 0; y < PER_THREAD_OVERLAPPED_COUNT_Y; y++) { int globY = globalY[y]; int sharY = sharedY[y]; #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { // Calculate new value shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // Now the left and below overlapped region #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // And the right and below overlapped region #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } } // Overlapped region above the block #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + PER_THREAD_OVERLAPPED_COUNT_Y; y++) { int globY = globalY[y]; int sharY = sharedY[y]; #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { // Calculate new value shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // Now the left and below overlapped region #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( 
shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } // And the right and below overlapped region #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) { shared[tCurr][sharY][sharX] = ( shared[tPrev][sharY][sharX - 1] + shared[tPrev][sharY][sharX] + shared[tPrev][sharY][sharX + 1] + shared[tPrev][sharY - 1][sharX] + shared[tPrev][sharY + 1][sharX] ) * 0.2f; } else if (sharX >= 0 && sharY >=0){ shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX]; } } } } __syncthreads(); #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) { int sharY = sharedY[y]; int globY = globalY[y]; #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int sharX = sharedX[x]; int globX = globalX[x]; if (globX >= 0 && globX < data.width && globY >= 0 && globY < data.height) { result.elements[globalIndex[y][x]] = shared[tCurr][sharY][sharX]; } } } } __global__ void jacobi3d(Matrix data, Matrix result) { int threadCol = threadIdx.x; int threadRow = threadIdx.y; int threadDep = threadIdx.z; int blockCol = blockIdx.x; int blockRow = blockIdx.y; int blockDep = blockIdx.z; // Indexes so we don't have to recompute them. int globalIndex[PER_THREAD_COMBINED_ITERATIONS_Z][PER_THREAD_COMBINED_ITERATIONS_Y][PER_THREAD_COMBINED_ITERATIONS_X]; int globalX[PER_THREAD_COMBINED_ITERATIONS_X]; int globalY[PER_THREAD_COMBINED_ITERATIONS_Y]; int globalZ[PER_THREAD_COMBINED_ITERATIONS_Z]; int sharedX[PER_THREAD_COMBINED_ITERATIONS_X]; int sharedY[PER_THREAD_COMBINED_ITERATIONS_Y]; int sharedZ[PER_THREAD_COMBINED_ITERATIONS_Z]; // Shared and local data arrays __shared__ float shared[2][TILE_AGE + TILE_DEPTH + TILE_AGE][TILE_AGE + TILE_HEIGHT + TILE_AGE][TILE_AGE + TILE_WIDTH + TILE_AGE]; int sharedXMax = TILE_AGE + TILE_WIDTH + TILE_AGE - 1; int sharedYMax = TILE_AGE + TILE_HEIGHT + TILE_AGE - 1; int sharedZMax = TILE_AGE + TILE_DEPTH + TILE_AGE - 1; int tCurr = 0; int tPrev = 1; // Some useful bits of info int globalBlockStartX = blockCol * TILE_WIDTH; int globalBlockStartY = blockRow * TILE_HEIGHT; int globalBlockStartZ = blockDep * TILE_DEPTH; // Use >= comparison int globalBlockReadStartX = max(0, globalBlockStartX - TILE_AGE); int globalBlockReadStartY = max(0, globalBlockStartY - TILE_AGE); int globalBlockReadStartZ = max(0, globalBlockStartZ - TILE_AGE); // Use <= comparison int globalBlockReadEndX = min(data.width - 1, globalBlockStartX + TILE_WIDTH + TILE_AGE); int globalBlockReadEndY = min(data.height - 1, globalBlockStartY + TILE_HEIGHT + TILE_AGE); int globalBlockReadEndZ = min(data.depth - 1, globalBlockStartZ + TILE_DEPTH + TILE_AGE); /* * Calculate indexes into the global and shared arrays */ // Overlapped region to the left of the block #pragma unroll for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + threadCol - (PER_THREAD_OVERLAPPED_COUNT_X - x) * BLOCK_DIM_X; int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { 
sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { // Locations inside the block int sharX = TILE_AGE + threadCol + BLOCK_DIM_X * (x - PER_THREAD_OVERLAPPED_COUNT_X); int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) { int sharX = TILE_AGE + TILE_WIDTH + threadCol + BLOCK_DIM_X * (x - (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X)); int globX = globalBlockStartX + sharX - TILE_AGE; if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) { sharedX[x] = -1; globalX[x] = -1; } else { sharedX[x] = sharX; globalX[x] = globX; } } // Y Indexes // Overlapped region below block #pragma unroll for (int y = 0; y < PER_THREAD_OVERLAPPED_COUNT_Y; y++) { // Offset by TILE_AGE to make sure it's within the range since we're going back by TILE_AGE int sharY = TILE_AGE + threadRow - (PER_THREAD_OVERLAPPED_COUNT_Y - y) * BLOCK_DIM_Y; int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Main block #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) { int sharY = TILE_AGE + threadRow + BLOCK_DIM_Y * (y - PER_THREAD_OVERLAPPED_COUNT_Y); int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Above block #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + PER_THREAD_OVERLAPPED_COUNT_Y; y++) { int sharY = TILE_AGE + TILE_HEIGHT + threadRow + BLOCK_DIM_Y * (y - (PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y)); int globY = globalBlockStartY + sharY - TILE_AGE; if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) { sharedY[y] = -1; globalY[y] = -1; } else { sharedY[y] = sharY; globalY[y] = globY; } } // Z Indexes // Overlapped region in front of block #pragma unroll for (int z = 0; z < PER_THREAD_OVERLAPPED_COUNT_Z; z++) { // Offset by TILE_AGE to make sure it's within the range since we're going back by TILE_AGE int sharZ = TILE_AGE + threadDep - (PER_THREAD_OVERLAPPED_COUNT_Z - z) * BLOCK_DIM_Z; // Remove the offset for the global index int globZ = globalBlockStartZ + sharZ - TILE_AGE; if (sharZ < 0 || sharZ > sharedZMax || globZ < 0 || globZ > data.depth - 1) { sharedZ[z] = -1; globalZ[z] = -1; } else { sharedZ[z] = sharZ; globalZ[z] = globZ; } } // Main block #pragma unroll for (int z = PER_THREAD_OVERLAPPED_COUNT_Z; z < PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z; z++) { int sharZ = TILE_AGE + threadDep + BLOCK_DIM_Z * (z - PER_THREAD_OVERLAPPED_COUNT_Z); int globZ = globalBlockStartZ + sharZ - TILE_AGE; if (sharZ < 0 || sharZ > sharedZMax || globZ < 0 || globZ > data.depth - 1) { sharedZ[z] = -1; globalZ[z] = -1; } else { sharedZ[z] = sharZ; globalZ[z] = globZ; } } // Overlapped region behind block #pragma unroll for (int z = 
PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z; z < PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z + PER_THREAD_OVERLAPPED_COUNT_Z; z++) { int sharZ = TILE_AGE + TILE_DEPTH + threadDep + BLOCK_DIM_Z * (z - (PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z)); int globZ = globalBlockStartZ + sharZ - TILE_AGE; if (sharZ < 0 || sharZ > sharedZMax || globZ < 0 || globZ > data.depth - 1) { sharedZ[z] = -1; globalZ[z] = -1; } else { sharedZ[z] = sharZ; globalZ[z] = globZ; } } // Global absolute index #pragma unroll for (int z = 0; z < PER_THREAD_COMBINED_ITERATIONS_Z; z++) { int zTemp = globalZ[z] * data.width * data.height; #pragma unroll for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) { int yTemp = globalY[y] * data.width; #pragma unroll for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) { globalIndex[z][y][x] = globalX[x] + yTemp + zTemp; } } } /* * Copy into shared memory */ #pragma unroll for (int z = 0; z < PER_THREAD_COMBINED_ITERATIONS_Z; z++) { #pragma unroll for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) { #pragma unroll for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) { if (globalX[x] >= 0 && globalX[x] < data.width && globalY[y] >= 0 && globalY[y] < data.height && globalZ[z] >= 0 && globalZ[z] < data.depth) { shared[0][sharedZ[z]][sharedY[y]][sharedX[x]] = data.elements[globalIndex[z][y][x]]; } } } } #pragma unroll for (int t = 1; t <= TILE_AGE; t++) { int tmp = tCurr; tCurr = tPrev; tPrev = tmp; __syncthreads(); int calculateStartX = max(globalBlockStartX - TILE_AGE + t - 1, 0); int calculateEndX = min(globalBlockStartX + TILE_WIDTH + TILE_AGE - t, data.width - 1); int calculateStartY = max(globalBlockStartY - TILE_AGE + t - 1, 0); int calculateEndY = min(globalBlockStartY + TILE_HEIGHT + TILE_AGE - t, data.height - 1); int calculateStartZ = max(globalBlockStartZ - TILE_AGE + t - 1, 0); int calculateEndZ = min(globalBlockStartZ + TILE_DEPTH + TILE_AGE - t, data.depth - 1); #pragma unroll for (int z = 0; z < PER_THREAD_COMBINED_ITERATIONS_Z; z++) { int globZ = globalZ[z]; int sharZ = sharedZ[z]; #pragma unroll for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) { int globY = globalY[y]; int sharY = sharedY[y]; #pragma unroll for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) { int globX = globalX[x]; int sharX = sharedX[x]; if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY && globZ > calculateStartZ && globZ < calculateEndZ) { shared[tCurr][sharZ][sharY][sharX] = ( shared[tPrev][sharZ][sharY][sharX] + shared[tPrev][sharZ][sharY][sharX - 1] + shared[tPrev][sharZ][sharY][sharX + 1] + shared[tPrev][sharZ][sharY - 1][sharX] + shared[tPrev][sharZ][sharY + 1][sharX] + shared[tPrev][sharZ - 1][sharY][sharX] + shared[tPrev][sharZ + 1][sharY][sharX] ) / 7; } else if (sharX >= 0 && sharY >= 0 && sharZ >= 0) { shared[tCurr][sharZ][sharY][sharX] = shared[tPrev][sharZ][sharY][sharX]; } } } } } __syncthreads(); #pragma unroll for (int z = PER_THREAD_OVERLAPPED_COUNT_Z; z < PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z; z++) { int sharZ = sharedZ[z]; int globZ = globalZ[z]; #pragma unroll for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) { int sharY = sharedY[y]; int globY = globalY[y]; #pragma unroll for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) { int sharX = sharedX[x]; int globX = globalX[x]; if (globX >= 0 && globY >= 0 && globZ >= 0) { result.elements[globalIndex[z][y][x]] = 
shared[tCurr][sharZ][sharY][sharX]; } } } } } /******************** * END CUDA KERNELS * ********************/ Matrix initialize_device(Matrix A, bool copyToDevice) { Matrix deviceA; deviceA.width = A.width; deviceA.height = A.height; deviceA.depth = A.depth; deviceA.dimensions = A.dimensions; size_t sizeA = A.width * A.height * A.depth * sizeof(float); HANDLE_ERROR(cudaMalloc((void **) &deviceA.elements, sizeA)); if (copyToDevice) { HANDLE_ERROR(cudaMemcpy(deviceA.elements, A.elements, sizeA, cudaMemcpyHostToDevice)); } return deviceA; } void callKernel(Args args, Matrix A, Matrix B) { Matrix deviceA, deviceB; deviceA = initialize_device(A, true); deviceB = initialize_device(B, false); if (args.dimensions == 1) { dim3 blocks(max(args.size / TILE_WIDTH, 1)); dim3 threads(max(TILE_WIDTH / PER_THREAD_X, 1)); for (int t = 0; t < args.iterations / TILE_AGE; t++) { jacobi1d<<<blocks, threads>>>(deviceA, deviceB); // checkCUDAError("jacobi1d", true); swap(deviceA, deviceB); } } else if (args.dimensions == 2) { dim3 blocks(max(args.size / TILE_WIDTH, 1), max(args.size / TILE_HEIGHT, 1)); dim3 threads(max(TILE_WIDTH / PER_THREAD_X, 1), max(TILE_HEIGHT / PER_THREAD_Y, 1)); for (int t = 0; t < args.iterations / TILE_AGE; t++) { jacobi2d<<<blocks, threads>>>(deviceA, deviceB); // checkCUDAError("jacobi2d", true); swap(deviceA, deviceB); } } else { dim3 blocks(max(args.size / TILE_WIDTH, 1), max(args.size / TILE_HEIGHT, 1), max(args.size / TILE_DEPTH, 1)); dim3 threads(max(TILE_WIDTH / PER_THREAD_X, 1), max(TILE_HEIGHT / PER_THREAD_Y, 1), max(TILE_DEPTH / PER_THREAD_Z, 1)); for (int t = 0; t < args.iterations / TILE_AGE; t++) { jacobi3d<<<blocks, threads>>>(deviceA, deviceB); // checkCUDAError("jacobi3d", true); swap(deviceA, deviceB); } } HANDLE_ERROR(cudaMemcpy(B.elements, deviceA.elements, A.width * A.height * A.depth * sizeof(float), cudaMemcpyDeviceToHost)); } // Data output void print_data(float *data, int size, int dimensions) { // if (size > 32) { // cerr << "Data too big to print\n" << endl; // return; // } if (dimensions == 1) { for (int x = 0; x < size; x++) { printf("%.3f ", data[x]); } } else if (dimensions == 2) { for (int y = 0; y < size; y++) { for (int x = 0; x < size; x++) { printf("%.3f ", data[y * size + x]); } cout << endl; } } else if (dimensions == 3) { for (int z = 0; z < size; z++) { for (int y = 0; y < size; y++) { for (int x = 0; x < size; x++) { printf("%.3f ", data[z * size * size + y * size + x]); } cout << endl; } cout << endl; } } cout << endl << endl; } // Main int main(int argc, char *argv[]) { Args args = parse_arguments(argc, argv); Matrix A, B; A = initialize_matrix(args.dimensions, args.size, args.size, args.size); B = initialize_matrix(args.dimensions, args.size, args.size, args.size); float runtime; struct timeval start, end; gettimeofday(&start, NULL); callKernel(args, A, B); gettimeofday(&end, NULL); runtime = ((end.tv_sec - start.tv_sec) * 1000.0) + ((end.tv_usec - start.tv_usec) / 1000.0); printf("Processing Time: %4.4f milliseconds\n", runtime); if (args.debug) { print_data(B.elements, args.size, args.dimensions); } }
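/*
 * Illustrative addition, not part of the original file: a minimal CPU
 * reference for the 2D case that can be used to spot-check the temporally
 * tiled jacobi2d kernel. It assumes one kernel launch is meant to match
 * TILE_AGE plain Jacobi sweeps with the domain boundary held fixed, which is
 * how the in-kernel guards behave; the function name and its use here are
 * hypothetical.
 */
float* jacobi2d_reference(float *data, float *scratch, int size, int iterations) {
    for (int t = 0; t < iterations; t++) {
        for (int y = 0; y < size; y++) {
            for (int x = 0; x < size; x++) {
                int id = y * size + x;
                if (x > 0 && x < size - 1 && y > 0 && y < size - 1) {
                    // Same 5-point average as the device code
                    scratch[id] = (data[id - 1] + data[id] + data[id + 1] +
                                   data[id - size] + data[id + size]) * 0.2f;
                } else {
                    scratch[id] = data[id];   // boundary cells are never updated
                }
            }
        }
        float *tmp = data; data = scratch; scratch = tmp;
    }
    return data;   // whichever buffer holds the result of the final sweep
}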
d5dd17eecb4d9d7e66b3e7a6c2ae0ca5e677db78.hip
// !!! This is a file automatically generated by hipify!!! //MIT License // //Copyright(c) 2020 Zheng Jiaqi @NUSComputing // //Permission is hereby granted, free of charge, to any person obtaining a copy //of this software and associated documentation files(the "Software"), to deal //in the Software without restriction, including without limitation the rights //to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //copies of the Software, and to permit persons to whom the Software is //furnished to do so, subject to the following conditions : // //The above copyright notice and this permission notice shall be included in all //copies or substantial portions of the Software. // //THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE //AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE //SOFTWARE. #include <stdio.h> #include <unordered_map> #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <helper_timer.h> #include <vector> #include <algorithm> // Parameters for CUDA kernel executions #define BLOCKX 16 #define BLOCKY 16 #define BLOCKSIZE 64 #define BAND 256 // For simplicity, just assume we never need to work with a smaller texture. #define THRESHOLD 1e-5 #define MARKER -32768 #define TOID(x, y, n) ((y) * (n) + (x)) #define debug_error 1 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // Global Variables short2 **pbaTextures, *pbaMargin; // Two textures used to compute 2D Voronoi Diagram short2 *pbaVoronoi, *pbaTemp; float **pbaDensity; float **pbaPrefixX, **pbaPrefixY, **pbaPrefixW; float *pbaTotalX, *pbaTotalY, *pbaTotalW; float *pbaEnergyTex, pbaEnergy_h; float pbaOmega; bool *constrainMask_d; int pbaScale; int pbaBuffer; // Current buffer int pbaMemSize; // Size (in bytes) of a texture int pbaTexSize; // Texture size (squared texture) // Fill an array with some value __global__ void kernelFillShort(short2* arr, short value, int texSize) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; arr[__mul24(y, texSize) + x] = make_short2(value, value); } //////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// Parallel Banding Algorithm plus ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernelFloodDown(short2 *input, short2 *output, int size, int bandSize) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * bandSize; int id = TOID(tx, ty, size); short2 pixel1, pixel2; pixel1 = make_short2(MARKER, MARKER); for (int i = 0; i < bandSize; i++, id += size) { pixel2 = input[id]; if (pixel2.x != MARKER) pixel1 = pixel2; output[id] = pixel1; } } __global__ void kernelFloodUp(short2 *input, short2 *output, int size, int bandSize) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = (blockIdx.y + 1) * bandSize - 1; int id = TOID(tx, 
ty, size); short2 pixel1, pixel2; int dist1, dist2; pixel1 = make_short2(MARKER, MARKER); for (int i = 0; i < bandSize; i++, id -= size) { dist1 = abs(pixel1.y - ty + i); pixel2 = input[id]; dist2 = abs(pixel2.y - ty + i); if (dist2 < dist1) pixel1 = pixel2; output[id] = pixel1; } } __global__ void kernelPropagateInterband(short2 *input, short2 *output, int size, int bandSize) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int inc = bandSize * size; int ny, nid, nDist; short2 pixel; // Top row, look backward int ty = blockIdx.y * bandSize; int topId = TOID(tx, ty, size); int bottomId = TOID(tx, ty + bandSize - 1, size); int tid = blockIdx.y * size + tx; int bid = tid + (size * size / bandSize); pixel = input[topId]; int myDist = abs(pixel.y - ty); output[tid] = pixel; for (nid = bottomId - inc; nid >= 0; nid -= inc) { pixel = input[nid]; if (pixel.x != MARKER) { nDist = abs(pixel.y - ty); if (nDist < myDist) output[tid] = pixel; break; } } // Last row, look downward ty = ty + bandSize - 1; pixel = input[bottomId]; myDist = abs(pixel.y - ty); output[bid] = pixel; for (ny = ty + 1, nid = topId + inc; ny < size; ny += bandSize, nid += inc) { pixel = input[nid]; if (pixel.x != MARKER) { nDist = abs(pixel.y - ty); if (nDist < myDist) output[bid] = pixel; break; } } } __global__ void kernelUpdateVertical(short2 *color, short2 *margin, short2 *output, int size, int bandSize) { __shared__ short2 block[BLOCKSIZE][BLOCKSIZE]; int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * bandSize; short2 top = margin[blockIdx.y * size + tx]; short2 bottom = margin[(blockIdx.y + size / bandSize) * size + tx]; short2 pixel; int dist, myDist; int id = TOID(tx, ty, size); int n_step = bandSize / blockDim.x; for (int step = 0; step < n_step; ++step) { int y_start = blockIdx.y * bandSize + step * blockDim.x; int y_end = y_start + blockDim.x; for (ty = y_start; ty < y_end; ++ty, id += size) { pixel = color[id]; myDist = abs(pixel.y - ty); dist = abs(top.y - ty); if (dist < myDist) { myDist = dist; pixel = top; } dist = abs(bottom.y - ty); if (dist < myDist) pixel = bottom; block[threadIdx.x][ty - y_start] = make_short2(pixel.y, pixel.x); } __syncthreads(); int tid = TOID(blockIdx.y * bandSize + step * blockDim.x + threadIdx.x, \ blockIdx.x * blockDim.x, size); for (int i = 0; i < blockDim.x; ++i, tid += size) { output[tid] = block[i][threadIdx.x]; } __syncthreads(); } } #define LL long long __device__ bool dominate(LL x1, LL y1, LL x2, LL y2, LL x3, LL y3, LL x0) { LL k1 = y2 - y1, k2 = y3 - y2; return (k1 * (y1 + y2) + (x2 - x1) * ((x1 + x2) - (x0 << 1))) * k2 > \ (k2 * (y2 + y3) + (x3 - x2) * ((x2 + x3) - (x0 << 1))) * k1; } #undef LL __global__ void kernelProximatePoints(short2 *input, short2 *stack, int size, int bandSize) { int tx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int ty = __mul24(blockIdx.y, bandSize); int id = TOID(tx, ty, size); int lasty = -1; short2 last1, last2, current; last1.y = -1; last2.y = -1; for (int i = 0; i < bandSize; i++, id += size) { current = input[id]; if (current.x != MARKER) { while (last2.y >= 0) { if (!dominate(last1.x, last2.y, last2.x, \ lasty, current.x, current.y, tx)) break; lasty = last2.y; last2 = last1; if (last1.y >= 0) last1 = stack[TOID(tx, last1.y, size)]; } last1 = last2; last2 = make_short2(current.x, lasty); lasty = current.y; stack[id] = last2; } } // Store the pointer to the tail at the last pixel of this band if (lasty != ty + bandSize - 1) stack[TOID(tx, ty + bandSize - 1, size)] = make_short2(MARKER, lasty); } __global__ void 
kernelCreateForwardPointers(short2 *input, short2 *output, int size, int bandSize) { int tx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int ty = __mul24(blockIdx.y + 1, bandSize) - 1; int id = TOID(tx, ty, size); int lasty = -1, nexty; short2 current; // Get the tail pointer current = input[id]; if (current.x == MARKER) nexty = current.y; else nexty = ty; for (int i = 0; i < bandSize; i++, id -= size) if (ty - i == nexty) { current = make_short2(lasty, input[id].y); output[id] = current; lasty = nexty; nexty = current.y; } // Store the pointer to the head at the first pixel of this band if (lasty != ty - bandSize + 1) output[id + size] = make_short2(lasty, MARKER); } __global__ void kernelMergeBands(short2 *color, short2 *link, short2 *output, int size, int bandSize) { int tx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int band1 = blockIdx.y * 2; int band2 = band1 + 1; int firsty, lasty; short2 last1, last2, current; // last1 and last2: x component store the x coordinate of the site, // y component store the backward pointer // current: y component store the x coordinate of the site, // x component store the forward pointer // Get the two last items of the first list lasty = __mul24(band2, bandSize) - 1; last2 = make_short2(color[TOID(tx, lasty, size)].x, link[TOID(tx, lasty, size)].y); if (last2.x == MARKER) { lasty = last2.y; if (lasty >= 0) last2 = make_short2(color[TOID(tx, lasty, size)].x, link[TOID(tx, lasty, size)].y); else last2 = make_short2(MARKER, MARKER); } if (last2.y >= 0) { // Second item at the top of the stack last1 = make_short2(color[TOID(tx, last2.y, size)].x, link[TOID(tx, last2.y, size)].y); } // Get the first item of the second band firsty = __mul24(band2, bandSize); current = make_short2(link[TOID(tx, firsty, size)].x, color[TOID(tx, firsty, size)].x); if (current.y == MARKER) { firsty = current.x; if (firsty >= 0) current = make_short2(link[TOID(tx, firsty, size)].x, color[TOID(tx, firsty, size)].x); else current = make_short2(MARKER, MARKER); } // Count the number of item in the second band that survive so far. // Once it reaches 2, we can stop. int top = 0; while (top < 2 && current.y >= 0) { // While there's still something on the left while (last2.y >= 0) { if (!dominate(last1.x, last2.y, last2.x, \ lasty, current.y, firsty, tx)) break; lasty = last2.y; last2 = last1; top--; if (last1.y >= 0) last1 = make_short2(color[TOID(tx, last1.y, size)].x, link[TOID(tx, last1.y, size)].y); } // Update the current pointer output[TOID(tx, firsty, size)] = make_short2(current.x, lasty); if (lasty >= 0) output[TOID(tx, lasty, size)] = make_short2(firsty, last2.y); last1 = last2; last2 = make_short2(current.y, lasty); lasty = firsty; firsty = current.x; top = max(1, top + 1); // Advance the current pointer to the next one if (firsty >= 0) current = make_short2(link[TOID(tx, firsty, size)].x, color[TOID(tx, firsty, size)].x); else current = make_short2(MARKER, MARKER); } // Update the head and tail pointer. firsty = __mul24(band1, bandSize); lasty = __mul24(band2, bandSize); current = link[TOID(tx, firsty, size)]; if (current.y == MARKER && current.x < 0) { // No head? last1 = link[TOID(tx, lasty, size)]; if (last1.y == MARKER) current.x = last1.x; else current.x = lasty; output[TOID(tx, firsty, size)] = current; } firsty = __mul24(band1, bandSize) + bandSize - 1; lasty = __mul24(band2, bandSize) + bandSize - 1; current = link[TOID(tx, lasty, size)]; if (current.x == MARKER && current.y < 0) { // No tail? 
last1 = link[TOID(tx, firsty, size)]; if (last1.x == MARKER) current.y = last1.y; else current.y = firsty; output[TOID(tx, lasty, size)] = current; } } __global__ void kernelDoubleToSingleList(short2 *color, short2 *link, short2 *output, int size) { int tx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int ty = blockIdx.y; int id = TOID(tx, ty, size); output[id] = make_short2(color[id].x, link[id].y); } __global__ void kernelColor(short2 *input, short2 *output, int size) { __shared__ short2 block[BLOCKSIZE][BLOCKSIZE]; int col = threadIdx.x; int tid = threadIdx.y; int tx = __mul24(blockIdx.x, blockDim.x) + col; int dx, dy, lasty; unsigned int best, dist; short2 last1, last2; lasty = size - 1; last2 = input[TOID(tx, lasty, size)]; if (last2.x == MARKER) { lasty = last2.y; last2 = input[TOID(tx, lasty, size)]; } if (last2.y >= 0) last1 = input[TOID(tx, last2.y, size)]; int y_start, y_end, n_step = size / blockDim.x; for (int step = 0; step < n_step; ++step) { y_start = size - step * blockDim.x - 1; y_end = size - (step + 1) * blockDim.x; for (int ty = y_start - tid; ty >= y_end; ty -= blockDim.y) { dx = last2.x - tx; dy = lasty - ty; best = dist = __mul24(dx, dx) + __mul24(dy, dy); while (last2.y >= 0) { dx = last1.x - tx; dy = last2.y - ty; dist = __mul24(dx, dx) + __mul24(dy, dy); if (dist > best) break; best = dist; lasty = last2.y; last2 = last1; if (last2.y >= 0) last1 = input[TOID(tx, last2.y, size)]; } block[threadIdx.x][ty - y_end] = make_short2(lasty, last2.x); } __syncthreads(); int iinc = size * blockDim.y; int id = TOID(y_end + threadIdx.x, blockIdx.x * blockDim.x + tid, size); for (int i = tid; i < blockDim.x; i += blockDim.y, id += iinc) { output[id] = block[i][threadIdx.x]; } __syncthreads(); } } //////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////// Centroidal Voronoi Tessellation //////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernelZoomIn(short2 *input, short2 *output, int size, int scale) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; int id = TOID(tx, ty, size); int tid = TOID(tx << scale, ty << scale, size << scale); short2 pixel = input[id]; output[tid] = (pixel.x == MARKER) ? 
make_short2(MARKER, MARKER) : make_short2(pixel.x << scale, pixel.y << scale); } __global__ void kernelDensityScaling(float *input, float *output, int size) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; float density = 0; for (int x = (tx << 1); x < (tx << 1) + 2; ++x) { for (int y = (ty << 1); y < (ty << 1) + 2; ++y) { density += input[TOID(x, y, size << 1)]; } } output[TOID(tx, ty, size)] = density / 4.0; } // compute the prefix sum of weight, x*weight and y*weight for each row extern __shared__ float tmpScan[]; __global__ void kernelComputeWeightedPrefixX(float *prefixW, float *prefixX, float *prefixY, float *density, int texWidth) { float *tmpX = tmpScan; float *tmpY = tmpX + blockDim.x; float *tmpWeight = tmpY + blockDim.x; float pW, pX, pY; int tid = threadIdx.x; int tx, ty = blockIdx.x; float lastX = 0.0f, lastY = 0.0f, lastW = 0.0f; int id = __mul24(ty, texWidth); for (int xx = 0; xx < texWidth; xx += blockDim.x) { tx = xx + tid; pW = density[id + tx]; pX = lastX + tx * pW; pY = lastY + ty * pW; pW = lastW + pW; tmpWeight[tid] = pW; tmpX[tid] = pX; tmpY[tid] = pY; __syncthreads(); for (int step = 1; step < blockDim.x; step *= 2) { // parallel prefix sum within a block if (tid >= step) { pW += tmpWeight[tid - step]; pX += tmpX[tid - step]; pY += tmpY[tid - step]; } __syncthreads(); tmpWeight[tid] = pW; tmpX[tid] = pX; tmpY[tid] = pY; __syncthreads(); } prefixX[id + tx] = tmpX[tid]; prefixY[id + tx] = tmpY[tid]; prefixW[id + tx] = tmpWeight[tid]; if (tid == 0) { lastX = tmpX[blockDim.x - 1]; lastY = tmpY[blockDim.x - 1]; lastW = tmpWeight[blockDim.x - 1]; } __syncthreads(); } } // 2D -> 1D Voronoi Diagram extern __shared__ short sharedVor[]; __global__ void kernelVoronoi1D(short2 *input, int *output, int texWidth) { int tid = threadIdx.x; int tx, ty = blockIdx.x; int id = __mul24(ty, texWidth); // Initialize for (tx = tid; tx < texWidth; tx += blockDim.x) sharedVor[tx] = MARKER; __syncthreads(); // Mark for (tx = tid; tx < texWidth; tx += blockDim.x) { short2 pixel = input[id + tx]; sharedVor[pixel.x] = pixel.y; } __syncthreads(); // Write id /= 2; for (tx = tid; tx < texWidth / 2; tx += blockDim.x) output[id + tx] = ((int *)sharedVor)[tx]; } __global__ void kernelTotal_X(short2 *voronoi, float *prefixX, float *prefixY, float *prefixW, \ float *totalX, float *totalY, float *totalW, int texWidth) { // Shared array to store the sums __shared__ float sharedTotalX[BAND]; // BAND = 256 __shared__ float sharedTotalY[BAND]; __shared__ float sharedTotalW[BAND]; __shared__ int startBlk[100], endBlk[100]; // 100 blocks is more than enough int count; int tid = threadIdx.x; int tx, ty = blockIdx.x, offset; int id = __mul24(ty, texWidth); short2 me, other; int margin = tid * BAND; if (margin < texWidth) { startBlk[tid] = 0; endBlk[tid] = texWidth; for (tx = 0; tx < texWidth; tx += blockDim.x) { me = voronoi[id + tx]; if (me.x >= margin) { startBlk[tid] = max(0, tx - int(blockDim.x)); break; } } for (; tx < texWidth; tx += blockDim.x) { me = voronoi[id + tx]; if (me.x >= margin + BAND) { endBlk[tid] = tx; break; } } } __syncthreads(); count = 0; // We process one BAND at a time. 
for (margin = 0; margin < texWidth; margin += BAND, count++) { // Only for the first iteration of tx // Make sure we detect the boundary at tx = 0 other.x = -1; // Left edge, scan through the row for (tx = startBlk[count] + tid; tx < endBlk[count]; tx += blockDim.x) { if (tx > 0) other = voronoi[id + tx - 1]; me = voronoi[id + tx]; offset = me.x - margin; // margin <= me.x < margin + BAND && the closest site of the previous pixel is different if (offset >= 0 && offset < BAND && other.x < me.x) { if (tx > 0) { sharedTotalX[offset] = prefixX[id + tx - 1]; sharedTotalY[offset] = prefixY[id + tx - 1]; sharedTotalW[offset] = prefixW[id + tx - 1]; } else { sharedTotalX[offset] = 0.0f; sharedTotalY[offset] = 0.0f; sharedTotalW[offset] = 0.0f; } } } __syncthreads(); // Right edge for (tx = startBlk[count] + tid; tx < endBlk[count]; tx += blockDim.x) { me = voronoi[id + tx]; offset = me.x - margin; if (tx < texWidth - 1) other = voronoi[id + tx + 1]; else other.x = texWidth; // margin <= me.x < margin + BAND && the closest site of the next pixel is different if (offset >= 0 && offset < BAND && me.x < other.x) { sharedTotalX[offset] = prefixX[id + tx] - sharedTotalX[offset]; sharedTotalY[offset] = prefixY[id + tx] - sharedTotalY[offset]; sharedTotalW[offset] = prefixW[id + tx] - sharedTotalW[offset]; } } __syncthreads(); // Write for (tx = tid; tx < BAND; tx += blockDim.x) if (margin + tx < texWidth) { totalX[id + margin + tx] = sharedTotalX[tx]; totalY[id + margin + tx] = sharedTotalY[tx]; totalW[id + margin + tx] = sharedTotalW[tx]; } } } __global__ void kernelScan_Y(short *voronoi, float *totalX, float *totalY, float *totalW, int size) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * BLOCKSIZE; int id = TOID(tx, ty, size), tid; short pixel, last = MARKER; float tmpX = 0.0, tmpY = 0.0, tmpW = 0.0; for (int i = 0; i < BLOCKSIZE; ++i, ++ty, id += size) { __syncthreads(); pixel = voronoi[id]; if (pixel != last) { if (last != MARKER) { tid = TOID(tx, last, size); atomicAdd(totalX + tid, tmpX); atomicAdd(totalY + tid, tmpY); atomicAdd(totalW + tid, tmpW); } tmpX = tmpY = tmpW = 0.0; last = pixel; } if (pixel != MARKER && pixel != ty) { tmpX += totalX[id]; tmpY += totalY[id]; tmpW += totalW[id]; } } if (last != MARKER) { tid = TOID(tx, last, size); atomicAdd(totalX + tid, tmpX); atomicAdd(totalY + tid, tmpY); atomicAdd(totalW + tid, tmpW); } } __global__ void kernelDebug_Y(short *voronoi, float *totalX, float *totalY, float *totalW, int size) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = 0; int id = TOID(tx, ty, size); short pixel, last = MARKER; for (; ty < size; ++ty, id += size) { pixel = voronoi[id]; if (pixel != last) { if (last != MARKER) printf("%d %d\n", tx, last); last = pixel; } } if (last != MARKER) printf("%d %d\n", tx, last); } __global__ void kernelUpdateSites(short *voronoi, float *totalX, float *totalY, float *totalW, float *density, short2 *output, bool *mask, int size, float omega) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; float pX, pY, pW; int id = TOID(tx, ty, size); short pixel, seed = voronoi[id]; if (seed != ty) return; pX = totalX[id]; pY = totalY[id]; pW = totalW[id]; float _x = pX / pW, _y = pY / pW; short2 rc = make_short2(tx + (_x - tx) * omega + 0.5f, ty + (_y - ty) * omega + 0.5f); rc.x = max(min(rc.x, size - 1), 0); rc.y = max(min(rc.y, size - 1), 0); if (mask[id] || density[TOID(rc.x, rc.y, size)] == 0) rc = make_short2(tx, ty); id = TOID(rc.x, rc.y, size); output[id] = rc; } 
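/*
 * Illustrative note, not part of the original source: the centroid machinery
 * above reduces to 1D prefix sums. For one row, the weighted sums over the
 * span [a, b] of pixels owned by a single site are recovered as
 * prefix[b] - prefix[a - 1]; kernelTotal_X stores the prefix value at the left
 * edge of each Voronoi segment and subtracts it at the right edge, and
 * kernelScan_Y then accumulates the per-row pieces onto each site. The helper
 * below is hypothetical, host-only code that shows the same identity.
 */
static float segmentSumFromPrefix(const float *prefix, int a, int b) {
    // prefix[i] = v[0] + ... + v[i]; returns v[a] + ... + v[b]
    return prefix[b] - (a > 0 ? prefix[a - 1] : 0.0f);
}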
//////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernelCalcEnergy(short2 *voronoi, float *density, float *nrgTex, int size) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; int id = TOID(tx, ty, size); short2 site = voronoi[id]; float dx = (site.x - tx) * 1.0f / size; float dy = (site.y - ty) * 1.0f / size; float dist = dx * dx + dy * dy; nrgTex[id] = density[id] * dist; } template <unsigned int blockSize> __device__ void warpReduce(volatile float *sdata, unsigned int tid) { if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } template <unsigned int blockSize> __global__ void kernelReduce(float *input, float *output, unsigned int n) { __shared__ float sdata[blockSize]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize * 2) + tid; unsigned int gridSize = blockSize * 2 * gridDim.x; sdata[tid] = 0; while (i < n) { sdata[tid] += input[i] + input[i + blockSize]; i += gridSize; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) warpReduce<blockSize>(sdata, tid); if (tid == 0) output[blockIdx.x] = sdata[0]; } //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////// Initialization //////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// std::unordered_map<int,int> m_1, m_2, m_3, m_4; void gcvtInitialization(int textureSize) { m_1[256] = 4, m_2[256] = 32, m_3[256] = 16; m_1[512] = 8, m_2[512] = 32, m_3[512] = 16; m_1[1024] = 16, m_2[1024] = 32, m_3[1024] = 16; m_1[2048] = 32, m_2[2048] = 32, m_3[2048] = 8; m_1[4096] = 64, m_2[4096] = 32, m_3[4096] = 8; m_1[8192] = 128, m_2[8192] = 32, m_3[8192] = 4; pbaTexSize = textureSize; pbaMemSize = pbaTexSize * pbaTexSize * sizeof(short2); pbaTextures = (short2 **) malloc(2 * sizeof(short2 *)); pbaDensity = (float **) malloc(10 * sizeof(float *)); pbaPrefixX = (float **) malloc(10 * sizeof(float *)); pbaPrefixY = (float **) malloc(10 * sizeof(float *)); pbaPrefixW = (float **) malloc(10 * sizeof(float *)); hipMalloc((void **) &pbaTextures[0], pbaMemSize); hipMalloc((void **) &pbaTextures[1], pbaMemSize); hipMalloc((void **) &pbaMargin, m_1[pbaTexSize] * pbaTexSize * sizeof(short2)); hipMalloc((void **) &pbaTotalX, pbaTexSize * pbaTexSize * sizeof(float)); hipMalloc((void **) &pbaTotalY, pbaTexSize * pbaTexSize * sizeof(float)); hipMalloc((void **) &pbaTotalW, pbaTexSize * pbaTexSize * sizeof(float)); hipMalloc((void **) &pbaEnergyTex, pbaTexSize * pbaTexSize * sizeof(float)); hipMalloc((void **) &constrainMask_d, pbaTexSize * pbaTexSize * sizeof(bool)); for(int i = 0; i < 10; ++i) { if((pbaTexSize>>i) < 256) break; hipMalloc((void **) 
&pbaDensity[i], (pbaTexSize>>i) * (pbaTexSize>>i) * sizeof(float)); hipMalloc((void **) &pbaPrefixX[i], (pbaTexSize>>i) * (pbaTexSize>>i) * sizeof(float)); hipMalloc((void **) &pbaPrefixY[i], (pbaTexSize>>i) * (pbaTexSize>>i) * sizeof(float)); hipMalloc((void **) &pbaPrefixW[i], (pbaTexSize>>i) * (pbaTexSize>>i) * sizeof(float)); } } // Deallocate all allocated memory void pbaCVDDeinitialization() { hipFree(pbaTextures[0]); hipFree(pbaTextures[1]); hipFree(pbaMargin); for(int i = 0; i < 10; ++i) { if((pbaTexSize>>i) < 256) break; hipFree(pbaDensity[i]); hipFree(pbaPrefixX[i]); hipFree(pbaPrefixY[i]); hipFree(pbaPrefixW[i]); } hipFree(pbaTotalX); hipFree(pbaTotalY); hipFree(pbaTotalW); hipFree(pbaEnergyTex); hipFree(constrainMask_d); free(pbaTextures); free(pbaDensity); free(pbaPrefixX); free(pbaPrefixY); free(pbaPrefixW); } // Copy input to GPU void pba2DInitializeInput(float *density, bool *mask) { hipMemcpy(pbaDensity[0], density, pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(constrainMask_d, mask, pbaTexSize * pbaTexSize * sizeof(bool), hipMemcpyHostToDevice); pbaVoronoi = pbaTextures[0]; pbaTemp = pbaTextures[1]; pbaBuffer = 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// Parallel Banding Algorithm plus ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// // Phase 1 of PBA. m1 must divides texture size and equal or less than size / 64 void pba2DPhase1(int m1) { dim3 block = dim3(BLOCKSIZE); dim3 grid = dim3(pbaTexSize / block.x, m1); hipLaunchKernelGGL(( kernelFloodDown), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize, pbaTexSize / m1); hipLaunchKernelGGL(( kernelFloodUp), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize, pbaTexSize / m1); hipLaunchKernelGGL(( kernelPropagateInterband), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaBuffer], pbaMargin, pbaTexSize, pbaTexSize / m1); hipLaunchKernelGGL(( kernelUpdateVertical), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaBuffer], pbaMargin, pbaTextures[1^pbaBuffer], pbaTexSize, pbaTexSize / m1); } // Phase 2 of PBA. m2 must divides texture size void pba2DPhase2(int m2) { // Compute proximate points locally in each band dim3 block = dim3(BLOCKSIZE); dim3 grid = dim3(pbaTexSize / block.x, m2); hipLaunchKernelGGL(( kernelProximatePoints), dim3(grid), dim3(block) , 0, 0, pbaTextures[1^pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize, pbaTexSize / m2); hipLaunchKernelGGL(( kernelCreateForwardPointers), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize, pbaTexSize / m2); // Repeatly merging two bands into one for (int noBand = m2; noBand > 1; noBand /= 2) { grid = dim3(pbaTexSize / block.x, noBand / 2); hipLaunchKernelGGL(( kernelMergeBands), dim3(grid), dim3(block) , 0, 0, pbaTextures[1^pbaBuffer], pbaTextures[pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize, pbaTexSize / noBand); } // Replace the forward link with the X coordinate of the seed to remove // the need of looking at the other texture. We need it for coloring. grid = dim3(pbaTexSize / block.x, pbaTexSize); hipLaunchKernelGGL(( kernelDoubleToSingleList), dim3(grid), dim3(block) , 0, 0, pbaTextures[1^pbaBuffer], pbaTextures[pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize); } // Phase 3 of PBA. 
m3 must divides texture size and equal or less than 64 void pba2DPhase3(int m3) { dim3 block = dim3(BLOCKSIZE, m3); dim3 grid = dim3(pbaTexSize / block.x); hipLaunchKernelGGL(( kernelColor), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaBuffer], pbaTextures[1^pbaBuffer], pbaTexSize); } void pba2DCompute(int m1, int m2, int m3) { pba2DPhase1(m1); pba2DPhase2(m2); pba2DPhase3(m3); pbaVoronoi = pbaTextures[1^pbaBuffer]; pbaTemp = pbaTextures[pbaBuffer]; pbaBuffer = 1^pbaBuffer; } //////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////// Centroidal Voronoi Tessellation //////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// void pbaCVDDensityScaling(int k) { dim3 block(BLOCKX, BLOCKY); for(int i = 1; i < k; ++i) { dim3 grid((pbaTexSize >> i) / block.x, (pbaTexSize >> i) / block.y); hipLaunchKernelGGL(( kernelDensityScaling), dim3(grid), dim3(block) , 0, 0, pbaDensity[i - 1], pbaDensity[i], pbaTexSize >> i); } } void pbaCVDComputeWeightedPrefix(int k) { dim3 block(BLOCKSIZE); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); int ns = BLOCKSIZE * 3 * sizeof(float); for(int i = 0; i < k; ++i) { dim3 grid(pbaTexSize >> i); hipLaunchKernelGGL(( kernelComputeWeightedPrefixX), dim3(grid), dim3(block), ns , 0, pbaPrefixW[i], pbaPrefixX[i], pbaPrefixY[i], pbaDensity[i], pbaTexSize >> i); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); } } void pbaCVDComputeCentroid() { dim3 block(BLOCKSIZE); dim3 grid(pbaTexSize); int ns = pbaTexSize * sizeof(short); hipLaunchKernelGGL(( kernelVoronoi1D), dim3(grid), dim3(block), ns , 0, pbaVoronoi, (int *) pbaTemp, pbaTexSize); hipLaunchKernelGGL(( kernelTotal_X), dim3(grid), dim3(block) , 0, 0, pbaVoronoi, pbaPrefixX[pbaScale], pbaPrefixY[pbaScale], pbaPrefixW[pbaScale], \ pbaTotalX, pbaTotalY, pbaTotalW, pbaTexSize); block = dim3(BLOCKSIZE); grid = dim3(pbaTexSize / block.x, pbaTexSize / block.x); hipLaunchKernelGGL(( kernelScan_Y), dim3(grid), dim3(block) , 0, 0, (short *) pbaTemp, pbaTotalX, pbaTotalY, pbaTotalW, pbaTexSize); } void pbaCVDUpdateSites() { dim3 block(BLOCKX, BLOCKY); dim3 grid(pbaTexSize / block.x, pbaTexSize / block.y); hipLaunchKernelGGL(( kernelFillShort), dim3(grid), dim3(block) , 0, 0, pbaVoronoi, MARKER, pbaTexSize); hipLaunchKernelGGL(( kernelUpdateSites), dim3(grid), dim3(block) , 0, 0, (short *) pbaTemp, pbaTotalX, pbaTotalY, pbaTotalW, pbaDensity[pbaScale], \ pbaVoronoi, constrainMask_d, pbaTexSize, pbaOmega); } void pbaCVDZoomIn() { dim3 block(BLOCKX, BLOCKY); dim3 grid1(pbaTexSize / block.x, pbaTexSize / block.y); dim3 grid2((pbaTexSize << 1) / block.x, (pbaTexSize << 1) / block.y); hipLaunchKernelGGL(( kernelFillShort), dim3(grid2), dim3(block) , 0, 0, pbaTemp, MARKER, pbaTexSize << 1); hipLaunchKernelGGL(( kernelZoomIn), dim3(grid1), dim3(block) , 0, 0, pbaVoronoi, pbaTemp, pbaTexSize, 1); pbaBuffer = 1^pbaBuffer; short2 *tmp_ptr = pbaVoronoi; pbaVoronoi = pbaTemp; pbaTemp = tmp_ptr; } //////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////// Calculate CVT Engergy Function ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// float pbaCVDCalcEnergy() { dim3 block(BLOCKX, BLOCKY); dim3 grid(pbaTexSize / block.x, pbaTexSize / block.y); hipLaunchKernelGGL(( kernelCalcEnergy), dim3(grid), 
dim3(block) , 0, 0, pbaVoronoi, pbaDensity[pbaScale], pbaEnergyTex, pbaTexSize); const int blockSize = 512; int n = pbaTexSize * pbaTexSize; int blocksPerGrid; do { blocksPerGrid = min(int(::ceil((1.*n) / blockSize)), 32768); hipLaunchKernelGGL(( kernelReduce<blockSize>), dim3(blocksPerGrid), dim3(blockSize) , 0, 0, pbaEnergyTex, pbaEnergyTex, n); n = blocksPerGrid; } while (n > blockSize); if (n > 1) { hipLaunchKernelGGL(( kernelReduce<blockSize>), dim3(1), dim3(blockSize) , 0, 0, pbaEnergyTex, pbaEnergyTex, n); } hipMemcpy(&pbaEnergy_h, pbaEnergyTex, sizeof(float), hipMemcpyDeviceToHost); return pbaEnergy_h * powf(2.0, pbaScale * 2.0); } int gcvtIterations; void gCVT(short *Voronoi, float *density_d, bool *mask, int size, int depth, int maxIter) { gcvtInitialization(size); for (int i = 0; i < depth; ++i) if ((pbaTexSize >> i) < 256) { depth = i; break; } pba2DInitializeInput(density_d, mask); pbaCVDDensityScaling(depth); pbaCVDComputeWeightedPrefix(depth); pbaScale = 0; gcvtIterations = 0; pbaTexSize >>= depth; pbaTexSize <<= 1; hipMemcpy(pbaVoronoi, Voronoi, pbaTexSize * pbaTexSize * sizeof(short2), hipMemcpyHostToDevice); pbaTexSize >>= 1; float Energy, lastEnergy = 1e18, diffEnergy, gradientEnergy; std::vector <int> switch_iter; switch_iter.clear(); pbaOmega = 2.0; for (pbaScale = depth - 1; ~pbaScale; --pbaScale) { pbaTexSize <<= 1; do { pba2DCompute(m_1[pbaTexSize], m_2[pbaTexSize], m_3[pbaTexSize]); if (gcvtIterations % 10 == 0) Energy = pbaCVDCalcEnergy(); pbaCVDComputeCentroid(); pbaCVDUpdateSites(); gcvtIterations++; if (gcvtIterations % 10 == 0) { diffEnergy = lastEnergy - Energy; gradientEnergy = diffEnergy / 10.0; //printf("Iter %d: %f %f\n", gcvtIterations, Energy, gradientEnergy); pbaOmega = min(2.0, 1.0 + diffEnergy); if (pbaScale) { if (gradientEnergy < 3e-1) break; } else { if (gradientEnergy < THRESHOLD) break; } lastEnergy = Energy; } } while (gcvtIterations < maxIter); switch_iter.push_back(gcvtIterations); if (pbaScale) pbaCVDZoomIn(); } pba2DCompute(m_1[pbaTexSize], m_2[pbaTexSize], m_3[pbaTexSize]); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); hipMemcpy(Voronoi, pbaVoronoi, pbaMemSize, hipMemcpyDeviceToHost); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); pbaCVDDeinitialization(); }
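/*
 * Illustrative usage sketch, not part of the original source. It assumes the
 * caller owns three host buffers: a full-resolution density field, a
 * constraint mask, and a Voronoi buffer of size*size short2 pairs that on
 * input carries the seed sites at the coarsest level (size >> (depth - 1))
 * and on output holds the full-resolution nearest-site map. The driver name,
 * seed layout, and parameter choices are hypothetical; the exact seeding
 * convention should be checked against the hipMemcpy into pbaVoronoi inside
 * gCVT above.
 */
void exampleDriver(int size, int depth, int maxIter) {
    // size is assumed to be a power of two covered by the m_1/m_2/m_3 tables
    // (256..8192), and depth small enough that size >> (depth - 1) stays >= 256.
    float *density = new float[size * size];
    bool  *mask    = new bool[size * size];
    short *voronoi = new short[size * size * 2];   // interpreted as short2 pairs
    for (int i = 0; i < size * size; ++i) { density[i] = 1.0f; mask[i] = false; }
    // Seed the coarsest level: mark every cell empty, then write each site as
    // an (x, y) pair into its own cell.
    int coarse = size >> (depth - 1);
    for (int i = 0; i < coarse * coarse * 2; ++i) voronoi[i] = MARKER;
    for (int s = 0; s < 16; ++s) {
        int cx = (s % 4 + 1) * coarse / 5, cy = (s / 4 + 1) * coarse / 5;
        voronoi[2 * (cy * coarse + cx)]     = (short) cx;
        voronoi[2 * (cy * coarse + cx) + 1] = (short) cy;
    }
    // Despite the _d suffix on the parameter, pba2DInitializeInput copies the
    // density host-to-device, so a host buffer is passed here.
    gCVT(voronoi, density, mask, size, depth, maxIter);
    delete[] density; delete[] mask; delete[] voronoi;
}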
d5dd17eecb4d9d7e66b3e7a6c2ae0ca5e677db78.cu
//MIT License // //Copyright(c) 2020 Zheng Jiaqi @NUSComputing // //Permission is hereby granted, free of charge, to any person obtaining a copy //of this software and associated documentation files(the "Software"), to deal //in the Software without restriction, including without limitation the rights //to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //copies of the Software, and to permit persons to whom the Software is //furnished to do so, subject to the following conditions : // //The above copyright notice and this permission notice shall be included in all //copies or substantial portions of the Software. // //THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE //AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE //SOFTWARE. #include <stdio.h> #include <unordered_map> #include <cuda_runtime.h> #include <device_functions.h> #include <helper_timer.h> #include <vector> #include <algorithm> // Parameters for CUDA kernel executions #define BLOCKX 16 #define BLOCKY 16 #define BLOCKSIZE 64 #define BAND 256 // For simplicity, just assume we never need to work with a smaller texture. #define THRESHOLD 1e-5 #define MARKER -32768 #define TOID(x, y, n) ((y) * (n) + (x)) #define debug_error 1 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // Global Variables short2 **pbaTextures, *pbaMargin; // Two textures used to compute 2D Voronoi Diagram short2 *pbaVoronoi, *pbaTemp; float **pbaDensity; float **pbaPrefixX, **pbaPrefixY, **pbaPrefixW; float *pbaTotalX, *pbaTotalY, *pbaTotalW; float *pbaEnergyTex, pbaEnergy_h; float pbaOmega; bool *constrainMask_d; int pbaScale; int pbaBuffer; // Current buffer int pbaMemSize; // Size (in bytes) of a texture int pbaTexSize; // Texture size (squared texture) // Fill an array with some value __global__ void kernelFillShort(short2* arr, short value, int texSize) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; arr[__mul24(y, texSize) + x] = make_short2(value, value); } //////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// Parallel Banding Algorithm plus ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernelFloodDown(short2 *input, short2 *output, int size, int bandSize) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * bandSize; int id = TOID(tx, ty, size); short2 pixel1, pixel2; pixel1 = make_short2(MARKER, MARKER); for (int i = 0; i < bandSize; i++, id += size) { pixel2 = input[id]; if (pixel2.x != MARKER) pixel1 = pixel2; output[id] = pixel1; } } __global__ void kernelFloodUp(short2 *input, short2 *output, int size, int bandSize) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = (blockIdx.y + 1) * bandSize - 1; int id = TOID(tx, ty, size); short2 pixel1, pixel2; int dist1, dist2; pixel1 = 
make_short2(MARKER, MARKER); for (int i = 0; i < bandSize; i++, id -= size) { dist1 = abs(pixel1.y - ty + i); pixel2 = input[id]; dist2 = abs(pixel2.y - ty + i); if (dist2 < dist1) pixel1 = pixel2; output[id] = pixel1; } } __global__ void kernelPropagateInterband(short2 *input, short2 *output, int size, int bandSize) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int inc = bandSize * size; int ny, nid, nDist; short2 pixel; // Top row, look backward int ty = blockIdx.y * bandSize; int topId = TOID(tx, ty, size); int bottomId = TOID(tx, ty + bandSize - 1, size); int tid = blockIdx.y * size + tx; int bid = tid + (size * size / bandSize); pixel = input[topId]; int myDist = abs(pixel.y - ty); output[tid] = pixel; for (nid = bottomId - inc; nid >= 0; nid -= inc) { pixel = input[nid]; if (pixel.x != MARKER) { nDist = abs(pixel.y - ty); if (nDist < myDist) output[tid] = pixel; break; } } // Last row, look downward ty = ty + bandSize - 1; pixel = input[bottomId]; myDist = abs(pixel.y - ty); output[bid] = pixel; for (ny = ty + 1, nid = topId + inc; ny < size; ny += bandSize, nid += inc) { pixel = input[nid]; if (pixel.x != MARKER) { nDist = abs(pixel.y - ty); if (nDist < myDist) output[bid] = pixel; break; } } } __global__ void kernelUpdateVertical(short2 *color, short2 *margin, short2 *output, int size, int bandSize) { __shared__ short2 block[BLOCKSIZE][BLOCKSIZE]; int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * bandSize; short2 top = margin[blockIdx.y * size + tx]; short2 bottom = margin[(blockIdx.y + size / bandSize) * size + tx]; short2 pixel; int dist, myDist; int id = TOID(tx, ty, size); int n_step = bandSize / blockDim.x; for (int step = 0; step < n_step; ++step) { int y_start = blockIdx.y * bandSize + step * blockDim.x; int y_end = y_start + blockDim.x; for (ty = y_start; ty < y_end; ++ty, id += size) { pixel = color[id]; myDist = abs(pixel.y - ty); dist = abs(top.y - ty); if (dist < myDist) { myDist = dist; pixel = top; } dist = abs(bottom.y - ty); if (dist < myDist) pixel = bottom; block[threadIdx.x][ty - y_start] = make_short2(pixel.y, pixel.x); } __syncthreads(); int tid = TOID(blockIdx.y * bandSize + step * blockDim.x + threadIdx.x, \ blockIdx.x * blockDim.x, size); for (int i = 0; i < blockDim.x; ++i, tid += size) { output[tid] = block[i][threadIdx.x]; } __syncthreads(); } } #define LL long long __device__ bool dominate(LL x1, LL y1, LL x2, LL y2, LL x3, LL y3, LL x0) { LL k1 = y2 - y1, k2 = y3 - y2; return (k1 * (y1 + y2) + (x2 - x1) * ((x1 + x2) - (x0 << 1))) * k2 > \ (k2 * (y2 + y3) + (x3 - x2) * ((x2 + x3) - (x0 << 1))) * k1; } #undef LL __global__ void kernelProximatePoints(short2 *input, short2 *stack, int size, int bandSize) { int tx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int ty = __mul24(blockIdx.y, bandSize); int id = TOID(tx, ty, size); int lasty = -1; short2 last1, last2, current; last1.y = -1; last2.y = -1; for (int i = 0; i < bandSize; i++, id += size) { current = input[id]; if (current.x != MARKER) { while (last2.y >= 0) { if (!dominate(last1.x, last2.y, last2.x, \ lasty, current.x, current.y, tx)) break; lasty = last2.y; last2 = last1; if (last1.y >= 0) last1 = stack[TOID(tx, last1.y, size)]; } last1 = last2; last2 = make_short2(current.x, lasty); lasty = current.y; stack[id] = last2; } } // Store the pointer to the tail at the last pixel of this band if (lasty != ty + bandSize - 1) stack[TOID(tx, ty + bandSize - 1, size)] = make_short2(MARKER, lasty); } __global__ void kernelCreateForwardPointers(short2 *input, short2 *output, int 
size, int bandSize) { int tx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int ty = __mul24(blockIdx.y + 1, bandSize) - 1; int id = TOID(tx, ty, size); int lasty = -1, nexty; short2 current; // Get the tail pointer current = input[id]; if (current.x == MARKER) nexty = current.y; else nexty = ty; for (int i = 0; i < bandSize; i++, id -= size) if (ty - i == nexty) { current = make_short2(lasty, input[id].y); output[id] = current; lasty = nexty; nexty = current.y; } // Store the pointer to the head at the first pixel of this band if (lasty != ty - bandSize + 1) output[id + size] = make_short2(lasty, MARKER); } __global__ void kernelMergeBands(short2 *color, short2 *link, short2 *output, int size, int bandSize) { int tx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int band1 = blockIdx.y * 2; int band2 = band1 + 1; int firsty, lasty; short2 last1, last2, current; // last1 and last2: x component store the x coordinate of the site, // y component store the backward pointer // current: y component store the x coordinate of the site, // x component store the forward pointer // Get the two last items of the first list lasty = __mul24(band2, bandSize) - 1; last2 = make_short2(color[TOID(tx, lasty, size)].x, link[TOID(tx, lasty, size)].y); if (last2.x == MARKER) { lasty = last2.y; if (lasty >= 0) last2 = make_short2(color[TOID(tx, lasty, size)].x, link[TOID(tx, lasty, size)].y); else last2 = make_short2(MARKER, MARKER); } if (last2.y >= 0) { // Second item at the top of the stack last1 = make_short2(color[TOID(tx, last2.y, size)].x, link[TOID(tx, last2.y, size)].y); } // Get the first item of the second band firsty = __mul24(band2, bandSize); current = make_short2(link[TOID(tx, firsty, size)].x, color[TOID(tx, firsty, size)].x); if (current.y == MARKER) { firsty = current.x; if (firsty >= 0) current = make_short2(link[TOID(tx, firsty, size)].x, color[TOID(tx, firsty, size)].x); else current = make_short2(MARKER, MARKER); } // Count the number of item in the second band that survive so far. // Once it reaches 2, we can stop. int top = 0; while (top < 2 && current.y >= 0) { // While there's still something on the left while (last2.y >= 0) { if (!dominate(last1.x, last2.y, last2.x, \ lasty, current.y, firsty, tx)) break; lasty = last2.y; last2 = last1; top--; if (last1.y >= 0) last1 = make_short2(color[TOID(tx, last1.y, size)].x, link[TOID(tx, last1.y, size)].y); } // Update the current pointer output[TOID(tx, firsty, size)] = make_short2(current.x, lasty); if (lasty >= 0) output[TOID(tx, lasty, size)] = make_short2(firsty, last2.y); last1 = last2; last2 = make_short2(current.y, lasty); lasty = firsty; firsty = current.x; top = max(1, top + 1); // Advance the current pointer to the next one if (firsty >= 0) current = make_short2(link[TOID(tx, firsty, size)].x, color[TOID(tx, firsty, size)].x); else current = make_short2(MARKER, MARKER); } // Update the head and tail pointer. firsty = __mul24(band1, bandSize); lasty = __mul24(band2, bandSize); current = link[TOID(tx, firsty, size)]; if (current.y == MARKER && current.x < 0) { // No head? last1 = link[TOID(tx, lasty, size)]; if (last1.y == MARKER) current.x = last1.x; else current.x = lasty; output[TOID(tx, firsty, size)] = current; } firsty = __mul24(band1, bandSize) + bandSize - 1; lasty = __mul24(band2, bandSize) + bandSize - 1; current = link[TOID(tx, lasty, size)]; if (current.x == MARKER && current.y < 0) { // No tail? 
last1 = link[TOID(tx, firsty, size)]; if (last1.x == MARKER) current.y = last1.y; else current.y = firsty; output[TOID(tx, lasty, size)] = current; } } __global__ void kernelDoubleToSingleList(short2 *color, short2 *link, short2 *output, int size) { int tx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int ty = blockIdx.y; int id = TOID(tx, ty, size); output[id] = make_short2(color[id].x, link[id].y); } __global__ void kernelColor(short2 *input, short2 *output, int size) { __shared__ short2 block[BLOCKSIZE][BLOCKSIZE]; int col = threadIdx.x; int tid = threadIdx.y; int tx = __mul24(blockIdx.x, blockDim.x) + col; int dx, dy, lasty; unsigned int best, dist; short2 last1, last2; lasty = size - 1; last2 = input[TOID(tx, lasty, size)]; if (last2.x == MARKER) { lasty = last2.y; last2 = input[TOID(tx, lasty, size)]; } if (last2.y >= 0) last1 = input[TOID(tx, last2.y, size)]; int y_start, y_end, n_step = size / blockDim.x; for (int step = 0; step < n_step; ++step) { y_start = size - step * blockDim.x - 1; y_end = size - (step + 1) * blockDim.x; for (int ty = y_start - tid; ty >= y_end; ty -= blockDim.y) { dx = last2.x - tx; dy = lasty - ty; best = dist = __mul24(dx, dx) + __mul24(dy, dy); while (last2.y >= 0) { dx = last1.x - tx; dy = last2.y - ty; dist = __mul24(dx, dx) + __mul24(dy, dy); if (dist > best) break; best = dist; lasty = last2.y; last2 = last1; if (last2.y >= 0) last1 = input[TOID(tx, last2.y, size)]; } block[threadIdx.x][ty - y_end] = make_short2(lasty, last2.x); } __syncthreads(); int iinc = size * blockDim.y; int id = TOID(y_end + threadIdx.x, blockIdx.x * blockDim.x + tid, size); for (int i = tid; i < blockDim.x; i += blockDim.y, id += iinc) { output[id] = block[i][threadIdx.x]; } __syncthreads(); } } //////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////// Centroidal Voronoi Tessellation //////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernelZoomIn(short2 *input, short2 *output, int size, int scale) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; int id = TOID(tx, ty, size); int tid = TOID(tx << scale, ty << scale, size << scale); short2 pixel = input[id]; output[tid] = (pixel.x == MARKER) ? 
make_short2(MARKER, MARKER) : make_short2(pixel.x << scale, pixel.y << scale); } __global__ void kernelDensityScaling(float *input, float *output, int size) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; float density = 0; for (int x = (tx << 1); x < (tx << 1) + 2; ++x) { for (int y = (ty << 1); y < (ty << 1) + 2; ++y) { density += input[TOID(x, y, size << 1)]; } } output[TOID(tx, ty, size)] = density / 4.0; } // compute the prefix sum of weight, x*weight and y*weight for each row extern __shared__ float tmpScan[]; __global__ void kernelComputeWeightedPrefixX(float *prefixW, float *prefixX, float *prefixY, float *density, int texWidth) { float *tmpX = tmpScan; float *tmpY = tmpX + blockDim.x; float *tmpWeight = tmpY + blockDim.x; float pW, pX, pY; int tid = threadIdx.x; int tx, ty = blockIdx.x; float lastX = 0.0f, lastY = 0.0f, lastW = 0.0f; int id = __mul24(ty, texWidth); for (int xx = 0; xx < texWidth; xx += blockDim.x) { tx = xx + tid; pW = density[id + tx]; pX = lastX + tx * pW; pY = lastY + ty * pW; pW = lastW + pW; tmpWeight[tid] = pW; tmpX[tid] = pX; tmpY[tid] = pY; __syncthreads(); for (int step = 1; step < blockDim.x; step *= 2) { // parallel prefix sum within a block if (tid >= step) { pW += tmpWeight[tid - step]; pX += tmpX[tid - step]; pY += tmpY[tid - step]; } __syncthreads(); tmpWeight[tid] = pW; tmpX[tid] = pX; tmpY[tid] = pY; __syncthreads(); } prefixX[id + tx] = tmpX[tid]; prefixY[id + tx] = tmpY[tid]; prefixW[id + tx] = tmpWeight[tid]; if (tid == 0) { lastX = tmpX[blockDim.x - 1]; lastY = tmpY[blockDim.x - 1]; lastW = tmpWeight[blockDim.x - 1]; } __syncthreads(); } } // 2D -> 1D Voronoi Diagram extern __shared__ short sharedVor[]; __global__ void kernelVoronoi1D(short2 *input, int *output, int texWidth) { int tid = threadIdx.x; int tx, ty = blockIdx.x; int id = __mul24(ty, texWidth); // Initialize for (tx = tid; tx < texWidth; tx += blockDim.x) sharedVor[tx] = MARKER; __syncthreads(); // Mark for (tx = tid; tx < texWidth; tx += blockDim.x) { short2 pixel = input[id + tx]; sharedVor[pixel.x] = pixel.y; } __syncthreads(); // Write id /= 2; for (tx = tid; tx < texWidth / 2; tx += blockDim.x) output[id + tx] = ((int *)sharedVor)[tx]; } __global__ void kernelTotal_X(short2 *voronoi, float *prefixX, float *prefixY, float *prefixW, \ float *totalX, float *totalY, float *totalW, int texWidth) { // Shared array to store the sums __shared__ float sharedTotalX[BAND]; // BAND = 256 __shared__ float sharedTotalY[BAND]; __shared__ float sharedTotalW[BAND]; __shared__ int startBlk[100], endBlk[100]; // 100 blocks is more than enough int count; int tid = threadIdx.x; int tx, ty = blockIdx.x, offset; int id = __mul24(ty, texWidth); short2 me, other; int margin = tid * BAND; if (margin < texWidth) { startBlk[tid] = 0; endBlk[tid] = texWidth; for (tx = 0; tx < texWidth; tx += blockDim.x) { me = voronoi[id + tx]; if (me.x >= margin) { startBlk[tid] = max(0, tx - int(blockDim.x)); break; } } for (; tx < texWidth; tx += blockDim.x) { me = voronoi[id + tx]; if (me.x >= margin + BAND) { endBlk[tid] = tx; break; } } } __syncthreads(); count = 0; // We process one BAND at a time. 
for (margin = 0; margin < texWidth; margin += BAND, count++) { // Only for the first iteration of tx // Make sure we detect the boundary at tx = 0 other.x = -1; // Left edge, scan through the row for (tx = startBlk[count] + tid; tx < endBlk[count]; tx += blockDim.x) { if (tx > 0) other = voronoi[id + tx - 1]; me = voronoi[id + tx]; offset = me.x - margin; // margin <= me.x < margin + BAND && the closest site of the previous pixel is different if (offset >= 0 && offset < BAND && other.x < me.x) { if (tx > 0) { sharedTotalX[offset] = prefixX[id + tx - 1]; sharedTotalY[offset] = prefixY[id + tx - 1]; sharedTotalW[offset] = prefixW[id + tx - 1]; } else { sharedTotalX[offset] = 0.0f; sharedTotalY[offset] = 0.0f; sharedTotalW[offset] = 0.0f; } } } __syncthreads(); // Right edge for (tx = startBlk[count] + tid; tx < endBlk[count]; tx += blockDim.x) { me = voronoi[id + tx]; offset = me.x - margin; if (tx < texWidth - 1) other = voronoi[id + tx + 1]; else other.x = texWidth; // margin <= me.x < margin + BAND && the closest site of the next pixel is different if (offset >= 0 && offset < BAND && me.x < other.x) { sharedTotalX[offset] = prefixX[id + tx] - sharedTotalX[offset]; sharedTotalY[offset] = prefixY[id + tx] - sharedTotalY[offset]; sharedTotalW[offset] = prefixW[id + tx] - sharedTotalW[offset]; } } __syncthreads(); // Write for (tx = tid; tx < BAND; tx += blockDim.x) if (margin + tx < texWidth) { totalX[id + margin + tx] = sharedTotalX[tx]; totalY[id + margin + tx] = sharedTotalY[tx]; totalW[id + margin + tx] = sharedTotalW[tx]; } } } __global__ void kernelScan_Y(short *voronoi, float *totalX, float *totalY, float *totalW, int size) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * BLOCKSIZE; int id = TOID(tx, ty, size), tid; short pixel, last = MARKER; float tmpX = 0.0, tmpY = 0.0, tmpW = 0.0; for (int i = 0; i < BLOCKSIZE; ++i, ++ty, id += size) { __syncthreads(); pixel = voronoi[id]; if (pixel != last) { if (last != MARKER) { tid = TOID(tx, last, size); atomicAdd(totalX + tid, tmpX); atomicAdd(totalY + tid, tmpY); atomicAdd(totalW + tid, tmpW); } tmpX = tmpY = tmpW = 0.0; last = pixel; } if (pixel != MARKER && pixel != ty) { tmpX += totalX[id]; tmpY += totalY[id]; tmpW += totalW[id]; } } if (last != MARKER) { tid = TOID(tx, last, size); atomicAdd(totalX + tid, tmpX); atomicAdd(totalY + tid, tmpY); atomicAdd(totalW + tid, tmpW); } } __global__ void kernelDebug_Y(short *voronoi, float *totalX, float *totalY, float *totalW, int size) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = 0; int id = TOID(tx, ty, size); short pixel, last = MARKER; for (; ty < size; ++ty, id += size) { pixel = voronoi[id]; if (pixel != last) { if (last != MARKER) printf("%d %d\n", tx, last); last = pixel; } } if (last != MARKER) printf("%d %d\n", tx, last); } __global__ void kernelUpdateSites(short *voronoi, float *totalX, float *totalY, float *totalW, float *density, short2 *output, bool *mask, int size, float omega) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; float pX, pY, pW; int id = TOID(tx, ty, size); short pixel, seed = voronoi[id]; if (seed != ty) return; pX = totalX[id]; pY = totalY[id]; pW = totalW[id]; float _x = pX / pW, _y = pY / pW; short2 rc = make_short2(tx + (_x - tx) * omega + 0.5f, ty + (_y - ty) * omega + 0.5f); rc.x = max(min(rc.x, size - 1), 0); rc.y = max(min(rc.y, size - 1), 0); if (mask[id] || density[TOID(rc.x, rc.y, size)] == 0) rc = make_short2(tx, ty); id = TOID(rc.x, rc.y, size); output[id] = rc; } 
//////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernelCalcEnergy(short2 *voronoi, float *density, float *nrgTex, int size) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; int id = TOID(tx, ty, size); short2 site = voronoi[id]; float dx = (site.x - tx) * 1.0f / size; float dy = (site.y - ty) * 1.0f / size; float dist = dx * dx + dy * dy; nrgTex[id] = density[id] * dist; } template <unsigned int blockSize> __device__ void warpReduce(volatile float *sdata, unsigned int tid) { if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } template <unsigned int blockSize> __global__ void kernelReduce(float *input, float *output, unsigned int n) { __shared__ float sdata[blockSize]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize * 2) + tid; unsigned int gridSize = blockSize * 2 * gridDim.x; sdata[tid] = 0; while (i < n) { sdata[tid] += input[i] + input[i + blockSize]; i += gridSize; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) warpReduce<blockSize>(sdata, tid); if (tid == 0) output[blockIdx.x] = sdata[0]; } //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////// Initialization //////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// std::unordered_map<int,int> m_1, m_2, m_3, m_4; void gcvtInitialization(int textureSize) { m_1[256] = 4, m_2[256] = 32, m_3[256] = 16; m_1[512] = 8, m_2[512] = 32, m_3[512] = 16; m_1[1024] = 16, m_2[1024] = 32, m_3[1024] = 16; m_1[2048] = 32, m_2[2048] = 32, m_3[2048] = 8; m_1[4096] = 64, m_2[4096] = 32, m_3[4096] = 8; m_1[8192] = 128, m_2[8192] = 32, m_3[8192] = 4; pbaTexSize = textureSize; pbaMemSize = pbaTexSize * pbaTexSize * sizeof(short2); pbaTextures = (short2 **) malloc(2 * sizeof(short2 *)); pbaDensity = (float **) malloc(10 * sizeof(float *)); pbaPrefixX = (float **) malloc(10 * sizeof(float *)); pbaPrefixY = (float **) malloc(10 * sizeof(float *)); pbaPrefixW = (float **) malloc(10 * sizeof(float *)); cudaMalloc((void **) &pbaTextures[0], pbaMemSize); cudaMalloc((void **) &pbaTextures[1], pbaMemSize); cudaMalloc((void **) &pbaMargin, m_1[pbaTexSize] * pbaTexSize * sizeof(short2)); cudaMalloc((void **) &pbaTotalX, pbaTexSize * pbaTexSize * sizeof(float)); cudaMalloc((void **) &pbaTotalY, pbaTexSize * pbaTexSize * sizeof(float)); cudaMalloc((void **) &pbaTotalW, pbaTexSize * pbaTexSize * sizeof(float)); cudaMalloc((void **) &pbaEnergyTex, pbaTexSize * pbaTexSize * sizeof(float)); cudaMalloc((void **) &constrainMask_d, pbaTexSize * pbaTexSize * sizeof(bool)); for(int i = 0; i < 10; ++i) { if((pbaTexSize>>i) < 256) break; cudaMalloc((void **) 
&pbaDensity[i], (pbaTexSize>>i) * (pbaTexSize>>i) * sizeof(float)); cudaMalloc((void **) &pbaPrefixX[i], (pbaTexSize>>i) * (pbaTexSize>>i) * sizeof(float)); cudaMalloc((void **) &pbaPrefixY[i], (pbaTexSize>>i) * (pbaTexSize>>i) * sizeof(float)); cudaMalloc((void **) &pbaPrefixW[i], (pbaTexSize>>i) * (pbaTexSize>>i) * sizeof(float)); } } // Deallocate all allocated memory void pbaCVDDeinitialization() { cudaFree(pbaTextures[0]); cudaFree(pbaTextures[1]); cudaFree(pbaMargin); for(int i = 0; i < 10; ++i) { if((pbaTexSize>>i) < 256) break; cudaFree(pbaDensity[i]); cudaFree(pbaPrefixX[i]); cudaFree(pbaPrefixY[i]); cudaFree(pbaPrefixW[i]); } cudaFree(pbaTotalX); cudaFree(pbaTotalY); cudaFree(pbaTotalW); cudaFree(pbaEnergyTex); cudaFree(constrainMask_d); free(pbaTextures); free(pbaDensity); free(pbaPrefixX); free(pbaPrefixY); free(pbaPrefixW); } // Copy input to GPU void pba2DInitializeInput(float *density, bool *mask) { cudaMemcpy(pbaDensity[0], density, pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(constrainMask_d, mask, pbaTexSize * pbaTexSize * sizeof(bool), cudaMemcpyHostToDevice); pbaVoronoi = pbaTextures[0]; pbaTemp = pbaTextures[1]; pbaBuffer = 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// Parallel Banding Algorithm plus ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// // Phase 1 of PBA. m1 must divides texture size and equal or less than size / 64 void pba2DPhase1(int m1) { dim3 block = dim3(BLOCKSIZE); dim3 grid = dim3(pbaTexSize / block.x, m1); kernelFloodDown<<< grid, block >>>(pbaTextures[pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize, pbaTexSize / m1); kernelFloodUp<<< grid, block >>>(pbaTextures[pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize, pbaTexSize / m1); kernelPropagateInterband<<< grid, block >>>(pbaTextures[pbaBuffer], pbaMargin, pbaTexSize, pbaTexSize / m1); kernelUpdateVertical<<< grid, block >>>(pbaTextures[pbaBuffer], pbaMargin, pbaTextures[1^pbaBuffer], pbaTexSize, pbaTexSize / m1); } // Phase 2 of PBA. m2 must divides texture size void pba2DPhase2(int m2) { // Compute proximate points locally in each band dim3 block = dim3(BLOCKSIZE); dim3 grid = dim3(pbaTexSize / block.x, m2); kernelProximatePoints<<< grid, block >>>(pbaTextures[1^pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize, pbaTexSize / m2); kernelCreateForwardPointers<<< grid, block >>>(pbaTextures[pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize, pbaTexSize / m2); // Repeatly merging two bands into one for (int noBand = m2; noBand > 1; noBand /= 2) { grid = dim3(pbaTexSize / block.x, noBand / 2); kernelMergeBands<<< grid, block >>>(pbaTextures[1^pbaBuffer], pbaTextures[pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize, pbaTexSize / noBand); } // Replace the forward link with the X coordinate of the seed to remove // the need of looking at the other texture. We need it for coloring. grid = dim3(pbaTexSize / block.x, pbaTexSize); kernelDoubleToSingleList<<< grid, block >>>(pbaTextures[1^pbaBuffer], pbaTextures[pbaBuffer], pbaTextures[pbaBuffer], pbaTexSize); } // Phase 3 of PBA. 
m3 must divides texture size and equal or less than 64 void pba2DPhase3(int m3) { dim3 block = dim3(BLOCKSIZE, m3); dim3 grid = dim3(pbaTexSize / block.x); kernelColor<<< grid, block >>>(pbaTextures[pbaBuffer], pbaTextures[1^pbaBuffer], pbaTexSize); } void pba2DCompute(int m1, int m2, int m3) { pba2DPhase1(m1); pba2DPhase2(m2); pba2DPhase3(m3); pbaVoronoi = pbaTextures[1^pbaBuffer]; pbaTemp = pbaTextures[pbaBuffer]; pbaBuffer = 1^pbaBuffer; } //////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////// Centroidal Voronoi Tessellation //////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// void pbaCVDDensityScaling(int k) { dim3 block(BLOCKX, BLOCKY); for(int i = 1; i < k; ++i) { dim3 grid((pbaTexSize >> i) / block.x, (pbaTexSize >> i) / block.y); kernelDensityScaling<<< grid, block >>>(pbaDensity[i - 1], pbaDensity[i], pbaTexSize >> i); } } void pbaCVDComputeWeightedPrefix(int k) { dim3 block(BLOCKSIZE); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); int ns = BLOCKSIZE * 3 * sizeof(float); for(int i = 0; i < k; ++i) { dim3 grid(pbaTexSize >> i); kernelComputeWeightedPrefixX<<< grid, block, ns >>>(pbaPrefixW[i], pbaPrefixX[i], pbaPrefixY[i], pbaDensity[i], pbaTexSize >> i); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); } } void pbaCVDComputeCentroid() { dim3 block(BLOCKSIZE); dim3 grid(pbaTexSize); int ns = pbaTexSize * sizeof(short); kernelVoronoi1D<<< grid, block, ns >>>(pbaVoronoi, (int *) pbaTemp, pbaTexSize); kernelTotal_X<<< grid, block >>>(pbaVoronoi, pbaPrefixX[pbaScale], pbaPrefixY[pbaScale], pbaPrefixW[pbaScale], \ pbaTotalX, pbaTotalY, pbaTotalW, pbaTexSize); block = dim3(BLOCKSIZE); grid = dim3(pbaTexSize / block.x, pbaTexSize / block.x); kernelScan_Y<<< grid, block >>>((short *) pbaTemp, pbaTotalX, pbaTotalY, pbaTotalW, pbaTexSize); } void pbaCVDUpdateSites() { dim3 block(BLOCKX, BLOCKY); dim3 grid(pbaTexSize / block.x, pbaTexSize / block.y); kernelFillShort<<< grid, block >>>(pbaVoronoi, MARKER, pbaTexSize); kernelUpdateSites<<< grid, block >>>((short *) pbaTemp, pbaTotalX, pbaTotalY, pbaTotalW, pbaDensity[pbaScale], \ pbaVoronoi, constrainMask_d, pbaTexSize, pbaOmega); } void pbaCVDZoomIn() { dim3 block(BLOCKX, BLOCKY); dim3 grid1(pbaTexSize / block.x, pbaTexSize / block.y); dim3 grid2((pbaTexSize << 1) / block.x, (pbaTexSize << 1) / block.y); kernelFillShort<<< grid2, block >>>(pbaTemp, MARKER, pbaTexSize << 1); kernelZoomIn<<< grid1, block >>>(pbaVoronoi, pbaTemp, pbaTexSize, 1); pbaBuffer = 1^pbaBuffer; short2 *tmp_ptr = pbaVoronoi; pbaVoronoi = pbaTemp; pbaTemp = tmp_ptr; } //////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////// Calculate CVT Engergy Function ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// float pbaCVDCalcEnergy() { dim3 block(BLOCKX, BLOCKY); dim3 grid(pbaTexSize / block.x, pbaTexSize / block.y); kernelCalcEnergy<<< grid, block >>>(pbaVoronoi, pbaDensity[pbaScale], pbaEnergyTex, pbaTexSize); const int blockSize = 512; int n = pbaTexSize * pbaTexSize; int blocksPerGrid; do { blocksPerGrid = min(int(std::ceil((1.*n) / blockSize)), 32768); kernelReduce<blockSize><<< blocksPerGrid, blockSize >>>(pbaEnergyTex, pbaEnergyTex, n); n = blocksPerGrid; } while (n > blockSize); if (n > 1) { 
kernelReduce<blockSize><<< 1, blockSize >>>(pbaEnergyTex, pbaEnergyTex, n); } cudaMemcpy(&pbaEnergy_h, pbaEnergyTex, sizeof(float), cudaMemcpyDeviceToHost); return pbaEnergy_h * powf(2.0, pbaScale * 2.0); } int gcvtIterations; void gCVT(short *Voronoi, float *density_d, bool *mask, int size, int depth, int maxIter) { gcvtInitialization(size); for (int i = 0; i < depth; ++i) if ((pbaTexSize >> i) < 256) { depth = i; break; } pba2DInitializeInput(density_d, mask); pbaCVDDensityScaling(depth); pbaCVDComputeWeightedPrefix(depth); pbaScale = 0; gcvtIterations = 0; pbaTexSize >>= depth; pbaTexSize <<= 1; cudaMemcpy(pbaVoronoi, Voronoi, pbaTexSize * pbaTexSize * sizeof(short2), cudaMemcpyHostToDevice); pbaTexSize >>= 1; float Energy, lastEnergy = 1e18, diffEnergy, gradientEnergy; std::vector <int> switch_iter; switch_iter.clear(); pbaOmega = 2.0; for (pbaScale = depth - 1; ~pbaScale; --pbaScale) { pbaTexSize <<= 1; do { pba2DCompute(m_1[pbaTexSize], m_2[pbaTexSize], m_3[pbaTexSize]); if (gcvtIterations % 10 == 0) Energy = pbaCVDCalcEnergy(); pbaCVDComputeCentroid(); pbaCVDUpdateSites(); gcvtIterations++; if (gcvtIterations % 10 == 0) { diffEnergy = lastEnergy - Energy; gradientEnergy = diffEnergy / 10.0; //printf("Iter %d: %f %f\n", gcvtIterations, Energy, gradientEnergy); pbaOmega = min(2.0, 1.0 + diffEnergy); if (pbaScale) { if (gradientEnergy < 3e-1) break; } else { if (gradientEnergy < THRESHOLD) break; } lastEnergy = Energy; } } while (gcvtIterations < maxIter); switch_iter.push_back(gcvtIterations); if (pbaScale) pbaCVDZoomIn(); } pba2DCompute(m_1[pbaTexSize], m_2[pbaTexSize], m_3[pbaTexSize]); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); cudaMemcpy(Voronoi, pbaVoronoi, pbaMemSize, cudaMemcpyDeviceToHost); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); pbaCVDDeinitialization(); }
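A sketch of what the centroidal-Voronoi kernels above compute, written out as formulas rather than taken from the original source: kernelCalcEnergy accumulates a density-weighted squared distance from every pixel to its owning site (up to the 1/size^2 normalization inside the kernel), and kernelUpdateSites divides the accumulated density-weighted coordinate sums by the weight sum and moves each site part of the way toward that centroid, clamping to the domain and refusing moves into masked or zero-density cells,

\[ E = \sum_{p} \rho(p)\,\lVert p - s(p)\rVert^{2}, \qquad c_i = \frac{\sum_{p \in V_i} \rho(p)\, p}{\sum_{p \in V_i} \rho(p)}, \qquad s_i \leftarrow s_i + \omega\,(c_i - s_i), \]

where s(p) is the site owning pixel p, V_i is the Voronoi cell of site s_i, \rho is the density texture, and \omega is the over-relaxation factor pbaOmega. The host loop in gCVT stops refining a resolution level once the energy decrease per ten iterations drops below its threshold.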
6642ec02e33d18ee1585758d25d29c2f3ae8e909.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2020 ETH Zurich. All Rights Reserved. #include "from_rod.h" #include "drivers/rod.h" #include <mirheo/core/celllist.h> #include <mirheo/core/pvs/rod_vector.h> #include <mirheo/core/pvs/views/rv.h> #include <mirheo/core/utils/kernel_launch.h> namespace mirheo { BounceFromRod::BounceFromRod(const MirState *state, const std::string& name, real radius, VarBounceKernel varBounceKernel) : Bouncer(state, name), radius_(radius), varBounceKernel_(varBounceKernel) {} BounceFromRod::~BounceFromRod() = default; void BounceFromRod::setup(ObjectVector *ov) { Bouncer::setup(ov); rv_ = dynamic_cast<RodVector*> (ov); if (rv_ == nullptr) die("bounce from rod must be used with a rod vector"); ov->requireDataPerParticle<real4> (channel_names::oldPositions, DataManager::PersistenceMode::Active, DataManager::ShiftMode::Active); } void BounceFromRod::setPrerequisites(ParticleVector *pv) { // do not set it to persistent because bounce happens after integration pv->requireDataPerParticle<real4> (channel_names::oldPositions, DataManager::PersistenceMode::None, DataManager::ShiftMode::Active); } std::vector<std::string> BounceFromRod::getChannelsToBeExchanged() const { return {channel_names::oldPositions}; } std::vector<std::string> BounceFromRod::getChannelsToBeSentBack() const { return {channel_names::forces}; } void BounceFromRod::exec(ParticleVector *pv, CellList *cl, ParticleVectorLocality locality, hipStream_t stream) { auto activeRV = rv_->get(locality); debug("Bouncing %d '%s' particles from %d '%s' rods (%s)", pv->local()->size(), pv->getCName(), activeRV->getNumObjects(), rv_->getCName(), getParticleVectorLocalityStr(locality).c_str()); rv_->findExtentAndCOM(stream, locality); const int totalSegments = activeRV->getNumSegmentsPerRod() * activeRV->getNumObjects(); // Set maximum possible number of collisions with segments // In case of crash, the estimate should be increased const int maxCollisions = static_cast<int>(collisionsPerSeg_ * static_cast<real>(totalSegments)); table_.collisionTable.resize_anew(maxCollisions); table_.nCollisions.clear(stream); rod_bounce_kernels::SegmentTable devCollisionTable { maxCollisions, table_.nCollisions.devPtr(), table_.collisionTable.devPtr() }; // Setup collision times array. 
For speed and simplicity initial time will be 0, // and after the collisions detected its i-th element will be t_i-1.0_r, where 0 <= t_i <= 1 // is the collision time, or 0 if no collision with the particle found collisionTimes.resize_anew(pv->local()->size()); collisionTimes.clear(stream); const int nthreads = 128; activeRV->forces().clear(stream); RVviewWithOldParticles rvView(rv_, activeRV); PVviewWithOldParticles pvView(pv, pv->local()); // Step 1, find all the candidate collisions SAFE_KERNEL_LAUNCH( rod_bounce_kernels::findBounces, getNblocks(totalSegments, nthreads), nthreads, 0, stream, rvView, radius_, pvView, cl->cellInfo(), devCollisionTable, collisionTimes.devPtr() ); table_.nCollisions.downloadFromDevice(stream); const int nCollisions = table_.nCollisions[0]; debug("Found %d rod collision candidates", nCollisions); if (nCollisions > maxCollisions) die("Found too many rod collisions (%d)," "something may be broken or you need to increase the estimate", nCollisions); // Step 2, resolve the collisions mpark::visit([&](auto& bounceKernel) { bounceKernel.update(rng_); SAFE_KERNEL_LAUNCH( rod_bounce_kernels::performBouncing, getNblocks(nCollisions, nthreads), nthreads, 0, stream, rvView, radius_, pvView, nCollisions, devCollisionTable.indices, collisionTimes.devPtr(), getState()->dt, bounceKernel); }, varBounceKernel_); } } // namespace mirheo
6642ec02e33d18ee1585758d25d29c2f3ae8e909.cu
// Copyright 2020 ETH Zurich. All Rights Reserved. #include "from_rod.h" #include "drivers/rod.h" #include <mirheo/core/celllist.h> #include <mirheo/core/pvs/rod_vector.h> #include <mirheo/core/pvs/views/rv.h> #include <mirheo/core/utils/kernel_launch.h> namespace mirheo { BounceFromRod::BounceFromRod(const MirState *state, const std::string& name, real radius, VarBounceKernel varBounceKernel) : Bouncer(state, name), radius_(radius), varBounceKernel_(varBounceKernel) {} BounceFromRod::~BounceFromRod() = default; void BounceFromRod::setup(ObjectVector *ov) { Bouncer::setup(ov); rv_ = dynamic_cast<RodVector*> (ov); if (rv_ == nullptr) die("bounce from rod must be used with a rod vector"); ov->requireDataPerParticle<real4> (channel_names::oldPositions, DataManager::PersistenceMode::Active, DataManager::ShiftMode::Active); } void BounceFromRod::setPrerequisites(ParticleVector *pv) { // do not set it to persistent because bounce happens after integration pv->requireDataPerParticle<real4> (channel_names::oldPositions, DataManager::PersistenceMode::None, DataManager::ShiftMode::Active); } std::vector<std::string> BounceFromRod::getChannelsToBeExchanged() const { return {channel_names::oldPositions}; } std::vector<std::string> BounceFromRod::getChannelsToBeSentBack() const { return {channel_names::forces}; } void BounceFromRod::exec(ParticleVector *pv, CellList *cl, ParticleVectorLocality locality, cudaStream_t stream) { auto activeRV = rv_->get(locality); debug("Bouncing %d '%s' particles from %d '%s' rods (%s)", pv->local()->size(), pv->getCName(), activeRV->getNumObjects(), rv_->getCName(), getParticleVectorLocalityStr(locality).c_str()); rv_->findExtentAndCOM(stream, locality); const int totalSegments = activeRV->getNumSegmentsPerRod() * activeRV->getNumObjects(); // Set maximum possible number of collisions with segments // In case of crash, the estimate should be increased const int maxCollisions = static_cast<int>(collisionsPerSeg_ * static_cast<real>(totalSegments)); table_.collisionTable.resize_anew(maxCollisions); table_.nCollisions.clear(stream); rod_bounce_kernels::SegmentTable devCollisionTable { maxCollisions, table_.nCollisions.devPtr(), table_.collisionTable.devPtr() }; // Setup collision times array. 
For speed and simplicity initial time will be 0, // and after the collisions detected its i-th element will be t_i-1.0_r, where 0 <= t_i <= 1 // is the collision time, or 0 if no collision with the particle found collisionTimes.resize_anew(pv->local()->size()); collisionTimes.clear(stream); const int nthreads = 128; activeRV->forces().clear(stream); RVviewWithOldParticles rvView(rv_, activeRV); PVviewWithOldParticles pvView(pv, pv->local()); // Step 1, find all the candidate collisions SAFE_KERNEL_LAUNCH( rod_bounce_kernels::findBounces, getNblocks(totalSegments, nthreads), nthreads, 0, stream, rvView, radius_, pvView, cl->cellInfo(), devCollisionTable, collisionTimes.devPtr() ); table_.nCollisions.downloadFromDevice(stream); const int nCollisions = table_.nCollisions[0]; debug("Found %d rod collision candidates", nCollisions); if (nCollisions > maxCollisions) die("Found too many rod collisions (%d)," "something may be broken or you need to increase the estimate", nCollisions); // Step 2, resolve the collisions mpark::visit([&](auto& bounceKernel) { bounceKernel.update(rng_); SAFE_KERNEL_LAUNCH( rod_bounce_kernels::performBouncing, getNblocks(nCollisions, nthreads), nthreads, 0, stream, rvView, radius_, pvView, nCollisions, devCollisionTable.indices, collisionTimes.devPtr(), getState()->dt, bounceKernel); }, varBounceKernel_); } } // namespace mirheo
d1d194b70b79e79218cdf8ae76160b70dbf5d31d.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "../include/config.cuh"
#include "../include/matrixBankConflict.cuh"

__global__ void gpuMatrixMulBankConflict(int* d_A, int* d_B, int* d_C, int m, int n, int k){
    __shared__ int A_tile[TILE_SIZE][TILE_SIZE];
    __shared__ int B_tile[TILE_SIZE][TILE_SIZE];

    // Each thread block computes one tile of matrix C;
    // each thread in the block computes one element of that tile.
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;

    // illustration: https://cnugteren.github.io/tutorial/pages/page4.html
    // A is swept as a horizontal strip: aBegin and aEnd are the start and end of the
    // first row of the tile, and each loop iteration advances by one tile.
    int aBegin = blockIdx.y * TILE_SIZE * n;
    int aEnd = aBegin + n - 1;
    int aStride = TILE_SIZE;

    int bBegin = blockIdx.x * TILE_SIZE * k;
    int bStride = TILE_SIZE;

    int accu = 0;

    // Compute one tile of C
    for(int i = aBegin, j = bBegin; i <= aEnd; i += aStride, j += bStride){
        // Load one element of each tile into shared memory
        A_tile[ty][tx] = d_A[i + n * ty + tx];
        B_tile[ty][tx] = d_B[j + k * ty + tx];
        //B_tile[tx][ty] = d_B[j + k * tx + ty];

        __syncthreads();

        for(int k = 0; k < TILE_SIZE; k++)
            accu += A_tile[ty][k] * B_tile[tx][k];

        __syncthreads();
    }

    // The accumulated product of a row of A and a column of B goes to the
    // corresponding position in C.
    int cIdx = k * TILE_SIZE * by + TILE_SIZE * bx;
    d_C[cIdx + k * ty + tx] = accu;
}
d1d194b70b79e79218cdf8ae76160b70dbf5d31d.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include "../include/config.cuh"
#include "../include/matrixBankConflict.cuh"

__global__ void gpuMatrixMulBankConflict(int* d_A, int* d_B, int* d_C, int m, int n, int k){
    __shared__ int A_tile[TILE_SIZE][TILE_SIZE];
    __shared__ int B_tile[TILE_SIZE][TILE_SIZE];

    // Each thread block computes one tile of matrix C;
    // each thread in the block computes one element of that tile.
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;

    // illustration: https://cnugteren.github.io/tutorial/pages/page4.html
    // A is swept as a horizontal strip: aBegin and aEnd are the start and end of the
    // first row of the tile, and each loop iteration advances by one tile.
    int aBegin = blockIdx.y * TILE_SIZE * n;
    int aEnd = aBegin + n - 1;
    int aStride = TILE_SIZE;

    int bBegin = blockIdx.x * TILE_SIZE * k;
    int bStride = TILE_SIZE;

    int accu = 0;

    // Compute one tile of C
    for(int i = aBegin, j = bBegin; i <= aEnd; i += aStride, j += bStride){
        // Load one element of each tile into shared memory
        A_tile[ty][tx] = d_A[i + n * ty + tx];
        B_tile[ty][tx] = d_B[j + k * ty + tx];
        //B_tile[tx][ty] = d_B[j + k * tx + ty];

        __syncthreads();

        for(int k = 0; k < TILE_SIZE; k++)
            accu += A_tile[ty][k] * B_tile[tx][k];

        __syncthreads();
    }

    // The accumulated product of a row of A and a column of B goes to the
    // corresponding position in C.
    int cIdx = k * TILE_SIZE * by + TILE_SIZE * bx;
    d_C[cIdx + k * ty + tx] = accu;
}
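The pair above is a tiled integer matrix multiply whose inner product reads B_tile[tx][k]: consecutive lanes of a warp (consecutive tx) then access shared memory with a stride of TILE_SIZE words, so whenever TILE_SIZE is a multiple of 32 they all map to the same bank, which is presumably the conflict the file name refers to. The usual remedy is to pad the tile by one column. Below is an illustrative sketch of that trick on the simpler transpose case; transposePadded, TILE_DIM, and the row-major layout are my assumptions, not part of this dataset.

#include <cuda_runtime.h>

#define TILE_DIM 32   // assumed tile edge, equal to the number of shared-memory banks

// Illustration only: the read tile[threadIdx.x][threadIdx.y] walks down a column,
// so with a 32x32 tile all 32 lanes of a warp would map to one bank; the extra
// padding column makes the row stride 33 words and spreads them over all banks.
__global__ void transposePadded(const float *in, float *out, int width, int height)
{
    __shared__ float tile[TILE_DIM][TILE_DIM + 1];   // +1 column of padding

    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    if (x < width && y < height)
        tile[threadIdx.y][threadIdx.x] = in[y * width + x];

    __syncthreads();

    x = blockIdx.y * TILE_DIM + threadIdx.x;   // runs over the original height
    y = blockIdx.x * TILE_DIM + threadIdx.y;   // runs over the original width
    if (x < height && y < width)
        out[y * height + x] = tile[threadIdx.x][threadIdx.y];
}

Launched as transposePadded<<<dim3((width + 31) / 32, (height + 31) / 32), dim3(32, 32)>>>(d_in, d_out, width, height), it covers the whole matrix. Applied to the kernels above, the same trick would just be __shared__ int B_tile[TILE_SIZE][TILE_SIZE + 1]; with no change to the indexing.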
3c9686450a44955d1c0a67b3d5b4eee69c4e671d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <iostream> #include "Logging.hpp" #include "tensor/Tensor.hpp" using namespace DLFS; using namespace std; /** * Declarations */ __global__ void SigmoidCrossEntropyFloat(TensorInfoCUDA logitShape, float *logits, TensorInfoCUDA labelShape, uint32_t *labels, TensorInfoCUDA outputShape, float *output, bool reduce_mean); __global__ void SigmoidCrossEntropyBackwardFloat(TensorInfoCUDA logitShape, float *logits, float *dLogits, TensorInfoCUDA labelShape, uint32_t *labels); /** * Kernels */ extern "C" void LaunchSigmoidCEKernel(CustomOpDataType dataType, TensorShape logitShape, void *logits, TensorShape labelShape, void *labels, TensorShape outputShape, void *output, bool reduce_mean) { int threads = logitShape[0]; TensorInfoCUDA ti[3] = {TensorInfoCUDA(logitShape), TensorInfoCUDA(labelShape), TensorInfoCUDA(outputShape)}; switch (dataType) { case CustomOpDataType::Float: LOG.DEBUG() << "Launching sigmoid cross entropy (float) kernel with " << threads << " threads"; hipLaunchKernelGGL(( SigmoidCrossEntropyFloat), dim3(1), dim3(threads), logitShape[0]*logitShape[3], 0, ti[0], (float *)logits, ti[1], (uint32_t *)labels, ti[2], (float *)output, reduce_mean); break; default: throw std::runtime_error("Not implemented."); } } extern "C" void LaunchSigmoidCEBackwardKernel(CustomOpDataType dataType, TensorShape logitShape, void *logits, void *dLogits, TensorShape labelShape, void *labels) { int threads = logitShape[0]; TensorInfoCUDA ti[2] = {TensorInfoCUDA(logitShape), TensorInfoCUDA(labelShape)}; switch (dataType) { case CustomOpDataType::Float: LOG.DEBUG() << "Launching sigmoid cross entropy backward (float) kernel with " << threads << " threads"; hipLaunchKernelGGL(( SigmoidCrossEntropyBackwardFloat), dim3(1), dim3(threads), 0, 0, ti[0], (float *)logits, (float*)dLogits, ti[1], (uint32_t *)labels); break; default: throw std::runtime_error("Not implemented."); } } /** * SigmoidCrossEntropyFloat * Logits - of shape batch_size x 1 x 1 x num_classes * labels - of shape batch_size x 1 x 1 x 1 * * Parallelized over the batch dimension. */ __global__ void SigmoidCrossEntropyFloat(TensorInfoCUDA logitShape, float *logits, TensorInfoCUDA labelShape, uint32_t *labels, TensorInfoCUDA outputShape, float *output, bool reduce_mean) { unsigned int batchIdx = threadIdx.x; extern __shared__ float sdata[]; // Check to make sure we are not out of bounds. if (batchIdx > logitShape.n-1) return; uint32_t label = labels[batchIdx]; float loss = 0.0; for (unsigned int classIdx = 0; classIdx < logitShape.c; classIdx++) { unsigned int index = batchIdx * logitShape.c + classIdx; float x = logits[index]; loss += max(x, 0.0) + logf(1.0f + expf(-abs(x))); } sdata[batchIdx] = loss - logits[batchIdx*logitShape.c + label]; if(!reduce_mean){ output[batchIdx] = sdata[batchIdx]; return; } __syncthreads(); for(unsigned int stride = 1; stride < logitShape.n; stride*=2){ if((threadIdx.x %(stride*2))==0){ sdata[threadIdx.x] += sdata[threadIdx.x+stride]; } __syncthreads(); // Sync must happen at every level of the pyramid; } if (threadIdx.x == 0){ output[0] = sdata[0] / static_cast<float>(logitShape.n); } } /** * SigmoidCrossEntropyBackwardFloat * Logits - of shape batch_size x 1 x 1 x num_classes * labels - of shape batch_size x 1 x 1 x 1 * * Parallelized over the batch dimension. 
*/ __global__ void SigmoidCrossEntropyBackwardFloat(TensorInfoCUDA logitShape, float *logits, float *dLogits, TensorInfoCUDA labelShape, uint32_t *labels) { unsigned int batchIdx = threadIdx.x; // Check to make sure we are not out of bounds. if (batchIdx > logitShape.n-1) return; uint32_t label = labels[batchIdx]; // float normalization = logitShape.n*logitShape.c; for (unsigned int classIdx = 0; classIdx < logitShape.c; classIdx++) { unsigned int index = batchIdx * logitShape.c + classIdx; float x = logits[index]; float z = label == classIdx ? 1.0f : 0.0f; float expAbs = expf(-x); float logTerm = expAbs / (1 + expAbs); float dCEdX = z - logTerm; dLogits[index] = dCEdX; } }
3c9686450a44955d1c0a67b3d5b4eee69c4e671d.cu
#include <cuda_runtime.h> #include <iostream> #include "Logging.hpp" #include "tensor/Tensor.hpp" using namespace DLFS; using namespace std; /** * Declarations */ __global__ void SigmoidCrossEntropyFloat(TensorInfoCUDA logitShape, float *logits, TensorInfoCUDA labelShape, uint32_t *labels, TensorInfoCUDA outputShape, float *output, bool reduce_mean); __global__ void SigmoidCrossEntropyBackwardFloat(TensorInfoCUDA logitShape, float *logits, float *dLogits, TensorInfoCUDA labelShape, uint32_t *labels); /** * Kernels */ extern "C" void LaunchSigmoidCEKernel(CustomOpDataType dataType, TensorShape logitShape, void *logits, TensorShape labelShape, void *labels, TensorShape outputShape, void *output, bool reduce_mean) { int threads = logitShape[0]; TensorInfoCUDA ti[3] = {TensorInfoCUDA(logitShape), TensorInfoCUDA(labelShape), TensorInfoCUDA(outputShape)}; switch (dataType) { case CustomOpDataType::Float: LOG.DEBUG() << "Launching sigmoid cross entropy (float) kernel with " << threads << " threads"; SigmoidCrossEntropyFloat<<<1, threads, logitShape[0]*logitShape[3]>>>(ti[0], (float *)logits, ti[1], (uint32_t *)labels, ti[2], (float *)output, reduce_mean); break; default: throw std::runtime_error("Not implemented."); } } extern "C" void LaunchSigmoidCEBackwardKernel(CustomOpDataType dataType, TensorShape logitShape, void *logits, void *dLogits, TensorShape labelShape, void *labels) { int threads = logitShape[0]; TensorInfoCUDA ti[2] = {TensorInfoCUDA(logitShape), TensorInfoCUDA(labelShape)}; switch (dataType) { case CustomOpDataType::Float: LOG.DEBUG() << "Launching sigmoid cross entropy backward (float) kernel with " << threads << " threads"; SigmoidCrossEntropyBackwardFloat<<<1, threads>>>( ti[0], (float *)logits, (float*)dLogits, ti[1], (uint32_t *)labels); break; default: throw std::runtime_error("Not implemented."); } } /** * SigmoidCrossEntropyFloat * Logits - of shape batch_size x 1 x 1 x num_classes * labels - of shape batch_size x 1 x 1 x 1 * * Parallelized over the batch dimension. */ __global__ void SigmoidCrossEntropyFloat(TensorInfoCUDA logitShape, float *logits, TensorInfoCUDA labelShape, uint32_t *labels, TensorInfoCUDA outputShape, float *output, bool reduce_mean) { unsigned int batchIdx = threadIdx.x; extern __shared__ float sdata[]; // Check to make sure we are not out of bounds. if (batchIdx > logitShape.n-1) return; uint32_t label = labels[batchIdx]; float loss = 0.0; for (unsigned int classIdx = 0; classIdx < logitShape.c; classIdx++) { unsigned int index = batchIdx * logitShape.c + classIdx; float x = logits[index]; loss += max(x, 0.0) + logf(1.0f + expf(-abs(x))); } sdata[batchIdx] = loss - logits[batchIdx*logitShape.c + label]; if(!reduce_mean){ output[batchIdx] = sdata[batchIdx]; return; } __syncthreads(); for(unsigned int stride = 1; stride < logitShape.n; stride*=2){ if((threadIdx.x %(stride*2))==0){ sdata[threadIdx.x] += sdata[threadIdx.x+stride]; } __syncthreads(); // Sync must happen at every level of the pyramid; } if (threadIdx.x == 0){ output[0] = sdata[0] / static_cast<float>(logitShape.n); } } /** * SigmoidCrossEntropyBackwardFloat * Logits - of shape batch_size x 1 x 1 x num_classes * labels - of shape batch_size x 1 x 1 x 1 * * Parallelized over the batch dimension. */ __global__ void SigmoidCrossEntropyBackwardFloat(TensorInfoCUDA logitShape, float *logits, float *dLogits, TensorInfoCUDA labelShape, uint32_t *labels) { unsigned int batchIdx = threadIdx.x; // Check to make sure we are not out of bounds. 
if (batchIdx > logitShape.n-1) return; uint32_t label = labels[batchIdx]; // float normalization = logitShape.n*logitShape.c; for (unsigned int classIdx = 0; classIdx < logitShape.c; classIdx++) { unsigned int index = batchIdx * logitShape.c + classIdx; float x = logits[index]; float z = label == classIdx ? 1.0f : 0.0f; float expAbs = expf(-x); float logTerm = expAbs / (1 + expAbs); float dCEdX = z - logTerm; dLogits[index] = dCEdX; } }
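As context for the forward pass in the pair above (this is the standard identity, not something stated in the repository): each term the per-class loop adds is the numerically stable softplus, and subtracting logits[batchIdx*logitShape.c + label] once afterwards turns the per-class sum into a sigmoid cross entropy against a one-hot target z,

\[ \max(x,0) + \log\bigl(1 + e^{-\lvert x\rvert}\bigr) = \log\bigl(1 + e^{x}\bigr), \]
\[ L = \sum_{c}\Bigl[\log\bigl(1 + e^{x_c}\bigr) - z_c\,x_c\Bigr] = -\sum_{c}\Bigl[z_c\log\sigma(x_c) + (1 - z_c)\log\bigl(1 - \sigma(x_c)\bigr)\Bigr], \]

where \sigma is the logistic function. The left-hand form never exponentiates a positive argument, which is why the kernel can evaluate it in single precision without overflow; when reduce_mean is set, the shared-memory tree reduction that follows averages these per-sample losses over the batch.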
6cfa366871295ad319f1535bea0c21c8ad7777d1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void bcnn_cuda_grad_bias_kernel(float *grad_bias, float *grad_data, int num_channels, int spatial_size)
{
    int offset = blockIdx.x * blockDim.x + threadIdx.x;
    int channel = blockIdx.y;
    int batch_size = blockIdx.z;

    if (offset < spatial_size)
        grad_bias[channel] += grad_data[(batch_size * num_channels + channel) * spatial_size + offset];
}
6cfa366871295ad319f1535bea0c21c8ad7777d1.cu
#include "includes.h" __global__ void bcnn_cuda_grad_bias_kernel(float *grad_bias, float *grad_data, int num_channels, int spatial_size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int channel = blockIdx.y; int batch_size = blockIdx.z; if (offset < spatial_size) grad_bias[channel] += grad_data[(batch_size * num_channels + channel) * spatial_size + offset]; }
19fbd19a67d34f53942c0f8d968fff4fc7b4cdb4.hip
// !!! This is a file automatically generated by hipify!!! /* * flux_ML_iface.c * * Created on: Nov 25, 2015 * Author: erik */ #include <stdio.h> #include <string.h> #include <stdarg.h> #ifdef UNIX #include <stdint.h> #include <unistd.h> #endif #include "mex.h" #include "mpi.h" #include "hip/hip_runtime.h" #include "roctracer/roctx.h" #include "cudaCommon.h" #include "cudaFluidStep.h" #include "flux.h" // Only uncomment this if you plan to debug this file. // This will cause it to require output arguments to return data in, // and perturb code behavior by generating writes to the output debug arrays //#define DEBUGMODE FluidMethods mlmethodToEnum(int mlmethod); #ifdef DEBUGMODE #include "debug_inserts.h" #endif void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { int wanted_nlhs = 0; #ifdef DEBUGMODE wanted_nlhs = 1; #endif if ((nrhs!= 6) || (nlhs != wanted_nlhs)) mexErrMsgTxt("Wrong number of arguments: need flux_ML_iface(fluid, bx, by, bz, [dt, purehydro?, order, step #, step method], run.geometry)\n"); MGArray fluid[5]; #ifdef USE_NVTX roctxRangePush(".cu flux step"); #endif /* Access bx/by/bz cell-centered arrays if magnetic!!!! */ /* ... */ int idxpost = 4; // 8 for the old way double *scalars = mxGetPr(prhs[idxpost]); if(mxGetNumberOfElements(prhs[idxpost]) != 5) { DROP_MEX_ERROR("Must rx 5 parameters in params vector: [dt, purehydro?, order, step #, step method]"); } double dt = scalars[0]; /* Access lambda (dt / dx) */ int ishydro = scalars[1]; /* determine if purely hydrodynamic */ int sweepDirect = (int)scalars[2]; /* Identify if forwards (sweepDirect = 1) or backwards (-1) */ int stepNum = (int)scalars[3]; /* step number (used to pick the permutation of the fluid propagators) */ int stepMethod = (int)scalars[4]; /* 1=HLL, 2=HLLC, 3=Xin/Jin */ /* Access topology structure */ ParallelTopology topo; FluidStepParams fsp; fsp.geometry = accessMatlabGeometryClass(prhs[idxpost+1]); const mxArray *mxtopo = mxGetProperty(prhs[idxpost+1], 0, "topology"); topoStructureToC(mxtopo, &topo); fsp.dt = dt; fsp.onlyHydro = ishydro; fsp.stepNumber = stepNum; fsp.stepDirection = sweepDirect; fsp.stepMethod = mlmethodToEnum(stepMethod); int numFluids = mxGetNumberOfElements(prhs[0]); int fluidct; CHECK_CUDA_ERROR("entering compiled fluid step"); int status; MGArray tempStorage; tempStorage.nGPUs = -1; // not allocated int numarrays; #ifdef DEBUGMODE numarrays = 6 + DBG_NUMARRAYS; #else #ifdef USE_RK3 numarrays = 11; #else numarrays = 6; #endif #endif for(fluidct = 0; fluidct < numFluids; fluidct++) { ThermoDetails therm = accessMatlabThermoDetails(mxGetProperty(prhs[0], fluidct, "thermoDetails")); status = MGA_accessFluidCanister(prhs[0], fluidct, &fluid[0]); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break; if(tempStorage.nGPUs == -1) { roctxMark("flux_ML_iface.cu:109 large malloc 6 arrays"); status = MGA_allocSlab(fluid, &tempStorage, numarrays); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break; } double rhoMin; mxArray *flprop = mxGetProperty(prhs[0], fluidct, "MINMASS"); if(flprop != NULL) { rhoMin = *((double *)mxGetPr(flprop)); } else { PRINT_FAULT_HEADER; printf("Unable to access fluid(%i).MINMASS property.\n", fluidct); PRINT_FAULT_FOOTER; status = ERROR_NULL_POINTER; break; } fsp.thermoGamma = therm.gamma; fsp.Cisothermal = therm.Cisothermal; if(therm.Cisothermal != -1) { fsp.thermoGamma = 2; // This makes the hydro pressure solver return internal energy when it multiplies eint by (gamma-1) } fsp.minimumRho = rhoMin; status = performFluidUpdate_3D(&fluid[0], 
&topo, fsp, &tempStorage); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break; } // This was allocated & re-used many times in performFluidUpdate_3D if((tempStorage.nGPUs != -1) && (status == SUCCESSFUL)) { #ifdef USE_NVTX roctxMark("Large free flux_ML_iface.cu:144"); #endif status = MGA_delete(&tempStorage); } if(status != SUCCESSFUL) { DROP_MEX_ERROR("Fluid update code returned unsuccessfully!"); } #ifdef SYNCMEX MGA_sledgehammerSequentialize(&fluid[0]); #endif #ifdef USE_NVTX roctxRangePop(); #endif } FluidMethods mlmethodToEnum(int mlmethod) { FluidMethods f; switch(mlmethod) { case 1: f = METHOD_HLL; break; case 2: f = METHOD_HLLC; break; case 3: f = METHOD_XINJIN; break; } return f; }
19fbd19a67d34f53942c0f8d968fff4fc7b4cdb4.cu
/* * flux_ML_iface.c * * Created on: Nov 25, 2015 * Author: erik */ #include <stdio.h> #include <string.h> #include <stdarg.h> #ifdef UNIX #include <stdint.h> #include <unistd.h> #endif #include "mex.h" #include "mpi.h" #include "cuda.h" #include "nvToolsExt.h" #include "cudaCommon.h" #include "cudaFluidStep.h" #include "flux.h" // Only uncomment this if you plan to debug this file. // This will cause it to require output arguments to return data in, // and perturb code behavior by generating writes to the output debug arrays //#define DEBUGMODE FluidMethods mlmethodToEnum(int mlmethod); #ifdef DEBUGMODE #include "debug_inserts.h" #endif void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { int wanted_nlhs = 0; #ifdef DEBUGMODE wanted_nlhs = 1; #endif if ((nrhs!= 6) || (nlhs != wanted_nlhs)) mexErrMsgTxt("Wrong number of arguments: need flux_ML_iface(fluid, bx, by, bz, [dt, purehydro?, order, step #, step method], run.geometry)\n"); MGArray fluid[5]; #ifdef USE_NVTX nvtxRangePush(".cu flux step"); #endif /* Access bx/by/bz cell-centered arrays if magnetic!!!! */ /* ... */ int idxpost = 4; // 8 for the old way double *scalars = mxGetPr(prhs[idxpost]); if(mxGetNumberOfElements(prhs[idxpost]) != 5) { DROP_MEX_ERROR("Must rx 5 parameters in params vector: [dt, purehydro?, order, step #, step method]"); } double dt = scalars[0]; /* Access lambda (dt / dx) */ int ishydro = scalars[1]; /* determine if purely hydrodynamic */ int sweepDirect = (int)scalars[2]; /* Identify if forwards (sweepDirect = 1) or backwards (-1) */ int stepNum = (int)scalars[3]; /* step number (used to pick the permutation of the fluid propagators) */ int stepMethod = (int)scalars[4]; /* 1=HLL, 2=HLLC, 3=Xin/Jin */ /* Access topology structure */ ParallelTopology topo; FluidStepParams fsp; fsp.geometry = accessMatlabGeometryClass(prhs[idxpost+1]); const mxArray *mxtopo = mxGetProperty(prhs[idxpost+1], 0, "topology"); topoStructureToC(mxtopo, &topo); fsp.dt = dt; fsp.onlyHydro = ishydro; fsp.stepNumber = stepNum; fsp.stepDirection = sweepDirect; fsp.stepMethod = mlmethodToEnum(stepMethod); int numFluids = mxGetNumberOfElements(prhs[0]); int fluidct; CHECK_CUDA_ERROR("entering compiled fluid step"); int status; MGArray tempStorage; tempStorage.nGPUs = -1; // not allocated int numarrays; #ifdef DEBUGMODE numarrays = 6 + DBG_NUMARRAYS; #else #ifdef USE_RK3 numarrays = 11; #else numarrays = 6; #endif #endif for(fluidct = 0; fluidct < numFluids; fluidct++) { ThermoDetails therm = accessMatlabThermoDetails(mxGetProperty(prhs[0], fluidct, "thermoDetails")); status = MGA_accessFluidCanister(prhs[0], fluidct, &fluid[0]); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break; if(tempStorage.nGPUs == -1) { nvtxMark("flux_ML_iface.cu:109 large malloc 6 arrays"); status = MGA_allocSlab(fluid, &tempStorage, numarrays); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break; } double rhoMin; mxArray *flprop = mxGetProperty(prhs[0], fluidct, "MINMASS"); if(flprop != NULL) { rhoMin = *((double *)mxGetPr(flprop)); } else { PRINT_FAULT_HEADER; printf("Unable to access fluid(%i).MINMASS property.\n", fluidct); PRINT_FAULT_FOOTER; status = ERROR_NULL_POINTER; break; } fsp.thermoGamma = therm.gamma; fsp.Cisothermal = therm.Cisothermal; if(therm.Cisothermal != -1) { fsp.thermoGamma = 2; // This makes the hydro pressure solver return internal energy when it multiplies eint by (gamma-1) } fsp.minimumRho = rhoMin; status = performFluidUpdate_3D(&fluid[0], &topo, fsp, &tempStorage); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break; 
} // This was allocated & re-used many times in performFluidUpdate_3D if((tempStorage.nGPUs != -1) && (status == SUCCESSFUL)) { #ifdef USE_NVTX nvtxMark("Large free flux_ML_iface.cu:144"); #endif status = MGA_delete(&tempStorage); } if(status != SUCCESSFUL) { DROP_MEX_ERROR("Fluid update code returned unsuccessfully!"); } #ifdef SYNCMEX MGA_sledgehammerSequentialize(&fluid[0]); #endif #ifdef USE_NVTX nvtxRangePop(); #endif } FluidMethods mlmethodToEnum(int mlmethod) { FluidMethods f; switch(mlmethod) { case 1: f = METHOD_HLL; break; case 2: f = METHOD_HLLC; break; case 3: f = METHOD_XINJIN; break; } return f; }
8f2c5995a6737c4eaf59143f849f5071ec96fbb7.hip
// !!! This is a file automatically generated by hipify!!!
/* just include all kernels. */
#include "extract_kernel.hip"
#include "prepare_kernel.cu"
#include "reduce_kernel.hip"
#include "srad_kernel.hip"
#include "srad2_kernel.cu"
#include "compress_kernel.cu"
8f2c5995a6737c4eaf59143f849f5071ec96fbb7.cu
/* just include all kernels. */
#include "extract_kernel.cu"
#include "prepare_kernel.cu"
#include "reduce_kernel.cu"
#include "srad_kernel.cu"
#include "srad2_kernel.cu"
#include "compress_kernel.cu"
5b5498b8c4e9bdef5d05014415bf2d9efbab111d.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>

__global__ void foo(int *p) {
    p[threadIdx.x] = threadIdx.x;
}
5b5498b8c4e9bdef5d05014415bf2d9efbab111d.cu
#include <cuda.h>

__global__ void foo(int *p) {
    p[threadIdx.x] = threadIdx.x;
}
20f6a7de49ae58024bab3b9336991a36333d4e87.hip
// !!! This is a file automatically generated by hipify!!! /* Daniel Willen, 2019 * * Solve the transient heat conduction problem with homogeneous Dirichlet * boundary conditions: * * u(x={0,L}) = u(y={0,L}) = 0 * * and initial condition: * * u(x,y,0) = sin(x) * sin(y) * * on the domain 0 <= x,y <= L, with L = pi. * * This program solves the above problem on a single GPU with the Jacobi method. * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #define PI 3.14159265358979323846 #define MAX_THREADS_DIM 16 // Note that this depends on the hardware /* Note on the structure of this file: * - Cuda device constant memory declarations are at the top * - Functions definitions are in the middle. Functions include: * - - parse_cmdline: Read command-line arguments for domain size * - - jacobi_solver: Advance the soln to the next time step using Jacobi * - - check_error: Calculate the error b/t the numeric and analytic solns * - The `main' function is at the bottom * * Note that it is good practice to use header files and break functions out * into separate files. This has not been done here for simplicity. */ /*** Auxiliary Functions ***/ /* Read the command line inputs */ // - argv[0] is the program name // - argv[1] is the first input (number of points) int parse_cmdline(int argc, char *argv[]) { int nx; if (argc >= 2) { nx = atoi(argv[1]); // Number of grid points printf("Grid is %d by %d\n\n", nx, nx); } else { printf("Input error. Run like: \n\n"); printf(" $ ./parallel.c n\n\n"); printf(" where n is the number of grid cells in one dimension\n"); exit(EXIT_FAILURE); } return nx; } /*** GPU Constants ***/ __constant__ int _nx; __constant__ int _ny; __constant__ double _Lx; __constant__ double _Ly; __constant__ double _dx; __constant__ double _dy; __constant__ double _dt; __constant__ double _D; __constant__ double _pref; /******************************************************************************* * Step IV: Launch the GPU kernel to advance to the next time step with the * * Jacobi method here. * ******************************************************************************/ __global__ void jacobi_solver(double* u, double* u_new) { // int ti = blockIdx.x*blockDim.x + threadIdx.x; // int tj = blockIdx.y*blockDim.y + threadIdx.y; // if ((ti >= 1 && ti < (_nx-1)) && // (tj >= 1 && tj < (_ny-1))) { // u_new[ti + tj*_nx] = // u[ti + tj*_nx] + _pref * ( // u[(ti+1) + tj*_nx] + // u[(ti-1) + tj*_nx] + // u[ti + (tj+1)*_nx] + // u[ti + (tj-1)*_nx] + // u[ti + tj*_nx] * (-4) // ); // } __shared__ double s_u[MAX_THREADS_DIM*MAX_THREADS_DIM]; int si = threadIdx.x; int sj = threadIdx.y; int ti = blockIdx.x*(blockDim.x-2) + threadIdx.x; int tj = blockIdx.y*(blockDim.y-2) + threadIdx.y; if (ti < _nx && tj < _ny) { s_u[si + sj*blockDim.x] = u[ti + tj*_nx]; } __syncthreads(); if ((ti >= 1 && ti < (_nx-1)) && (tj >= 1 && tj < (_ny-1)) && (si > 0 && si < (blockDim.x-1)) && (sj > 0 && sj < (blockDim.y-1))) { u_new[ti + tj*_nx] = s_u[si + sj*blockDim.x] + _pref * ( s_u[(si+1) + sj*blockDim.x] + s_u[(si-1) + sj*blockDim.x] + s_u[si + (sj+1)*blockDim.x] + s_u[si + (sj-1)*blockDim.x] + s_u[si + sj*blockDim.x] * (-4) ); } return; } /****************************************************************************** * Step V: Launch the GPU kernel to calculate the error at each grid point * * here. 
* *****************************************************************************/ __global__ void check_error(double* u, double* error, double time) { // int ti = blockIdx.x*blockDim.x + threadIdx.x; // int tj = blockIdx.y*blockDim.y + threadIdx.y; // if ((ti >= 1 && ti < (_nx-1)) && // (tj >= 1 && tj < (_ny-1))) { // error[ti + tj*_nx] = u[ti + tj*_nx] / (sin(ti*_dx) * sin(tj*_dy) * exp(-2*_D*time)) - 1; // } int ti = blockIdx.x*(blockDim.x-2) + threadIdx.x; int tj = blockIdx.y*(blockDim.y-2) + threadIdx.y; if ((ti >= 1 && ti < (_nx-1)) && (tj >= 1 && tj < (_ny-1)) && (threadIdx.x > 0 && threadIdx.x < (blockDim.x-1)) && (threadIdx.y > 0 && threadIdx.y < (blockDim.y-1))) { error[ti + tj*_nx] = u[ti + tj*_nx] / (sin(ti*_dx) * sin(tj*_dy) * exp(-2*_D*time)) - 1; } return; } /*** Main Function ***/ int main(int argc, char *argv[]) { /* Variable declaration */ double Lx = PI; // Domain length in x-direction double Ly = PI; // Domain length in y-direction double D = 1.; // Diffusion constant int nx, ny; // Grid points (grid cells + 1) double dx, dy; // Grid spacing double dt; // Time step size double sim_time; // Length of sim time, arbitrary for simplicity double pref; // Pre-factor in the Jacobi method double error = 0.; // Mean percent-difference at each grid point error = error; // To prevent compiler warning /* Parse command-line for problem size */ nx = parse_cmdline(argc, argv); ny = nx; // Assume a square grid /* Initialize variables */ dx = Lx / (nx - 1); // Cell width in x-direction dy = Ly / (ny - 1); // Cell width in y-direction dt = 0.25*dx*dy/D; // Limited by diffusive stability sim_time = Lx*Ly/D; // Arbitrary simulation length pref = D*dt/(dx*dx); // Jacobi pre-factor printf("Parameters\n"); printf("---------------------------\n"); printf("Lx = %.5lf\n", Lx); printf("Lx = %.5lf\n", Ly); printf("T = %.5lf\n", sim_time); printf("D = %.5lf\n", D); printf("nx = %d\n", nx); printf("ny = %d\n", nx); printf("dx = %.5lf\n", dx); printf("dy = %.5lf\n", dy); printf("dt = %.5lf\n", dt); printf("\n"); hipMemcpyToSymbol(_nx, &nx, sizeof(int)); hipMemcpyToSymbol(_ny, &ny, sizeof(int)); hipMemcpyToSymbol(_Lx, &Lx, sizeof(double)); hipMemcpyToSymbol(_Ly, &Ly, sizeof(double)); hipMemcpyToSymbol(_dx, &dx, sizeof(double)); hipMemcpyToSymbol(_dy, &dy, sizeof(double)); hipMemcpyToSymbol(_dt, &dt, sizeof(double)); hipMemcpyToSymbol(_D, &D, sizeof(double)); hipMemcpyToSymbol(_pref, &pref, sizeof(double)); /***************************************************************************** * Step I: Declare, allocate, and initialize memory for the field variable * * u on the CPU. * ****************************************************************************/ double *u; u = (double*) malloc(nx*ny * sizeof(double)); for (int i = 0; i < nx; i++) { for (int j = 0; j < ny; j++) { u[i+j*nx] = sin(i*dx) * sin(j*dy); } } /***************************************************************************** * Step II: Declare and allocate GPU memory for _u, _u_new, and _error. Copy * * the initial condition to the GPU. 
* ****************************************************************************/ double *_u, *_u_new, *_error; hipMalloc(&_u, nx*ny * sizeof(double)); hipMalloc(&_u_new, nx*ny * sizeof(double)); hipMalloc(&_error, nx*ny * sizeof(double)); hipMemcpy(_u, u, nx*ny * sizeof(double), hipMemcpyHostToDevice); // Set the new soln and error to 0 hipMemset(_u_new, 0., nx*ny * sizeof(double)); hipMemset(_error, 0., nx*ny * sizeof(double)); // Create thrust pointers to device memory for error calculation thrust::device_ptr<double> t_error(_error); /***************************************************************************** * Step III: Set up the kernel execution configuration for the domain based * * on the input domain size and the MAX_THREADS_DIM variable. * ****************************************************************************/ int threads_x = MAX_THREADS_DIM; int threads_y = MAX_THREADS_DIM; // int blocks_x = (int) ceil((double) nx / (double) threads_x); // int blocks_y = (int) ceil((double) ny / (double) threads_y); int blocks_x = (int) ceil((double) nx / (double) (threads_x - 2)); int blocks_y = (int) ceil((double) ny / (double) (threads_y - 2)); dim3 dim_blocks(threads_x, threads_y); dim3 num_blocks(blocks_x, blocks_y); printf("Parallelization\n"); printf("---------------------------\n"); printf("MAX_THREADS_DIM = %d\n", MAX_THREADS_DIM); printf("threads_x = %d\n", threads_x); printf("threads_y = %d\n", threads_y); printf("blocks_x = %d\n", blocks_x); printf("blocks_y = %d\n", blocks_y); printf("\n"); /***************************/ /* Main Time-Stepping Loop */ /***************************/ for (double time = 0.; time <= sim_time; time += dt) { /*************************************************************************** * Step IV: Launch the GPU kernel to advance to the next time step with * * the Jacobi method here. * **************************************************************************/ hipLaunchKernelGGL(( jacobi_solver), dim3(num_blocks), dim3(dim_blocks), 0, 0, _u, _u_new); /*************************************************************************** * Step V: Launch the GPU kernel to calculate the error at each grid point * * here. * **************************************************************************/ hipLaunchKernelGGL(( check_error), dim3(num_blocks), dim3(dim_blocks), 0, 0, _u, _error, time); // Use thrust to do a parallel reduction on the error error = thrust::reduce(t_error, t_error + nx*ny, 0., thrust::plus<double>()); printf("Error at t* = %.5lf is %e\n", time*D/(Lx*Lx), error/(nx*ny)); // Copy new soln to old. This also blocks to ensure computations are finished. hipMemcpy(_u, _u_new, nx*ny * sizeof(double), hipMemcpyDeviceToDevice); } /***************************************************************************** * Step VI: Copy the memory back to the CPU. * ****************************************************************************/ hipMemcpy(u, _u, nx*ny * sizeof(double), hipMemcpyDeviceToHost); /***************************************************************************** * Step I and Step II: Free the memory that you declared and allocated * * earlier in the program. * ****************************************************************************/ hipFree(_u); hipFree(_u_new); hipFree(_error); free(u); return EXIT_SUCCESS; }
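Putting the comments at the top of the solver above into formulas, as a sketch of what the code assumes rather than a statement lifted from the original author: the field obeys the two-dimensional heat equation with homogeneous Dirichlet walls on [0, \pi]^2, the initial condition is an eigenfunction of the Laplacian, and jacobi_solver applies the explicit (FTCS) update with \Delta x = \Delta y,

\[ \frac{\partial u}{\partial t} = D\!\left(\frac{\partial^{2} u}{\partial x^{2}} + \frac{\partial^{2} u}{\partial y^{2}}\right), \qquad u(x,y,0) = \sin x \,\sin y \;\Rightarrow\; u(x,y,t) = e^{-2Dt}\,\sin x\,\sin y, \]
\[ u_{i,j}^{\,n+1} = u_{i,j}^{\,n} + \frac{D\,\Delta t}{\Delta x^{2}}\Bigl(u_{i+1,j}^{\,n} + u_{i-1,j}^{\,n} + u_{i,j+1}^{\,n} + u_{i,j-1}^{\,n} - 4\,u_{i,j}^{\,n}\Bigr), \qquad \frac{D\,\Delta t}{\Delta x^{2}} \le \frac{1}{4}. \]

The exponential on the right is exactly what check_error divides by, and the stability bound is why the host sets dt = 0.25*dx*dy/D.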
20f6a7de49ae58024bab3b9336991a36333d4e87.cu
/* Daniel Willen, 2019 * * Solve the transient heat conduction problem with homogeneous Dirichlet * boundary conditions: * * u(x={0,L}) = u(y={0,L}) = 0 * * and initial condition: * * u(x,y,0) = sin(x) * sin(y) * * on the domain 0 <= x,y <= L, with L = pi. * * This program solves the above problem on a single GPU with the Jacobi method. * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cuda.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #define PI 3.14159265358979323846 #define MAX_THREADS_DIM 16 // Note that this depends on the hardware /* Note on the structure of this file: * - Cuda device constant memory declarations are at the top * - Functions definitions are in the middle. Functions include: * - - parse_cmdline: Read command-line arguments for domain size * - - jacobi_solver: Advance the soln to the next time step using Jacobi * - - check_error: Calculate the error b/t the numeric and analytic solns * - The `main' function is at the bottom * * Note that it is good practice to use header files and break functions out * into separate files. This has not been done here for simplicity. */ /*** Auxiliary Functions ***/ /* Read the command line inputs */ // - argv[0] is the program name // - argv[1] is the first input (number of points) int parse_cmdline(int argc, char *argv[]) { int nx; if (argc >= 2) { nx = atoi(argv[1]); // Number of grid points printf("Grid is %d by %d\n\n", nx, nx); } else { printf("Input error. Run like: \n\n"); printf(" $ ./parallel.c n\n\n"); printf(" where n is the number of grid cells in one dimension\n"); exit(EXIT_FAILURE); } return nx; } /*** GPU Constants ***/ __constant__ int _nx; __constant__ int _ny; __constant__ double _Lx; __constant__ double _Ly; __constant__ double _dx; __constant__ double _dy; __constant__ double _dt; __constant__ double _D; __constant__ double _pref; /******************************************************************************* * Step IV: Launch the GPU kernel to advance to the next time step with the * * Jacobi method here. * ******************************************************************************/ __global__ void jacobi_solver(double* u, double* u_new) { // int ti = blockIdx.x*blockDim.x + threadIdx.x; // int tj = blockIdx.y*blockDim.y + threadIdx.y; // if ((ti >= 1 && ti < (_nx-1)) && // (tj >= 1 && tj < (_ny-1))) { // u_new[ti + tj*_nx] = // u[ti + tj*_nx] + _pref * ( // u[(ti+1) + tj*_nx] + // u[(ti-1) + tj*_nx] + // u[ti + (tj+1)*_nx] + // u[ti + (tj-1)*_nx] + // u[ti + tj*_nx] * (-4) // ); // } __shared__ double s_u[MAX_THREADS_DIM*MAX_THREADS_DIM]; int si = threadIdx.x; int sj = threadIdx.y; int ti = blockIdx.x*(blockDim.x-2) + threadIdx.x; int tj = blockIdx.y*(blockDim.y-2) + threadIdx.y; if (ti < _nx && tj < _ny) { s_u[si + sj*blockDim.x] = u[ti + tj*_nx]; } __syncthreads(); if ((ti >= 1 && ti < (_nx-1)) && (tj >= 1 && tj < (_ny-1)) && (si > 0 && si < (blockDim.x-1)) && (sj > 0 && sj < (blockDim.y-1))) { u_new[ti + tj*_nx] = s_u[si + sj*blockDim.x] + _pref * ( s_u[(si+1) + sj*blockDim.x] + s_u[(si-1) + sj*blockDim.x] + s_u[si + (sj+1)*blockDim.x] + s_u[si + (sj-1)*blockDim.x] + s_u[si + sj*blockDim.x] * (-4) ); } return; } /****************************************************************************** * Step V: Launch the GPU kernel to calculate the error at each grid point * * here. 
* *****************************************************************************/ __global__ void check_error(double* u, double* error, double time) { // int ti = blockIdx.x*blockDim.x + threadIdx.x; // int tj = blockIdx.y*blockDim.y + threadIdx.y; // if ((ti >= 1 && ti < (_nx-1)) && // (tj >= 1 && tj < (_ny-1))) { // error[ti + tj*_nx] = u[ti + tj*_nx] / (sin(ti*_dx) * sin(tj*_dy) * exp(-2*_D*time)) - 1; // } int ti = blockIdx.x*(blockDim.x-2) + threadIdx.x; int tj = blockIdx.y*(blockDim.y-2) + threadIdx.y; if ((ti >= 1 && ti < (_nx-1)) && (tj >= 1 && tj < (_ny-1)) && (threadIdx.x > 0 && threadIdx.x < (blockDim.x-1)) && (threadIdx.y > 0 && threadIdx.y < (blockDim.y-1))) { error[ti + tj*_nx] = u[ti + tj*_nx] / (sin(ti*_dx) * sin(tj*_dy) * exp(-2*_D*time)) - 1; } return; } /*** Main Function ***/ int main(int argc, char *argv[]) { /* Variable declaration */ double Lx = PI; // Domain length in x-direction double Ly = PI; // Domain length in y-direction double D = 1.; // Diffusion constant int nx, ny; // Grid points (grid cells + 1) double dx, dy; // Grid spacing double dt; // Time step size double sim_time; // Length of sim time, arbitrary for simplicity double pref; // Pre-factor in the Jacobi method double error = 0.; // Mean percent-difference at each grid point error = error; // To prevent compiler warning /* Parse command-line for problem size */ nx = parse_cmdline(argc, argv); ny = nx; // Assume a square grid /* Initialize variables */ dx = Lx / (nx - 1); // Cell width in x-direction dy = Ly / (ny - 1); // Cell width in y-direction dt = 0.25*dx*dy/D; // Limited by diffusive stability sim_time = Lx*Ly/D; // Arbitrary simulation length pref = D*dt/(dx*dx); // Jacobi pre-factor printf("Parameters\n"); printf("---------------------------\n"); printf("Lx = %.5lf\n", Lx); printf("Lx = %.5lf\n", Ly); printf("T = %.5lf\n", sim_time); printf("D = %.5lf\n", D); printf("nx = %d\n", nx); printf("ny = %d\n", nx); printf("dx = %.5lf\n", dx); printf("dy = %.5lf\n", dy); printf("dt = %.5lf\n", dt); printf("\n"); cudaMemcpyToSymbol(_nx, &nx, sizeof(int)); cudaMemcpyToSymbol(_ny, &ny, sizeof(int)); cudaMemcpyToSymbol(_Lx, &Lx, sizeof(double)); cudaMemcpyToSymbol(_Ly, &Ly, sizeof(double)); cudaMemcpyToSymbol(_dx, &dx, sizeof(double)); cudaMemcpyToSymbol(_dy, &dy, sizeof(double)); cudaMemcpyToSymbol(_dt, &dt, sizeof(double)); cudaMemcpyToSymbol(_D, &D, sizeof(double)); cudaMemcpyToSymbol(_pref, &pref, sizeof(double)); /***************************************************************************** * Step I: Declare, allocate, and initialize memory for the field variable * * u on the CPU. * ****************************************************************************/ double *u; u = (double*) malloc(nx*ny * sizeof(double)); for (int i = 0; i < nx; i++) { for (int j = 0; j < ny; j++) { u[i+j*nx] = sin(i*dx) * sin(j*dy); } } /***************************************************************************** * Step II: Declare and allocate GPU memory for _u, _u_new, and _error. Copy * * the initial condition to the GPU. 
* ****************************************************************************/ double *_u, *_u_new, *_error; cudaMalloc(&_u, nx*ny * sizeof(double)); cudaMalloc(&_u_new, nx*ny * sizeof(double)); cudaMalloc(&_error, nx*ny * sizeof(double)); cudaMemcpy(_u, u, nx*ny * sizeof(double), cudaMemcpyHostToDevice); // Set the new soln and error to 0 cudaMemset(_u_new, 0., nx*ny * sizeof(double)); cudaMemset(_error, 0., nx*ny * sizeof(double)); // Create thrust pointers to device memory for error calculation thrust::device_ptr<double> t_error(_error); /***************************************************************************** * Step III: Set up the kernel execution configuration for the domain based * * on the input domain size and the MAX_THREADS_DIM variable. * ****************************************************************************/ int threads_x = MAX_THREADS_DIM; int threads_y = MAX_THREADS_DIM; // int blocks_x = (int) ceil((double) nx / (double) threads_x); // int blocks_y = (int) ceil((double) ny / (double) threads_y); int blocks_x = (int) ceil((double) nx / (double) (threads_x - 2)); int blocks_y = (int) ceil((double) ny / (double) (threads_y - 2)); dim3 dim_blocks(threads_x, threads_y); dim3 num_blocks(blocks_x, blocks_y); printf("Parallelization\n"); printf("---------------------------\n"); printf("MAX_THREADS_DIM = %d\n", MAX_THREADS_DIM); printf("threads_x = %d\n", threads_x); printf("threads_y = %d\n", threads_y); printf("blocks_x = %d\n", blocks_x); printf("blocks_y = %d\n", blocks_y); printf("\n"); /***************************/ /* Main Time-Stepping Loop */ /***************************/ for (double time = 0.; time <= sim_time; time += dt) { /*************************************************************************** * Step IV: Launch the GPU kernel to advance to the next time step with * * the Jacobi method here. * **************************************************************************/ jacobi_solver<<<num_blocks, dim_blocks>>>(_u, _u_new); /*************************************************************************** * Step V: Launch the GPU kernel to calculate the error at each grid point * * here. * **************************************************************************/ check_error<<<num_blocks, dim_blocks>>>(_u, _error, time); // Use thrust to do a parallel reduction on the error error = thrust::reduce(t_error, t_error + nx*ny, 0., thrust::plus<double>()); printf("Error at t* = %.5lf is %e\n", time*D/(Lx*Lx), error/(nx*ny)); // Copy new soln to old. This also blocks to ensure computations are finished. cudaMemcpy(_u, _u_new, nx*ny * sizeof(double), cudaMemcpyDeviceToDevice); } /***************************************************************************** * Step VI: Copy the memory back to the CPU. * ****************************************************************************/ cudaMemcpy(u, _u, nx*ny * sizeof(double), cudaMemcpyDeviceToHost); /***************************************************************************** * Step I and Step II: Free the memory that you declared and allocated * * earlier in the program. * ****************************************************************************/ cudaFree(_u); cudaFree(_u_new); cudaFree(_error); free(u); return EXIT_SUCCESS; }
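Neither version of the solver checks the return codes of the runtime calls it makes (the memcpy-to-symbol calls, the allocations, the kernel launches). A common pattern is a checking macro such as the hedged sketch below; it is not present in either file, and the macro name is an assumption.

/* Hypothetical error-checking wrapper; wrap calls such as
 * CUDA_CHECK(cudaMalloc(&_u, nx*ny*sizeof(double))); */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            std::fprintf(stderr, "CUDA error \"%s\" at %s:%d\n",            \
                         cudaGetErrorString(err_), __FILE__, __LINE__);     \
            std::exit(EXIT_FAILURE);                                        \
        }                                                                   \
    } while (0)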
7fca001e2e71e90213dfa7a91b724b7d4af98f33.hip
// !!! This is a file automatically generated by hipify!!! /** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/cgemv_offset.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 2.0.0 * @author Ahmad Abdelfattah * @date 2017-11-13 **/ #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <rocblas.h> #include "gemv_offset_core.cuh" #if(SM >= 30) #define cgemvn_offset_bs (32) #define cgemvn_offset_ty (8) #define cgemvn_offset_by (4) #define cgemvt_offset_bs (32) #define cgemvt_offset_ty (4) #define cgemvt_offset_by (4) #else #define cgemvn_offset_bs (32) #define cgemvn_offset_ty (4) #define cgemvn_offset_by (2) #define cgemvt_offset_bs (32) #define cgemvt_offset_ty (4) #define cgemvt_offset_by (2) #endif extern "C" int kblas_cscal_async(int n, cuFloatComplex alpha, cuFloatComplex *x, int incx, hipStream_t stream); int kblas_cgemv_offset_driver( char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset_r, int offset_c, hipStream_t stream = 0) { if(trans == 'n' || trans == 'N') { //**** Config parameters const int thread_x = cgemvn_offset_bs; const int thread_y = cgemvn_offset_ty; const int elements_per_thread = thread_x/(2*thread_y); const int grid_y_n = cgemvn_offset_by; //************************* /** offset necessary calculation **/ int offset_r_ = offset_r % cgemvn_offset_bs; int offset_c_ = offset_c % cgemvn_offset_bs; int total_blocks_skipped_r = offset_r / cgemvn_offset_bs; int total_blocks_skipped_c = offset_c / cgemvn_offset_bs; int my_skipped_blocks_r = total_blocks_skipped_r; int my_skipped_blocks_c = total_blocks_skipped_c/ngpus; if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1; int ref_gpu = total_blocks_skipped_c%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks_c * cgemvn_offset_bs * lda; dA += my_skipped_blocks_r * cgemvn_offset_bs; dX += my_skipped_blocks_c * cgemvn_offset_bs * incx; dY += my_skipped_blocks_r * cgemvn_offset_bs * incy; rows -= my_skipped_blocks_r * cgemvn_offset_bs; cols -= my_skipped_blocks_c * cgemvn_offset_bs; /** end offset necessary calculation **/ int nstripes = (cols/cgemvn_offset_bs) + ((cols%cgemvn_offset_bs) != 0); // scaling with beta //if(gpu_gid == 0)hipblasSscal(rows-offset_, beta, dY+(offset_*incy), incy); if(gpu_gid == 0)kblas_cscal_async(rows-offset_r_, beta, dY+(offset_r_*incy), incy, stream); int cols_ = cgemvn_offset_bs * ( (cols/cgemvn_offset_bs)/ngpus ); if(new_gpu_gid < (cols/cgemvn_offset_bs)%ngpus) cols_ += cgemvn_offset_bs; if(new_gpu_gid == (cols/cgemvn_offset_bs)%ngpus) cols_ += cols%cgemvn_offset_bs; int mod_r = rows % cgemvn_offset_bs; int mod_c = cols_ % cgemvn_offset_bs; if(mod_r == 0 && mod_c == 0) { // special case int blocks = rows/cgemvn_offset_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; hipLaunchKernelGGL(( gemvn_special_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread>) , dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_r_, offset_c_); } else { // generic case for columns only const int irregular_cols = mod_c % elements_per_thread; int 
blocks = (rows/cgemvn_offset_bs) + (mod_r != 0); if(mod_r == 0)blocks += 1; // dummy thread block, will return immediately if mod_r == 0 dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. **/ case 0:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 1:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 2:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 3:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 4:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 5:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 6:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 7:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 8:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 9:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 
10:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 11:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 12:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 13:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 14:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 15:hipLaunchKernelGGL(( gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. 
\n"); return -1; } } } // end of non-transpose case else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') { int conj; if(trans == 'c' || trans == 'C') conj = 1; else conj = 0; //**** Config parameters const int thread_x = cgemvt_offset_bs; const int thread_y = cgemvt_offset_ty; const int elements_per_thread = thread_x/(2*thread_y); const int grid_y_t = cgemvt_offset_by; //************************* /** offset necessary calculation **/ int offset_r_ = offset_r % cgemvt_offset_bs; int offset_c_ = offset_c % cgemvt_offset_bs; int total_blocks_skipped_r = offset_r / cgemvt_offset_bs; int total_blocks_skipped_c = offset_c / cgemvt_offset_bs; int my_skipped_blocks_r = total_blocks_skipped_r; int my_skipped_blocks_c = total_blocks_skipped_c/ngpus; if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1; int ref_gpu = total_blocks_skipped_c%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; //if(new_gpu_gid != 3){return 0;} // Advance pointers accordingly dA += my_skipped_blocks_c * cgemvt_offset_bs * lda; dA += my_skipped_blocks_r * cgemvt_offset_bs; dX += my_skipped_blocks_r * cgemvt_offset_bs * incx; dY += my_skipped_blocks_c * cgemvt_offset_bs * incy; rows -= my_skipped_blocks_r * cgemvt_offset_bs; cols -= my_skipped_blocks_c * cgemvt_offset_bs; /** end offset necessary calculation **/ int nstripes = (cols/cgemvt_offset_bs) + ((cols%cgemvt_offset_bs) != 0); // scaling with beta //if(gpu_gid == 0)hipblasSscal(cols-offset_, beta, dY+(offset_*incy), incy); if(gpu_gid == 0)kblas_cscal_async(cols-offset_c_, beta, dY+(offset_c_*incy), incy, stream); int cols_ = cgemvt_offset_bs * ( (cols/cgemvt_offset_bs)/ngpus ); if(new_gpu_gid < (cols/cgemvt_offset_bs)%ngpus) cols_ += cgemvt_offset_bs; if(new_gpu_gid == (cols/cgemvt_offset_bs)%ngpus) cols_ += cols%cgemvt_offset_bs; int mod_r = rows % cgemvt_offset_bs; int mod_c = cols_ % cgemvt_offset_bs; if(mod_r == 0 && mod_c == 0) { int blocks = cols_/cgemvt_offset_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; hipLaunchKernelGGL(( gemvt_special_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_r_, offset_c_, conj); } else { const int irregular_cols = mod_c % elements_per_thread; int blocks = cols_/cgemvt_offset_bs + (mod_c != 0); int gpu_last = (nstripes+ngpus-1)%ngpus; if(mod_c == 0 && new_gpu_gid == gpu_last) blocks += 1; // dummy thread block, will return if mod_c == 0 dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ case 0:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 1:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 2:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 3:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 4:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 5:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 6:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 7:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 8:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 9:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 10:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 11:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 12:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, 
elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 13:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 14:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 15:hipLaunchKernelGGL(( gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; default: printf("CGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } else { printf("CGEMV error: Unrecognized transpose mode %c \n", trans); return -1; } return 0; } /***********************************************************************************/ extern "C" int kblas_cgemv_offset( char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset_r, int offset_c) { return kblas_cgemv_offset_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c); } /*************************************************************************************/ extern "C" int kblas_cgemv_offset_async( char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset_r, int offset_c, hipStream_t stream) { return kblas_cgemv_offset_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c, stream); } /*************************************************************************************/
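In the driver above, the extra template parameter selected by the switch is irregular_cols = mod_c % elements_per_thread, with elements_per_thread = thread_x / (2*thread_y). The small sketch below spells out that arithmetic for the SM >= 30 non-transpose configuration defined in this file; the constants are taken from the #defines above, and the program itself is purely illustrative.

/* With bs = 32 and ty = 8, elements_per_thread = 32 / (2*8) = 2, so only
 * cases 0 and 1 of the switch can be reached with this configuration; the
 * remaining cases exist for other block/thread settings. */
#include <stdio.h>

int main(void)
{
    const int thread_x = 32;   /* cgemvn_offset_bs            */
    const int thread_y = 8;    /* cgemvn_offset_ty (SM >= 30) */
    const int elements_per_thread = thread_x / (2 * thread_y);

    for (int mod_c = 0; mod_c < 4; mod_c++)
        printf("mod_c = %d -> irregular_cols = %d\n",
               mod_c, mod_c % elements_per_thread);
    return 0;
}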
7fca001e2e71e90213dfa7a91b724b7d4af98f33.cu
/** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/cgemv_offset.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 2.0.0 * @author Ahmad Abdelfattah * @date 2017-11-13 **/ #include <stdio.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <cublas.h> #include "gemv_offset_core.cuh" #if(SM >= 30) #define cgemvn_offset_bs (32) #define cgemvn_offset_ty (8) #define cgemvn_offset_by (4) #define cgemvt_offset_bs (32) #define cgemvt_offset_ty (4) #define cgemvt_offset_by (4) #else #define cgemvn_offset_bs (32) #define cgemvn_offset_ty (4) #define cgemvn_offset_by (2) #define cgemvt_offset_bs (32) #define cgemvt_offset_ty (4) #define cgemvt_offset_by (2) #endif extern "C" int kblas_cscal_async(int n, cuFloatComplex alpha, cuFloatComplex *x, int incx, cudaStream_t stream); int kblas_cgemv_offset_driver( char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset_r, int offset_c, cudaStream_t stream = 0) { if(trans == 'n' || trans == 'N') { //**** Config parameters const int thread_x = cgemvn_offset_bs; const int thread_y = cgemvn_offset_ty; const int elements_per_thread = thread_x/(2*thread_y); const int grid_y_n = cgemvn_offset_by; //************************* /** offset necessary calculation **/ int offset_r_ = offset_r % cgemvn_offset_bs; int offset_c_ = offset_c % cgemvn_offset_bs; int total_blocks_skipped_r = offset_r / cgemvn_offset_bs; int total_blocks_skipped_c = offset_c / cgemvn_offset_bs; int my_skipped_blocks_r = total_blocks_skipped_r; int my_skipped_blocks_c = total_blocks_skipped_c/ngpus; if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1; int ref_gpu = total_blocks_skipped_c%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks_c * cgemvn_offset_bs * lda; dA += my_skipped_blocks_r * cgemvn_offset_bs; dX += my_skipped_blocks_c * cgemvn_offset_bs * incx; dY += my_skipped_blocks_r * cgemvn_offset_bs * incy; rows -= my_skipped_blocks_r * cgemvn_offset_bs; cols -= my_skipped_blocks_c * cgemvn_offset_bs; /** end offset necessary calculation **/ int nstripes = (cols/cgemvn_offset_bs) + ((cols%cgemvn_offset_bs) != 0); // scaling with beta //if(gpu_gid == 0)cublasSscal(rows-offset_, beta, dY+(offset_*incy), incy); if(gpu_gid == 0)kblas_cscal_async(rows-offset_r_, beta, dY+(offset_r_*incy), incy, stream); int cols_ = cgemvn_offset_bs * ( (cols/cgemvn_offset_bs)/ngpus ); if(new_gpu_gid < (cols/cgemvn_offset_bs)%ngpus) cols_ += cgemvn_offset_bs; if(new_gpu_gid == (cols/cgemvn_offset_bs)%ngpus) cols_ += cols%cgemvn_offset_bs; int mod_r = rows % cgemvn_offset_bs; int mod_c = cols_ % cgemvn_offset_bs; if(mod_r == 0 && mod_c == 0) { // special case int blocks = rows/cgemvn_offset_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; gemvn_special_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread> <<<dimGrid, dimBlock, 0, stream>>> (rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_r_, offset_c_); } else { // generic case for columns only const int irregular_cols = mod_c % elements_per_thread; int blocks = (rows/cgemvn_offset_bs) + (mod_r != 0); if(mod_r == 0)blocks += 1; // dummy thread block, will 
return immediately if mod_r == 0 dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. **/ case 0: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 1: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 2: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 3: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 4: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 5: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 6: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 7: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 8: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 9: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 10: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 11: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 11><<<dimGrid, dimBlock, 0, 
stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 12: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 13: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 14: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; case 15: gemvn_generic_offset<cuFloatComplex, cgemvn_offset_bs, cgemvn_offset_bs, cgemvn_offset_ty, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break; default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } // end of non-transpose case else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') { int conj; if(trans == 'c' || trans == 'C') conj = 1; else conj = 0; //**** Config parameters const int thread_x = cgemvt_offset_bs; const int thread_y = cgemvt_offset_ty; const int elements_per_thread = thread_x/(2*thread_y); const int grid_y_t = cgemvt_offset_by; //************************* /** offset necessary calculation **/ int offset_r_ = offset_r % cgemvt_offset_bs; int offset_c_ = offset_c % cgemvt_offset_bs; int total_blocks_skipped_r = offset_r / cgemvt_offset_bs; int total_blocks_skipped_c = offset_c / cgemvt_offset_bs; int my_skipped_blocks_r = total_blocks_skipped_r; int my_skipped_blocks_c = total_blocks_skipped_c/ngpus; if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1; int ref_gpu = total_blocks_skipped_c%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; //if(new_gpu_gid != 3){return 0;} // Advance pointers accordingly dA += my_skipped_blocks_c * cgemvt_offset_bs * lda; dA += my_skipped_blocks_r * cgemvt_offset_bs; dX += my_skipped_blocks_r * cgemvt_offset_bs * incx; dY += my_skipped_blocks_c * cgemvt_offset_bs * incy; rows -= my_skipped_blocks_r * cgemvt_offset_bs; cols -= my_skipped_blocks_c * cgemvt_offset_bs; /** end offset necessary calculation **/ int nstripes = (cols/cgemvt_offset_bs) + ((cols%cgemvt_offset_bs) != 0); // scaling with beta //if(gpu_gid == 0)cublasSscal(cols-offset_, beta, dY+(offset_*incy), incy); if(gpu_gid == 0)kblas_cscal_async(cols-offset_c_, beta, dY+(offset_c_*incy), incy, stream); int cols_ = cgemvt_offset_bs * ( (cols/cgemvt_offset_bs)/ngpus ); if(new_gpu_gid < (cols/cgemvt_offset_bs)%ngpus) cols_ += cgemvt_offset_bs; if(new_gpu_gid == (cols/cgemvt_offset_bs)%ngpus) cols_ += cols%cgemvt_offset_bs; int mod_r = rows % cgemvt_offset_bs; int mod_c = cols_ % cgemvt_offset_bs; if(mod_r == 0 && mod_c == 0) { int blocks = cols_/cgemvt_offset_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; gemvt_special_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, 
incy, nstripes, offset_r_, offset_c_, conj); } else { const int irregular_cols = mod_c % elements_per_thread; int blocks = cols_/cgemvt_offset_bs + (mod_c != 0); int gpu_last = (nstripes+ngpus-1)%ngpus; if(mod_c == 0 && new_gpu_gid == gpu_last) blocks += 1; // dummy thread block, will return if mod_c == 0 dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. **/ case 0: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 1: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 2: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 3: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 4: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 5: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 6: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 7: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 8: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 9: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 10: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); 
break; case 11: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 12: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 13: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 14: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; case 15: gemvt_generic_offset<cuFloatComplex, cgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break; default: printf("CGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } else { printf("CGEMV error: Unrecognized transpose mode %c \n", trans); return -1; } return 0; } /***********************************************************************************/ extern "C" int kblas_cgemv_offset( char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset_r, int offset_c) { return kblas_cgemv_offset_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c); } /*************************************************************************************/ extern "C" int kblas_cgemv_offset_async( char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset_r, int offset_c, cudaStream_t stream) { return kblas_cgemv_offset_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c, stream); } /*************************************************************************************/
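A hypothetical call-site sketch for the kblas_cgemv_offset_async entry point declared above. Only the prototype comes from the file; the sizes, increments, offsets, and the assumption that the KBLAS multi-GPU globals (ngpus, gpu_gid, from gemv_offset_core.cuh) have been initialised elsewhere are all illustrative.

/* Hypothetical usage; not from the KBLAS sources. */
#include <cuda_runtime.h>
#include <cuComplex.h>

extern "C" int kblas_cgemv_offset_async(char trans, int rows, int cols,
                                        cuFloatComplex alpha, cuFloatComplex *dA, int lda,
                                        cuFloatComplex *dX, int incx,
                                        cuFloatComplex beta, cuFloatComplex *dY, int incy,
                                        int offset_r, int offset_c, cudaStream_t stream);

void example(cuFloatComplex *dA, cuFloatComplex *dX, cuFloatComplex *dY,
             int rows, int cols, int lda, cudaStream_t stream)
{
    cuFloatComplex alpha = make_cuFloatComplex(1.0f, 0.0f);
    cuFloatComplex beta  = make_cuFloatComplex(0.0f, 0.0f);
    /* The offsets select the submatrix starting at (offset_r, offset_c),
     * as the pointer-advancing code in the driver suggests. */
    kblas_cgemv_offset_async('N', rows, cols, alpha, dA, lda,
                             dX, 1, beta, dY, 1,
                             /*offset_r=*/32, /*offset_c=*/32, stream);
}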
edc600e1a5e73e7db2fbb6c67fce645a20c6708c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <string> #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include "image_processor.cuh" /** * Convolution function for cuda. Destination is expected to have the same width/height as source, but there will be a border * of floor(kWidth/2) pixels left and right and floor(kHeight/2) pixels top and bottom * * @param source Source image host pinned memory pointer * @param width Source image width * @param height Source image height * @param paddingX source image padding along x * @param paddingY source image padding along y * @param kOffset offset into kernel store constant memory * @param kWidth kernel width * @param kHeight kernel height * @param destination Destination image host pinned memory pointer */ __global__ void convolve(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination) { // Calculate our pixel's location int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; float sum = 0.0f; int pWidth = kWidth/2; int pHeight = kHeight/2; // Execute for valid pixels if(x >= pWidth+paddingX && y >= pHeight+paddingY && x < (gridDim.x * blockDim.x)-pWidth-paddingX && y < (gridDim.y *blockDim.y )-pHeight-paddingY) { for(int j = -pHeight; j <= pHeight; j++) { for(int i = -pWidth; i <= pWidth; i++) { // Sample the weight for this location int ki = (i+pWidth); int kj = (j+pHeight); float w = convolutionKernel[(kj * kWidth) + ki + kOffset]; sum += w * float(source[((y+j) * width) + (x+i)]); } } } // Average the sum destination[(y * width) + x] = (unsigned char) sum; } int main (int argc, char** argv) { // Open a webcamera cv::VideoCapture camera(0); cv::Mat frame; if(!camera.isOpened()) return -1; // Create the capture windows cv::namedWindow("Source"); cv::namedWindow("Grayscale"); cv::namedWindow("Blurred"); cv::namedWindow("Gaussian"); cv::namedWindow ("Box"); cv::namedWindow ("sobel"); // Create the gaussian kernel // Credits, gaussian generation was created using the code from :https://sgsawant.wordpress.com/2009/11/05/generation-of-gaussian-kernel-mask/. const float gaussianKernel5x5[25] = { 4.f/240.f, 8.f/240.f, 10.f/240.f, 8.f/240.f, 4.f/240.f, 8.f/240.f, 18.f/240.f, 24.f/240.f, 18.f/240.f, 8.f/240.f, 10.f/240.f, 24.f/240.f, 30.f/240.f, 24.f/240.f, 10.f/240.f, 8.f/240.f, 18.f/240.f, 24.f/240.f, 18.f/240.f, 8.f/240.f, 4.f/240.f, 16.f/240.f, 10.f/240.f, 8.f/240.f, 4.f/240.f, }; hipMemcpyToSymbol(convolutionKernel, gaussianKernel5x5, sizeof(gaussianKernel5x5), 0); // Sobel gradient kernels /* Sobel Gradient kernel source :http://homepages.inf.ed.ac.uk/rbf/HIPR2/sobel.htm */ // Note: Angle is taken as zero. 
const float sobelGX[9] = { -1.f, 0.f, 1.f, -2.f, 0.f, 2.f, -1.f, 0.f, 1.f, }; const float sobelGY[9] = { 1.f, 2.f, 1.f, 0.f, 0.f, 0.f, -1.f, -2.f, -1.f, }; hipMemcpyToSymbol(convolutionKernel, sobelGX, sizeof(sobelGX), sizeof(gaussianKernel5x5)); hipMemcpyToSymbol(convolutionKernel, sobelGY, sizeof(sobelGY), sizeof(gaussianKernel5x5) + sizeof(sobelGX)); const ssize_t sobelGradientXOffset = sizeof(gaussianKernel5x5)/sizeof(float); const ssize_t sobelGradientYOffset = sizeof(sobelGX)/sizeof(float) + sobelGradientXOffset; // Create CPU/GPU shared images - one for the initial and one for the result camera >> frame; unsigned char *sourceDataDevice, *blurredDataDevice, *edgesDataDevice,*boxDataDevice,*sobelDataDevice; cv::Mat source (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &sourceDataDevice)); cv::Mat blurred (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &blurredDataDevice)); cv::Mat edges (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &edgesDataDevice)); cv::Mat Imagebuffer2 (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &boxDataDevice)); cv::Mat Imagebuffer3 (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &sobelDataDevice)); // Create two temporary images (for holding sobel gradients) unsigned char *deviceGradientX, *deviceGradientY; hipMalloc(&deviceGradientX, frame.size().width * frame.size().height); hipMalloc(&deviceGradientY, frame.size().width * frame.size().height); // Loop while capturing images while(1) { // Capture the image from the camera object; camera >> frame; // convert the image to gray scale cv::cvtColor(frame, source, CV_BGR2GRAY); // Record the time it takes to process { // convolution kernel launch parameters dim3 cblocks (frame.size().width / 16, frame.size().height / 16); dim3 cthreads(16, 16); // pythagorean kernel (resultant vector) launch paramters dim3 pblocks (frame.size().width * frame.size().height / 256); dim3 pthreads(256, 1); boxfilter(frame.size().width, frame.size().height, source.data, Imagebuffer2.data, 3, 3); sobelfilter(frame.size().width, frame.size().height, Imagebuffer2.data, Imagebuffer3.data); // Perform the gaussian blur (first kernel in store @ 0) hipLaunchKernelGGL(( convolve), dim3(cblocks),dim3(cthreads), 0, 0, sourceDataDevice, frame.size().width, frame.size().height, 0, 0, 0, 5, 5, blurredDataDevice); // Perform the sobel gradient convolutions (x&y padding is now 2 because there is a border of 2 around a 5x5 gaussian filtered image) hipLaunchKernelGGL(( convolve), dim3(cblocks),dim3(cthreads), 0, 0, blurredDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientXOffset, 3, 3, deviceGradientX); hipLaunchKernelGGL(( convolve), dim3(cblocks),dim3(cthreads), 0, 0, blurredDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientYOffset, 3, 3, deviceGradientY); hipLaunchKernelGGL(( resultant), dim3(pblocks),dim3(pthreads), 0, 0, deviceGradientX, deviceGradientY, edgesDataDevice); hipDeviceSynchronize (); } // Show the results cv::imshow("Source", frame); cv::imshow("Greyscale", source); cv::imshow("Blurred", blurred); cv::imshow("Gaussian", edges); cv::imshow("BOX",Imagebuffer2); cv::imshow("sobel",Imagebuffer3); // Spin if(cv::waitKey(1) == 27) break; } // Exit hipHostFree(source.data); hipHostFree(blurred.data); hipHostFree(edges.data); hipFree(deviceGradientX); hipFree(deviceGradientY); return 0; }
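All three filter masks above are packed into a single constant-memory array (convolutionKernel, declared in image_processor.cuh, which is not shown here), and each convolve launch picks its filter through the kOffset argument. The sketch below restates the element-offset arithmetic used in main; it is illustrative only.

/* Offsets into the shared constant-memory kernel store:
 *   Gaussian 5x5 -> kOffset 0
 *   Sobel Gx     -> kOffset 25 (sobelGradientXOffset)
 *   Sobel Gy     -> kOffset 34 (sobelGradientYOffset) */
#include <stdio.h>

int main(void)
{
    float gaussianKernel5x5[25], sobelGX[9], sobelGY[9];
    (void)gaussianKernel5x5; (void)sobelGX; (void)sobelGY;
    size_t xOff = sizeof(gaussianKernel5x5) / sizeof(float);   /* 25 */
    size_t yOff = sizeof(sobelGX) / sizeof(float) + xOff;      /* 34 */
    printf("Gx offset = %zu, Gy offset = %zu\n", xOff, yOff);
    return 0;
}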
edc600e1a5e73e7db2fbb6c67fce645a20c6708c.cu
#include <iostream> #include <string> #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include "image_processor.cuh" /** * Convolution function for cuda. Destination is expected to have the same width/height as source, but there will be a border * of floor(kWidth/2) pixels left and right and floor(kHeight/2) pixels top and bottom * * @param source Source image host pinned memory pointer * @param width Source image width * @param height Source image height * @param paddingX source image padding along x * @param paddingY source image padding along y * @param kOffset offset into kernel store constant memory * @param kWidth kernel width * @param kHeight kernel height * @param destination Destination image host pinned memory pointer */ __global__ void convolve(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination) { // Calculate our pixel's location int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; float sum = 0.0f; int pWidth = kWidth/2; int pHeight = kHeight/2; // Execute for valid pixels if(x >= pWidth+paddingX && y >= pHeight+paddingY && x < (gridDim.x * blockDim.x)-pWidth-paddingX && y < (gridDim.y *blockDim.y )-pHeight-paddingY) { for(int j = -pHeight; j <= pHeight; j++) { for(int i = -pWidth; i <= pWidth; i++) { // Sample the weight for this location int ki = (i+pWidth); int kj = (j+pHeight); float w = convolutionKernel[(kj * kWidth) + ki + kOffset]; sum += w * float(source[((y+j) * width) + (x+i)]); } } } // Average the sum destination[(y * width) + x] = (unsigned char) sum; } int main (int argc, char** argv) { // Open a webcamera cv::VideoCapture camera(0); cv::Mat frame; if(!camera.isOpened()) return -1; // Create the capture windows cv::namedWindow("Source"); cv::namedWindow("Grayscale"); cv::namedWindow("Blurred"); cv::namedWindow("Gaussian"); cv::namedWindow ("Box"); cv::namedWindow ("sobel"); // Create the gaussian kernel // Credits, gaussian generation was created using the code from :https://sgsawant.wordpress.com/2009/11/05/generation-of-gaussian-kernel-mask/. const float gaussianKernel5x5[25] = { 4.f/240.f, 8.f/240.f, 10.f/240.f, 8.f/240.f, 4.f/240.f, 8.f/240.f, 18.f/240.f, 24.f/240.f, 18.f/240.f, 8.f/240.f, 10.f/240.f, 24.f/240.f, 30.f/240.f, 24.f/240.f, 10.f/240.f, 8.f/240.f, 18.f/240.f, 24.f/240.f, 18.f/240.f, 8.f/240.f, 4.f/240.f, 16.f/240.f, 10.f/240.f, 8.f/240.f, 4.f/240.f, }; cudaMemcpyToSymbol(convolutionKernel, gaussianKernel5x5, sizeof(gaussianKernel5x5), 0); // Sobel gradient kernels /* Sobel Gradient kernel source :http://homepages.inf.ed.ac.uk/rbf/HIPR2/sobel.htm */ // Note: Angle is taken as zero. 
const float sobelGX[9] = { -1.f, 0.f, 1.f, -2.f, 0.f, 2.f, -1.f, 0.f, 1.f, }; const float sobelGY[9] = { 1.f, 2.f, 1.f, 0.f, 0.f, 0.f, -1.f, -2.f, -1.f, }; cudaMemcpyToSymbol(convolutionKernel, sobelGX, sizeof(sobelGX), sizeof(gaussianKernel5x5)); cudaMemcpyToSymbol(convolutionKernel, sobelGY, sizeof(sobelGY), sizeof(gaussianKernel5x5) + sizeof(sobelGX)); const ssize_t sobelGradientXOffset = sizeof(gaussianKernel5x5)/sizeof(float); const ssize_t sobelGradientYOffset = sizeof(sobelGX)/sizeof(float) + sobelGradientXOffset; // Create CPU/GPU shared images - one for the initial and one for the result camera >> frame; unsigned char *sourceDataDevice, *blurredDataDevice, *edgesDataDevice,*boxDataDevice,*sobelDataDevice; cv::Mat source (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &sourceDataDevice)); cv::Mat blurred (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &blurredDataDevice)); cv::Mat edges (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &edgesDataDevice)); cv::Mat Imagebuffer2 (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &boxDataDevice)); cv::Mat Imagebuffer3 (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &sobelDataDevice)); // Create two temporary images (for holding sobel gradients) unsigned char *deviceGradientX, *deviceGradientY; cudaMalloc(&deviceGradientX, frame.size().width * frame.size().height); cudaMalloc(&deviceGradientY, frame.size().width * frame.size().height); // Loop while capturing images while(1) { // Capture the image from the camera object; camera >> frame; // convert the image to gray scale cv::cvtColor(frame, source, CV_BGR2GRAY); // Record the time it takes to process { // convolution kernel launch parameters dim3 cblocks (frame.size().width / 16, frame.size().height / 16); dim3 cthreads(16, 16); // pythagorean kernel (resultant vector) launch paramters dim3 pblocks (frame.size().width * frame.size().height / 256); dim3 pthreads(256, 1); boxfilter(frame.size().width, frame.size().height, source.data, Imagebuffer2.data, 3, 3); sobelfilter(frame.size().width, frame.size().height, Imagebuffer2.data, Imagebuffer3.data); // Perform the gaussian blur (first kernel in store @ 0) convolve<<<cblocks,cthreads>>>(sourceDataDevice, frame.size().width, frame.size().height, 0, 0, 0, 5, 5, blurredDataDevice); // Perform the sobel gradient convolutions (x&y padding is now 2 because there is a border of 2 around a 5x5 gaussian filtered image) convolve<<<cblocks,cthreads>>>(blurredDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientXOffset, 3, 3, deviceGradientX); convolve<<<cblocks,cthreads>>>(blurredDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientYOffset, 3, 3, deviceGradientY); resultant<<<pblocks,pthreads>>>(deviceGradientX, deviceGradientY, edgesDataDevice); cudaThreadSynchronize (); } // Show the results cv::imshow("Source", frame); cv::imshow("Greyscale", source); cv::imshow("Blurred", blurred); cv::imshow("Gaussian", edges); cv::imshow("BOX",Imagebuffer2); cv::imshow("sobel",Imagebuffer3); // Spin if(cv::waitKey(1) == 27) break; } // Exit cudaFreeHost(source.data); cudaFreeHost(blurred.data); cudaFreeHost(edges.data); cudaFree(deviceGradientX); cudaFree(deviceGradientY); return 0; }
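The convolve kernel above is launched with cblocks = (width/16, height/16), an integer division, and its bounds test treats gridDim*blockDim as the image extent; frames whose dimensions are not multiples of 16 therefore leave a strip at the right and bottom edges unprocessed. Below is a hedged sketch of the usual round-up alternative. It appears in neither file, the helper name is an assumption, and adopting it would also require the kernel's guard to compare against width/height rather than gridDim*blockDim to avoid out-of-bounds writes.

/* Hypothetical grid-sizing helper. */
#include <cuda_runtime.h>

static inline dim3 gridFor(int width, int height, int tile = 16)
{
    return dim3((width  + tile - 1) / tile,
                (height + tile - 1) / tile);
}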
247099201d459c66398e86622d6ee24e9d812674.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <GL/glew.h> #include <GL/gl.h> #include <GL/glut.h> #include <GL/freeglut.h> #include <math.h> #include <stdbool.h> #include <omp.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> #define PI 3.141592653589793 #define cap 1000 #define ref 0.9 #define temp 3000 #define visc 9 #define GRAV (6.674*0.00000000000000000001) #define density (2.5 * 1000000000000) #define sigma (0.96*5.67*0.00000001) //W/m^2 T^4 #define cool (sigma*4*PI*rad*rad*1000000*15) #define rad 300 //km #define M (4 / 3 * PI * rad*rad*rad* density)//kg #define MOONOFFSET_X (INIT_WIDTH/vision/2) #define MOONOFFSET_Y (INIT_WIDTH/vision*2) #define MOONOFFSET_Z (INIT_HEIGHT/vision) #define dev 360 #define resol 50 #define hollow 30 #define X 0 #define Y 1 #define Z 2 #define ANIM_START 0 #define ANIM 20 #define scale 0.01 #define colmargin 1.05 #define R (rad * scale) #define INIT_WIDTH 800 #define INIT_HEIGHT 800 #define vision 40 #define Grid_x 4 #define Grid_y 2 #define Grid_z 1 #define Block_x 2 #define Block_y 1 #define Block_z 1 #define NUM_POINTS (Grid_x*Grid_y*Grid_z*Block_x*Block_y*Block_z) unsigned int dev_points = dev + 1; unsigned int window_width = INIT_WIDTH; unsigned int window_height = INIT_HEIGHT; double vision_size = vision; float right_motion=0; float up_motion=0; double left, right, bottom, top; float h_point[NUM_POINTS][3]; float v_point[NUM_POINTS][3]; float st_point[NUM_POINTS]; float e_point[NUM_POINTS]; float J_point[NUM_POINTS]; float h_buff[NUM_POINTS][3]={0}; float anim_time = ANIM_START; float anim_dt = ANIM; double phi = 30.0; double theta = 30.0; float light_pos[4]; int mouse_old_x, mouse_old_y; bool motion_p; bool motion_w; double eye[3]; double center[3] = {0.0, 0.0, 0.0}; double up[3]; double ** point; float (*d_point)[3]; float (*dv_point)[3]; float (*dst_point); float (*de_point); float (*dJ_point); float (*v_buff)[3]; float colsynctime[NUM_POINTS][NUM_POINTS]={0}; int colsyncindex[NUM_POINTS][NUM_POINTS]={0}; float (*dcolsynctime)[NUM_POINTS]; int (*dcolsyncindex)[NUM_POINTS]; __global__ void grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]); __global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]); __global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS]); __global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3]); __global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]); __global__ void grav_p(float (*pos)[3], float(*vec)[3]); //basic function double dot(double vec0[], double vec1[]) { return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]); } void cross(double vec0[], double vec1[], double vec2[]) { vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y]; vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z]; vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X]; } void normVec(double vec[]) { double norm; norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]); vec[X] /= norm; vec[Y] /= norm; vec[Z] /= norm; } void normal(double p0[], double p1[], double p2[], double normal[]) { unsigned int i; double v0[3], v1[3]; for (i = 0; i < 3; i++) { v0[i] = p2[i] - p1[i]; v1[i] = p0[i] - p1[i]; } cross(v0, v1, normal); normVec(normal); } //colision detection 
__global__ void grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]) { float xn,yn,zn,vx,vy,vz,dis,sq; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; float rvec[3]={0}; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; for (int i = 0 ; i < NUM_POINTS; i++) { sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2); dis = (float)sqrt((double)sq); rvec[0]=(pos[i][0]-xn)/dis; rvec[1]=(pos[i][1]-yn)/dis; rvec[2]=(pos[i][2]-zn)/dis; if (dis > 2 * R * colmargin && i != index) { colindex[index][i]=NUM_POINTS; } else if (dis <= 2 * R * colmargin && i != index) { colindex[index][i]=i; coltime[index][i]=(2*R*colmargin-dis)/((vx-vec[i][0])*rvec[0]+(vy-vec[i][1])*rvec[1]+(vz-vec[i][2])*rvec[2]); } else { colindex[index][i]=NUM_POINTS; } } } //culculate speed after colision __global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]) { float xn,yn,zn,sq,dis; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; int colnum=0; float tmptime=0; int tmpindex=0; int coldex=0; float repul=0; float rvec[3]={0}; float Vl[3]={0}; float Vr[3]={0}; float Vh[3]={0}; float vl_buff[3]={0}; float vr_buff[3]={0}; float vcol_buff[3]={0}; float dotV=0; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vl_buff[0]=vec[index][0]; vl_buff[1]=vec[index][1]; vl_buff[2]=vec[index][2]; for (int i = 0 ; i < NUM_POINTS; i++){ if(colindex[index][i]!=NUM_POINTS){ colnum++; } } if(colnum>0){ for (int i = 0 ; i < NUM_POINTS; i++){ for(int j = i+1; j < NUM_POINTS; j++){ if(coltime[index][i] > coltime[index][j]){ tmptime=coltime[index][i]; tmpindex=colindex[index][i]; coltime[index][i]=coltime[index][j]; colindex[index][i]=colindex[index][j]; coltime[index][j]=tmptime; colindex[index][j]=tmpindex; } } } for (int i=NUM_POINTS-1 ; i>=NUM_POINTS-colnum; i--){ coldex=colindex[index][i]; sq = (float)pow((double)(xn-pos[coldex][0]),2)+pow((double)(yn-pos[coldex][1]),2)+pow((double)(zn-pos[coldex][2]),2); dis = (float)sqrt((double)sq); rvec[0]=(pos[coldex][0]-xn)/dis; rvec[1]=(pos[coldex][1]-yn)/dis; rvec[2]=(pos[coldex][2]-zn)/dis; dotV=rvec[0]*vl_buff[0]+rvec[1]*vl_buff[1]+rvec[2]*vl_buff[2]; Vl[0]=dotV*rvec[0]; Vl[1]=dotV*rvec[1]; Vl[2]=dotV*rvec[2]; dotV=rvec[0]*vec[coldex][0]+rvec[1]*vec[coldex][1]+rvec[2]*vec[coldex][2]; Vr[0]=dotV*rvec[0]; Vr[1]=dotV*rvec[1]; Vr[2]=dotV*rvec[2]; Vh[0]=vl_buff[0]-Vl[0]; Vh[1]=vl_buff[1]-Vl[1]; Vh[2]=vl_buff[2]-Vl[2]; repul=e[index]; if (e[coldex] < e[index]) { repul=e[coldex]; } vcol_buff[0]=Vh[0]+((1+repul)*Vr[0]+(1-repul)*Vl[0])/2; vcol_buff[1]=Vh[1]+((1+repul)*Vr[1]+(1-repul)*Vl[1])/2; vcol_buff[2]=Vh[2]+((1+repul)*Vr[2]+(1-repul)*Vl[2])/2; vr_buff[0]=vec[coldex][0]-Vr[0]+((1+repul)*Vl[0]+(1-repul)*Vr[0])/2; vr_buff[1]=vec[coldex][1]-Vr[1]+((1+repul)*Vl[1]+(1-repul)*Vr[1])/2; 
vr_buff[2]=vec[coldex][2]-Vr[2]+((1+repul)*Vl[2]+(1-repul)*Vr[2])/2; double Energy=0.5*M*(pow(vec[coldex][0],2)+pow(vec[coldex][1],2)+pow(vec[coldex][2],2)+pow(vl_buff[0],2)+pow(vl_buff[1],2)+pow(vl_buff[2],2) - (pow(vcol_buff[0],2)+pow(vcol_buff[1],2)+pow(vcol_buff[2],2)+pow(vr_buff[0],2)+pow(vr_buff[1],2)+pow(vr_buff[2],2))) / pow(scale,2) * 1000000; J[index] += Energy / (pow(10.0,(double)(sti[index]-sti[coldex]))+1); if (J[index] > M * cap * 10000000){ J[index] = M * cap * 10000000; } vl_buff[0]=vcol_buff[0]; vl_buff[1]=vcol_buff[1]; vl_buff[2]=vcol_buff[2]; e[index] = 1 - ((1-ref)/temp * J[index]/M/cap); if ( e[index] < 0 ){ e[index] = 0; } if ( e[index] > 1 ){ e[index] = 1; } sti[index] = visc - ((J[index]/M/cap - temp) / 100); } v_buff[index][0]=vl_buff[0]; v_buff[index][1]=vl_buff[1]; v_buff[index][2]=vl_buff[2]; } J[index]-=cool*(J[index]/M/cap)*(J[index]/M/cap)*(J[index]/M/cap)*(J[index]/M/cap)*ANIM; if (J[index] < 0) { J[index] = 0; } } //calculate speed after gravity affect __global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS]) { float xn,yn,zn,vx,vy,vz,sq,dis; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; int colnum=0; float gravity=0; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; for (int i = 0 ; i < NUM_POINTS; i++){ if(colindex[index][i]!=NUM_POINTS){ colnum++; } } if(colnum==0){ vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; for (int i = 0 ; i < NUM_POINTS; i++){ if (i!=index) { sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2); gravity=GRAV*M/sq*scale*scale; dis = (float)sqrt((double)sq); vx += ((pos[i][0]-xn)/dis)*gravity*ANIM*scale; vy += ((pos[i][1]-yn)/dis)*gravity*ANIM*scale; vz += ((pos[i][2]-zn)/dis)*gravity*ANIM*scale; } } } else { vx = v_buff[index][0]; vy = v_buff[index][1]; vz = v_buff[index][2]; for (int i = 0 ; i < NUM_POINTS; i++){ sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2); gravity=GRAV*M/sq*scale*scale; dis = (float)sqrt((double)sq); if(dis > 2 * R * colmargin) { vx += ((pos[i][0]-xn)/dis)*gravity*ANIM*scale; vy += ((pos[i][1]-yn)/dis)*gravity*ANIM*scale; vz += ((pos[i][2]-zn)/dis)*gravity*ANIM*scale; } } } v_buff[index][0] = vx; v_buff[index][1] = vy; v_buff[index][2] = vz; } __global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3]) { unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; vec[index][0]=v_buff[index][0]; vec[index][1]=v_buff[index][1]; vec[index][2]=v_buff[index][2]; } __global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]) { unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; for (int i=0; i < 3; i++){ v_buff[index][i]=0; } for 
(int i=0; i < NUM_POINTS; i++){ coltime[index][i]=0; colindex[index][i]=NUM_POINTS; } } //calculate position after gravity affect __global__ void grav_p(float(*pos)[3], float(*vec)[3]) { float xn,yn,zn,vx,vy,vz; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; pos[index][0] = xn + vx * ANIM; pos[index][1] = yn + vy * ANIM; pos[index][2] = zn + vz * ANIM; } void setInitialPosition(void) { for (int i = 0; i < NUM_POINTS; i++) { for (int j = 0 ; j < 3 ; j++){ h_point[i][j] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision*2 ; v_point[i][j] = 0; h_buff[i][j] = 0; } /* int earth_points = NUM_POINTS - (NUM_POINTS/16); if(i < earth_points){ for (int j = 0 ; j < 3 ; j++){ h_point[i][j] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/2 ; v_point[i][j] = 0; h_buff[i][j] = 0; } } else { h_point[i][0] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/4 + MOONOFFSET_X; h_point[i][1] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/4 + MOONOFFSET_Y; h_point[i][2] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/4 + MOONOFFSET_Z; v_point[i][0] = -(MOONOFFSET_X*scale/ANIM)/4; v_point[i][1] = -(MOONOFFSET_Y*scale/ANIM)/2.5; v_point[i][2] = -(MOONOFFSET_Z*scale/ANIM)/4; for (int j = 0 ; j < 3 ; j++){ h_buff[i][j] = 0; } } */ st_point[i]=visc; e_point[i]=ref; J_point[i]=cap*M*temp; for (int j = 0; j < NUM_POINTS; j++) { colsyncindex[i][j]=NUM_POINTS; } } checkCudaErrors(hipMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&v_buff, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dst_point, NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&de_point, NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dJ_point, NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int))); checkCudaErrors(hipMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(v_buff, h_buff, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dst_point, st_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(de_point, e_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dJ_point, J_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dcolsynctime, colsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dcolsyncindex, colsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , hipMemcpyHostToDevice)); } //CUDA void launchGPUKernel(unsigned int num_particles,float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]) { dim3 grid(Grid_x,Grid_y,Grid_z); dim3 
block(Block_x,Block_y,Block_z); hipLaunchKernelGGL(( grav_coldetect), dim3(grid) , dim3(block), 0, 0, pos, vec,coltime,colindex); hipLaunchKernelGGL(( grav_colv), dim3(grid) , dim3(block), 0, 0, pos,vec,v_buff,sti,e,J,coltime,colindex); hipLaunchKernelGGL(( grav_v), dim3(grid) , dim3(block), 0, 0, pos,vec,v_buff,colindex); hipLaunchKernelGGL(( grav_vupdate), dim3(grid) , dim3(block), 0, 0, vec,v_buff); hipLaunchKernelGGL(( buff_clear), dim3(grid) , dim3(block), 0, 0, v_buff,coltime,colindex); hipLaunchKernelGGL(( grav_p), dim3(grid) , dim3(block), 0, 0, pos,vec); } //animation void runGPUKernel(void) { launchGPUKernel(NUM_POINTS, d_point, dv_point,v_buff,dst_point, de_point,dJ_point,dcolsynctime,dcolsyncindex); checkCudaErrors(hipMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_buff, v_buff, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(st_point, dst_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(e_point, de_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(J_point, dJ_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(colsynctime,dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(colsyncindex,dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , hipMemcpyDeviceToHost)); anim_time += anim_dt; } void defineViewMatrix(double phi, double theta) { unsigned int i; double c, s, xy_dist; double x_axis[3], y_axis[3], z_axis[3]; eye[Z] = sin(theta * PI / 180.0); xy_dist = cos(theta * PI / 180.0); c = cos(phi * PI / 180.0); s = sin(phi * PI / 180.0); eye[X] = xy_dist * c; eye[Y] = xy_dist * s; up[X] = - c * eye[Z]; up[Y] = - s * eye[Z]; up[Z] = s * eye[Y] + c * eye[X]; normVec(up); for (i = 0; i < 3; i++) { z_axis[i] = eye[i] - center[i]; } normVec(z_axis); cross(up, z_axis, x_axis); normVec(x_axis); cross(z_axis, x_axis, y_axis); gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]); } void metaball (float pos[3], float color[3]) { double margin=0; double view[3]={0}; double TH=theta; double PH=-phi; for (int i = 0 ; i < dev_points ; i ++) { view[0] = 0; view[1] = R * cos(i * PI * 2 / dev); view[2] = R * sin(i * PI * 2 / dev); point[i][X] = view[0] * cos(TH * PI / 180) * cos(PH * PI / 180) + view[1] * sin(PH * PI / 180) - view[2] * sin(TH * PI / 180) * cos(PH * PI / 180); point[i][Y] = - view[0] * cos(TH * PI / 180) * sin(PH * PI / 180) + view[1] * cos(PH * PI / 180) + view[2] * sin(TH * PI / 180) * sin(PH * PI / 180); point[i][Z] = view[0] * sin(TH * PI / 180) + view[2] * cos(TH * PI / 180); } glBegin(GL_TRIANGLE_FAN); glColor4f(1,1,1,0.3); glVertex3d(pos[X],pos[Y],pos[Z]); for (int i = 0 ; i < dev_points ; i ++) { glVertex3d(point[i][X] + pos[X], point[i][Y] + pos[Y], point[i][Z] + pos[Z]); } glEnd(); int mh[dev_points]; for (int i = 0 ; i < dev_points ; i ++) { mh[i]=1; } glBegin(GL_POINTS); glColor4f(color[0],color[1],color[2],0.1); for (int k = 0; k < hollow; k++) { margin=(colmargin-1)/10*k+1; for (int i = 0 ; i < dev_points ; i ++) { if((mh[i]==1) && (rand() % dev) < (dev * (hollow-k/2)/hollow)) glVertex3d(margin*point[i][X] + pos[X], margin*point[i][Y] + pos[Y], margin*point[i][Z] + pos[Z]); else mh[i]=0; } } glEnd(); } void display(void) { light_pos[0] = (float)eye[X]; light_pos[1] = 
(float)eye[Y]; light_pos[2] = (float)eye[Z]; light_pos[3] = 0.0f; runGPUKernel(); glLightfv(GL_LIGHT0, GL_POSITION, light_pos); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(-vision_size-right_motion/2, vision_size+right_motion/2, -vision_size-right_motion/2, vision_size+right_motion/2, -100*vision_size, 100*vision_size); glViewport(0, 0, window_width, window_height); defineViewMatrix(phi, theta); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); float color[3]={0}; for (int k = 0 ; k < NUM_POINTS ; k++) { if(J_point[k]/M/cap-temp < resol){ color[0]=1.0; color[1]=1.0; color[2]=1.0-(J_point[k]/M/cap-temp)/resol; } else if(J_point[k]/M/cap-temp < 2 * resol){ color[0]=1.0; color[1]=1.0-(J_point[k]/M/cap-temp-resol)/resol; color[2]=0.0; } else { color[0]=1.0; color[1]=0.0; color[2]=0.0; } metaball(h_point[k],color); } glutSwapBuffers(); glutPostRedisplay(); } void mouse_button(int button, int state, int x, int y) { if ((state == GLUT_DOWN) && (button == GLUT_LEFT_BUTTON)) motion_p = true; if ((state == GLUT_DOWN) && (button == GLUT_RIGHT_BUTTON)) motion_w = true; else if (state == GLUT_UP) { motion_p = false; motion_w = false; } mouse_old_x = x; mouse_old_y = y; } void mouse_motion(int x, int y) { int dx, dy; dx = x - mouse_old_x; dy = y - mouse_old_y; if (motion_p) { phi -= dx * 0.2; theta += dy * 0.2; } if (motion_w) { right_motion += dx / 10; up_motion -= dy / 10; } mouse_old_x = x; mouse_old_y = y; glutPostRedisplay(); } void resize(int width, int height) { window_width = width; window_height = height; } void keyboard(unsigned char key, int x, int y) { switch (key) { case 'q': case 'Q': case '\033': exit(0); default: break; } } bool initGL(void) { glClearColor(0.0f, 0.0f , 0.0f, 0.5f); glEnable(GL_DEPTH_TEST); glClearDepth(1.0); glDepthFunc(GL_LESS); glEnable(GL_LIGHT0); return true; } int main(int argc, char** argv) { point = (double **)malloc(sizeof(double *) * dev_points); for (int i = 0 ; i < dev_points ; i++) { point[i] = (double *)malloc(sizeof(double) * 3); } glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("3D CUDA Simulation"); glutDisplayFunc(display); glutReshapeFunc(resize); glutKeyboardFunc(keyboard); glutMouseFunc(mouse_button); glutMotionFunc(mouse_motion); setInitialPosition(); if (!initGL()) return 1; glutMainLoop(); hipFree(dst_point); hipFree(de_point); hipFree(dJ_point); for (int i = 0 ; i < dev_points ; i++) { free (point[i]); hipFree(d_point[i]); hipFree(dv_point[i]); hipFree(v_buff[i]); hipFree(dcolsynctime[i]); hipFree(dcolsyncindex[i]); } free (point); hipFree(d_point); hipFree(dv_point); hipFree(v_buff); hipFree(dcolsynctime); hipFree(dcolsyncindex); hipDeviceReset(); return 0; }
247099201d459c66398e86622d6ee24e9d812674.cu
#include <stdio.h> #include <stdlib.h> #include <GL/glew.h> #include <GL/gl.h> #include <GL/glut.h> #include <GL/freeglut.h> #include <math.h> #include <stdbool.h> #include <omp.h> #include <cuda.h> #include <helper_cuda.h> #include <helper_functions.h> #include <cuda_runtime.h> #include <cuda_gl_interop.h> #define PI 3.141592653589793 #define cap 1000 #define ref 0.9 #define temp 3000 #define visc 9 #define GRAV (6.674*0.00000000000000000001) #define density (2.5 * 1000000000000) #define sigma (0.96*5.67*0.00000001) //W/m^2 T^4 #define cool (sigma*4*PI*rad*rad*1000000*15) #define rad 300 //km #define M (4 / 3 * PI * rad*rad*rad* density)//kg #define MOONOFFSET_X (INIT_WIDTH/vision/2) #define MOONOFFSET_Y (INIT_WIDTH/vision*2) #define MOONOFFSET_Z (INIT_HEIGHT/vision) #define dev 360 #define resol 50 #define hollow 30 #define X 0 #define Y 1 #define Z 2 #define ANIM_START 0 #define ANIM 20 #define scale 0.01 #define colmargin 1.05 #define R (rad * scale) #define INIT_WIDTH 800 #define INIT_HEIGHT 800 #define vision 40 #define Grid_x 4 #define Grid_y 2 #define Grid_z 1 #define Block_x 2 #define Block_y 1 #define Block_z 1 #define NUM_POINTS (Grid_x*Grid_y*Grid_z*Block_x*Block_y*Block_z) unsigned int dev_points = dev + 1; unsigned int window_width = INIT_WIDTH; unsigned int window_height = INIT_HEIGHT; double vision_size = vision; float right_motion=0; float up_motion=0; double left, right, bottom, top; float h_point[NUM_POINTS][3]; float v_point[NUM_POINTS][3]; float st_point[NUM_POINTS]; float e_point[NUM_POINTS]; float J_point[NUM_POINTS]; float h_buff[NUM_POINTS][3]={0}; float anim_time = ANIM_START; float anim_dt = ANIM; double phi = 30.0; double theta = 30.0; float light_pos[4]; int mouse_old_x, mouse_old_y; bool motion_p; bool motion_w; double eye[3]; double center[3] = {0.0, 0.0, 0.0}; double up[3]; double ** point; float (*d_point)[3]; float (*dv_point)[3]; float (*dst_point); float (*de_point); float (*dJ_point); float (*v_buff)[3]; float colsynctime[NUM_POINTS][NUM_POINTS]={0}; int colsyncindex[NUM_POINTS][NUM_POINTS]={0}; float (*dcolsynctime)[NUM_POINTS]; int (*dcolsyncindex)[NUM_POINTS]; __global__ void grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]); __global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]); __global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS]); __global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3]); __global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]); __global__ void grav_p(float (*pos)[3], float(*vec)[3]); //basic function double dot(double vec0[], double vec1[]) { return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]); } void cross(double vec0[], double vec1[], double vec2[]) { vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y]; vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z]; vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X]; } void normVec(double vec[]) { double norm; norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]); vec[X] /= norm; vec[Y] /= norm; vec[Z] /= norm; } void normal(double p0[], double p1[], double p2[], double normal[]) { unsigned int i; double v0[3], v1[3]; for (i = 0; i < 3; i++) { v0[i] = p2[i] - p1[i]; v1[i] = p0[i] - p1[i]; } cross(v0, v1, normal); normVec(normal); } //colision detection __global__ void 
grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]) { float xn,yn,zn,vx,vy,vz,dis,sq; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; float rvec[3]={0}; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; for (int i = 0 ; i < NUM_POINTS; i++) { sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2); dis = (float)sqrt((double)sq); rvec[0]=(pos[i][0]-xn)/dis; rvec[1]=(pos[i][1]-yn)/dis; rvec[2]=(pos[i][2]-zn)/dis; if (dis > 2 * R * colmargin && i != index) { colindex[index][i]=NUM_POINTS; } else if (dis <= 2 * R * colmargin && i != index) { colindex[index][i]=i; coltime[index][i]=(2*R*colmargin-dis)/((vx-vec[i][0])*rvec[0]+(vy-vec[i][1])*rvec[1]+(vz-vec[i][2])*rvec[2]); } else { colindex[index][i]=NUM_POINTS; } } } //culculate speed after colision __global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]) { float xn,yn,zn,sq,dis; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; int colnum=0; float tmptime=0; int tmpindex=0; int coldex=0; float repul=0; float rvec[3]={0}; float Vl[3]={0}; float Vr[3]={0}; float Vh[3]={0}; float vl_buff[3]={0}; float vr_buff[3]={0}; float vcol_buff[3]={0}; float dotV=0; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vl_buff[0]=vec[index][0]; vl_buff[1]=vec[index][1]; vl_buff[2]=vec[index][2]; for (int i = 0 ; i < NUM_POINTS; i++){ if(colindex[index][i]!=NUM_POINTS){ colnum++; } } if(colnum>0){ for (int i = 0 ; i < NUM_POINTS; i++){ for(int j = i+1; j < NUM_POINTS; j++){ if(coltime[index][i] > coltime[index][j]){ tmptime=coltime[index][i]; tmpindex=colindex[index][i]; coltime[index][i]=coltime[index][j]; colindex[index][i]=colindex[index][j]; coltime[index][j]=tmptime; colindex[index][j]=tmpindex; } } } for (int i=NUM_POINTS-1 ; i>=NUM_POINTS-colnum; i--){ coldex=colindex[index][i]; sq = (float)pow((double)(xn-pos[coldex][0]),2)+pow((double)(yn-pos[coldex][1]),2)+pow((double)(zn-pos[coldex][2]),2); dis = (float)sqrt((double)sq); rvec[0]=(pos[coldex][0]-xn)/dis; rvec[1]=(pos[coldex][1]-yn)/dis; rvec[2]=(pos[coldex][2]-zn)/dis; dotV=rvec[0]*vl_buff[0]+rvec[1]*vl_buff[1]+rvec[2]*vl_buff[2]; Vl[0]=dotV*rvec[0]; Vl[1]=dotV*rvec[1]; Vl[2]=dotV*rvec[2]; dotV=rvec[0]*vec[coldex][0]+rvec[1]*vec[coldex][1]+rvec[2]*vec[coldex][2]; Vr[0]=dotV*rvec[0]; Vr[1]=dotV*rvec[1]; Vr[2]=dotV*rvec[2]; Vh[0]=vl_buff[0]-Vl[0]; Vh[1]=vl_buff[1]-Vl[1]; Vh[2]=vl_buff[2]-Vl[2]; repul=e[index]; if (e[coldex] < e[index]) { repul=e[coldex]; } vcol_buff[0]=Vh[0]+((1+repul)*Vr[0]+(1-repul)*Vl[0])/2; vcol_buff[1]=Vh[1]+((1+repul)*Vr[1]+(1-repul)*Vl[1])/2; vcol_buff[2]=Vh[2]+((1+repul)*Vr[2]+(1-repul)*Vl[2])/2; vr_buff[0]=vec[coldex][0]-Vr[0]+((1+repul)*Vl[0]+(1-repul)*Vr[0])/2; vr_buff[1]=vec[coldex][1]-Vr[1]+((1+repul)*Vl[1]+(1-repul)*Vr[1])/2; 
vr_buff[2]=vec[coldex][2]-Vr[2]+((1+repul)*Vl[2]+(1-repul)*Vr[2])/2; double Energy=0.5*M*(pow(vec[coldex][0],2)+pow(vec[coldex][1],2)+pow(vec[coldex][2],2)+pow(vl_buff[0],2)+pow(vl_buff[1],2)+pow(vl_buff[2],2) - (pow(vcol_buff[0],2)+pow(vcol_buff[1],2)+pow(vcol_buff[2],2)+pow(vr_buff[0],2)+pow(vr_buff[1],2)+pow(vr_buff[2],2))) / pow(scale,2) * 1000000; J[index] += Energy / (pow(10.0,(double)(sti[index]-sti[coldex]))+1); if (J[index] > M * cap * 10000000){ J[index] = M * cap * 10000000; } vl_buff[0]=vcol_buff[0]; vl_buff[1]=vcol_buff[1]; vl_buff[2]=vcol_buff[2]; e[index] = 1 - ((1-ref)/temp * J[index]/M/cap); if ( e[index] < 0 ){ e[index] = 0; } if ( e[index] > 1 ){ e[index] = 1; } sti[index] = visc - ((J[index]/M/cap - temp) / 100); } v_buff[index][0]=vl_buff[0]; v_buff[index][1]=vl_buff[1]; v_buff[index][2]=vl_buff[2]; } J[index]-=cool*(J[index]/M/cap)*(J[index]/M/cap)*(J[index]/M/cap)*(J[index]/M/cap)*ANIM; if (J[index] < 0) { J[index] = 0; } } //calculate speed after gravity affect __global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS]) { float xn,yn,zn,vx,vy,vz,sq,dis; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; int colnum=0; float gravity=0; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; for (int i = 0 ; i < NUM_POINTS; i++){ if(colindex[index][i]!=NUM_POINTS){ colnum++; } } if(colnum==0){ vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; for (int i = 0 ; i < NUM_POINTS; i++){ if (i!=index) { sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2); gravity=GRAV*M/sq*scale*scale; dis = (float)sqrt((double)sq); vx += ((pos[i][0]-xn)/dis)*gravity*ANIM*scale; vy += ((pos[i][1]-yn)/dis)*gravity*ANIM*scale; vz += ((pos[i][2]-zn)/dis)*gravity*ANIM*scale; } } } else { vx = v_buff[index][0]; vy = v_buff[index][1]; vz = v_buff[index][2]; for (int i = 0 ; i < NUM_POINTS; i++){ sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2); gravity=GRAV*M/sq*scale*scale; dis = (float)sqrt((double)sq); if(dis > 2 * R * colmargin) { vx += ((pos[i][0]-xn)/dis)*gravity*ANIM*scale; vy += ((pos[i][1]-yn)/dis)*gravity*ANIM*scale; vz += ((pos[i][2]-zn)/dis)*gravity*ANIM*scale; } } } v_buff[index][0] = vx; v_buff[index][1] = vy; v_buff[index][2] = vz; } __global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3]) { unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; vec[index][0]=v_buff[index][0]; vec[index][1]=v_buff[index][1]; vec[index][2]=v_buff[index][2]; } __global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]) { unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; for (int i=0; i < 3; i++){ v_buff[index][i]=0; } for 
(int i=0; i < NUM_POINTS; i++){ coltime[index][i]=0; colindex[index][i]=NUM_POINTS; } } //calculate position after gravity affect __global__ void grav_p(float(*pos)[3], float(*vec)[3]) { float xn,yn,zn,vx,vy,vz; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; pos[index][0] = xn + vx * ANIM; pos[index][1] = yn + vy * ANIM; pos[index][2] = zn + vz * ANIM; } void setInitialPosition(void) { for (int i = 0; i < NUM_POINTS; i++) { for (int j = 0 ; j < 3 ; j++){ h_point[i][j] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision*2 ; v_point[i][j] = 0; h_buff[i][j] = 0; } /* int earth_points = NUM_POINTS - (NUM_POINTS/16); if(i < earth_points){ for (int j = 0 ; j < 3 ; j++){ h_point[i][j] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/2 ; v_point[i][j] = 0; h_buff[i][j] = 0; } } else { h_point[i][0] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/4 + MOONOFFSET_X; h_point[i][1] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/4 + MOONOFFSET_Y; h_point[i][2] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/4 + MOONOFFSET_Z; v_point[i][0] = -(MOONOFFSET_X*scale/ANIM)/4; v_point[i][1] = -(MOONOFFSET_Y*scale/ANIM)/2.5; v_point[i][2] = -(MOONOFFSET_Z*scale/ANIM)/4; for (int j = 0 ; j < 3 ; j++){ h_buff[i][j] = 0; } } */ st_point[i]=visc; e_point[i]=ref; J_point[i]=cap*M*temp; for (int j = 0; j < NUM_POINTS; j++) { colsyncindex[i][j]=NUM_POINTS; } } checkCudaErrors(cudaMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&v_buff, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&dst_point, NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&de_point, NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&dJ_point, NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int))); checkCudaErrors(cudaMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(v_buff, h_buff, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dst_point, st_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(de_point, e_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dJ_point, J_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dcolsynctime, colsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dcolsyncindex, colsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , cudaMemcpyHostToDevice)); } //CUDA void launchGPUKernel(unsigned int num_particles,float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]) { dim3 
grid(Grid_x,Grid_y,Grid_z); dim3 block(Block_x,Block_y,Block_z); grav_coldetect<<<grid , block>>>(pos, vec,coltime,colindex); grav_colv<<<grid , block>>>(pos,vec,v_buff,sti,e,J,coltime,colindex); grav_v<<<grid , block>>>(pos,vec,v_buff,colindex); grav_vupdate<<<grid , block>>>(vec,v_buff); buff_clear<<<grid , block>>>(v_buff,coltime,colindex); grav_p<<<grid , block>>>(pos,vec); } //animation void runGPUKernel(void) { launchGPUKernel(NUM_POINTS, d_point, dv_point,v_buff,dst_point, de_point,dJ_point,dcolsynctime,dcolsyncindex); checkCudaErrors(cudaMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_buff, v_buff, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(st_point, dst_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(e_point, de_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(J_point, dJ_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(colsynctime,dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(colsyncindex,dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , cudaMemcpyDeviceToHost)); anim_time += anim_dt; } void defineViewMatrix(double phi, double theta) { unsigned int i; double c, s, xy_dist; double x_axis[3], y_axis[3], z_axis[3]; eye[Z] = sin(theta * PI / 180.0); xy_dist = cos(theta * PI / 180.0); c = cos(phi * PI / 180.0); s = sin(phi * PI / 180.0); eye[X] = xy_dist * c; eye[Y] = xy_dist * s; up[X] = - c * eye[Z]; up[Y] = - s * eye[Z]; up[Z] = s * eye[Y] + c * eye[X]; normVec(up); for (i = 0; i < 3; i++) { z_axis[i] = eye[i] - center[i]; } normVec(z_axis); cross(up, z_axis, x_axis); normVec(x_axis); cross(z_axis, x_axis, y_axis); gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]); } void metaball (float pos[3], float color[3]) { double margin=0; double view[3]={0}; double TH=theta; double PH=-phi; for (int i = 0 ; i < dev_points ; i ++) { view[0] = 0; view[1] = R * cos(i * PI * 2 / dev); view[2] = R * sin(i * PI * 2 / dev); point[i][X] = view[0] * cos(TH * PI / 180) * cos(PH * PI / 180) + view[1] * sin(PH * PI / 180) - view[2] * sin(TH * PI / 180) * cos(PH * PI / 180); point[i][Y] = - view[0] * cos(TH * PI / 180) * sin(PH * PI / 180) + view[1] * cos(PH * PI / 180) + view[2] * sin(TH * PI / 180) * sin(PH * PI / 180); point[i][Z] = view[0] * sin(TH * PI / 180) + view[2] * cos(TH * PI / 180); } glBegin(GL_TRIANGLE_FAN); glColor4f(1,1,1,0.3); glVertex3d(pos[X],pos[Y],pos[Z]); for (int i = 0 ; i < dev_points ; i ++) { glVertex3d(point[i][X] + pos[X], point[i][Y] + pos[Y], point[i][Z] + pos[Z]); } glEnd(); int mh[dev_points]; for (int i = 0 ; i < dev_points ; i ++) { mh[i]=1; } glBegin(GL_POINTS); glColor4f(color[0],color[1],color[2],0.1); for (int k = 0; k < hollow; k++) { margin=(colmargin-1)/10*k+1; for (int i = 0 ; i < dev_points ; i ++) { if((mh[i]==1) && (rand() % dev) < (dev * (hollow-k/2)/hollow)) glVertex3d(margin*point[i][X] + pos[X], margin*point[i][Y] + pos[Y], margin*point[i][Z] + pos[Z]); else mh[i]=0; } } glEnd(); } void display(void) { light_pos[0] = (float)eye[X]; light_pos[1] = (float)eye[Y]; light_pos[2] = (float)eye[Z]; light_pos[3] = 0.0f; runGPUKernel(); glLightfv(GL_LIGHT0, GL_POSITION, light_pos); glMatrixMode(GL_PROJECTION); glLoadIdentity(); 
glOrtho(-vision_size-right_motion/2, vision_size+right_motion/2, -vision_size-right_motion/2, vision_size+right_motion/2, -100*vision_size, 100*vision_size); glViewport(0, 0, window_width, window_height); defineViewMatrix(phi, theta); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); float color[3]={0}; for (int k = 0 ; k < NUM_POINTS ; k++) { if(J_point[k]/M/cap-temp < resol){ color[0]=1.0; color[1]=1.0; color[2]=1.0-(J_point[k]/M/cap-temp)/resol; } else if(J_point[k]/M/cap-temp < 2 * resol){ color[0]=1.0; color[1]=1.0-(J_point[k]/M/cap-temp-resol)/resol; color[2]=0.0; } else { color[0]=1.0; color[1]=0.0; color[2]=0.0; } metaball(h_point[k],color); } glutSwapBuffers(); glutPostRedisplay(); } void mouse_button(int button, int state, int x, int y) { if ((state == GLUT_DOWN) && (button == GLUT_LEFT_BUTTON)) motion_p = true; if ((state == GLUT_DOWN) && (button == GLUT_RIGHT_BUTTON)) motion_w = true; else if (state == GLUT_UP) { motion_p = false; motion_w = false; } mouse_old_x = x; mouse_old_y = y; } void mouse_motion(int x, int y) { int dx, dy; dx = x - mouse_old_x; dy = y - mouse_old_y; if (motion_p) { phi -= dx * 0.2; theta += dy * 0.2; } if (motion_w) { right_motion += dx / 10; up_motion -= dy / 10; } mouse_old_x = x; mouse_old_y = y; glutPostRedisplay(); } void resize(int width, int height) { window_width = width; window_height = height; } void keyboard(unsigned char key, int x, int y) { switch (key) { case 'q': case 'Q': case '\033': exit(0); default: break; } } bool initGL(void) { glClearColor(0.0f, 0.0f , 0.0f, 0.5f); glEnable(GL_DEPTH_TEST); glClearDepth(1.0); glDepthFunc(GL_LESS); glEnable(GL_LIGHT0); return true; } int main(int argc, char** argv) { point = (double **)malloc(sizeof(double *) * dev_points); for (int i = 0 ; i < dev_points ; i++) { point[i] = (double *)malloc(sizeof(double) * 3); } glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("3D CUDA Simulation"); glutDisplayFunc(display); glutReshapeFunc(resize); glutKeyboardFunc(keyboard); glutMouseFunc(mouse_button); glutMotionFunc(mouse_motion); setInitialPosition(); if (!initGL()) return 1; glutMainLoop(); cudaFree(dst_point); cudaFree(de_point); cudaFree(dJ_point); for (int i = 0 ; i < dev_points ; i++) { free (point[i]); cudaFree(d_point[i]); cudaFree(dv_point[i]); cudaFree(v_buff[i]); cudaFree(dcolsynctime[i]); cudaFree(dcolsyncindex[i]); } free (point); cudaFree(d_point); cudaFree(dv_point); cudaFree(v_buff); cudaFree(dcolsynctime); cudaFree(dcolsyncindex); cudaDeviceReset(); return 0; }
c4c8534aa66979fb51d6f684048fa7469bc65e18.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_2_bot; int xdim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_2_bot; int ydim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_2_bot; int xdim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_2_bot; int ydim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_plus_2_bot * (y) + \ xdim0_update_halo_kernel2_zvel_plus_2_bot * \ ydim0_update_halo_kernel2_zvel_plus_2_bot * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_plus_2_bot * (y) + \ xdim1_update_halo_kernel2_zvel_plus_2_bot * \ ydim1_update_halo_kernel2_zvel_plus_2_bot * (z)) // user function __device__ inline void update_halo_kernel2_zvel_plus_2_bot(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(0, 2, 0)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(0, 2, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_2_bot( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_bot * ydim0_update_halo_kernel2_zvel_plus_2_bot; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_bot * ydim1_update_halo_kernel2_zvel_plus_2_bot; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_2_bot(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_2_bot(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 94)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(94, "update_halo_kernel2_zvel_plus_2_bot"); OPS_kernels[94].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int 
xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_bot_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_bot_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_bot_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_bot_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_2_bot, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_plus_2_bot_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_2_bot, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_plus_2_bot_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_2_bot, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_plus_2_bot_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_2_bot, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_plus_2_bot_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[94].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_2_bot), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[94].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[94].mpi_time += t2 - t1; OPS_kernels[94].transfer += 
ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[94].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
c4c8534aa66979fb51d6f684048fa7469bc65e18.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_2_bot; int xdim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_2_bot; int ydim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_2_bot; int xdim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_2_bot; int ydim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_plus_2_bot * (y) + \ xdim0_update_halo_kernel2_zvel_plus_2_bot * \ ydim0_update_halo_kernel2_zvel_plus_2_bot * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_plus_2_bot * (y) + \ xdim1_update_halo_kernel2_zvel_plus_2_bot * \ ydim1_update_halo_kernel2_zvel_plus_2_bot * (z)) // user function __device__ inline void update_halo_kernel2_zvel_plus_2_bot(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(0, 2, 0)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(0, 2, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_2_bot( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_2_bot * ydim0_update_halo_kernel2_zvel_plus_2_bot; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_2_bot * ydim1_update_halo_kernel2_zvel_plus_2_bot; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_2_bot(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_2_bot(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 94)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(94, "update_halo_kernel2_zvel_plus_2_bot"); OPS_kernels[94].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = 
args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_bot_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_bot_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_bot_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_bot_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_2_bot, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_plus_2_bot_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_2_bot, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_plus_2_bot_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_2_bot, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_plus_2_bot_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_2_bot, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_plus_2_bot_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[94].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_zvel_plus_2_bot<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[94].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[94].mpi_time += t2 - t1; OPS_kernels[94].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[94].transfer += ops_compute_transfer(dim, start, end, 
&arg1); } }
0bcf02fe5c0b3093bdf26e2a979c10a23682f239.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #include "system_kern.cuh" #define TOTAL_THREADS 65536 #define BLOCK_THREADS 256 __device__ Particles_struct cuda_specie; // simulation data (on device) __device__ flcuda* x1; __device__ flcuda* x3; __device__ flcuda* v1; __device__ flcuda* v2; __device__ flcuda* v3; //electric and magnetic field __device__ flcuda* e1; __device__ flcuda* e2; __device__ flcuda* e3; __device__ flcuda* h1; __device__ flcuda* h2; __device__ flcuda* h3; //current and charge density __device__ flcuda* cur1; __device__ flcuda* cur2; __device__ flcuda* cur3; __device__ flcuda* rho; //is alive array __device__ bool* is_alive; __global__ void StepV(flcuda* x1, flcuda* x3, flcuda* v1, flcuda* v2, flcuda* v3, flcuda* e1, flcuda* e2, flcuda* e3, flcuda* h1, flcuda* h2, flcuda* h3, bool* is_alive, int number, flcuda timestep) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; flcuda gamma, gamma_inv; flcuda e1_val = 0.0, e2_val, e3_val, b1_val, b2_val, b3_val, vv1, vv2, vv3; const flcuda mu0 = (flcuda)1.0e-6; flcuda const1 = cuda_specie.charge*timestep/(flcuda)2.0/cuda_specie.mass; flcuda const2; //if (t->current_time == t->start_time) const1 = const1/2.0; if (is_alive[i] && (i < number)) { e1_val = get_field_e(x1[i], x3[i], e1, 1, cuda_specie) * const1; e2_val = get_field_e(x1[i], x3[i], e2, 2, cuda_specie) * const1; e3_val = get_field_e(x1[i], x3[i], e3, 3, cuda_specie) * const1; b1_val = get_field_h(x1[i] ,x3[i], h1, 1, cuda_specie)*mu0*const1; b2_val = get_field_h(x1[i], x3[i], h2, 2, cuda_specie)*mu0*const1; b3_val = get_field_h(x1[i], x3[i], h3, 3, cuda_specie)*mu0*const1; ////1. Multiplication by relativistic factor ////u(n-1/2) = gamma(n-1/2)*v(n-1/) gamma = get_gamma(i, v1, v2, v3); v1[i] = gamma*v1[i]; v2[i] = gamma*v2[i]; v3[i] = gamma*v3[i]; //2. Half acceleration in the electric field //u'(n) = u(n-1/2) + q*dt/2/m*E(n) v1[i] = v1[i] + e1_val; v2[i] = v2[i] + e2_val; v3[i] = v3[i] + e3_val; //3. Rotation in the magnetic field //u" = u' + 2/(1+B'^2)[(u' + [u'xB'(n)])xB'(n)] //B'(n) = B(n)*q*dt/2/mass/gamma(n) gamma_inv = get_gamma_inv(i, v1, v2, v3); b1_val = b1_val/gamma_inv; b2_val = b2_val/gamma_inv; b3_val = b3_val/gamma_inv; const2 = (flcuda)2.0/((flcuda)1.0 + b1_val*b1_val + b2_val*b2_val + b3_val*b3_val); vv1 = v1[i]; vv2 = v2[i]; vv3 = v3[i]; v1[i] = vv1 + const2*((vv2 - vv1*b3_val + vv3*b1_val)*b3_val - (vv3 + vv1*b2_val - vv2*b1_val)*b2_val); v2[i] = vv2 + const2*(-(vv1 + vv2*b3_val - vv3*b2_val)*b3_val + (vv3 + vv1*b2_val - vv2*b1_val)*b1_val); v3[i] = vv3 + const2*((vv1 + vv2*b3_val - vv3*b2_val)*b2_val - (vv2 - vv1*b3_val + vv3*b1_val)*b1_val); //4. Half acceleration in the electric field //u(n+1/2) = u(n) + q*dt/2/m*E(n) v1[i] = v1[i] + e1_val; v2[i] = v2[i] + e2_val; v3[i] = v3[i] + e3_val; //5. 
Division by relativistic factor gamma = get_gamma_inv(i, v1, v2, v3); v1[i] = v1[i]/gamma; v2[i] = v2[i]/gamma; v3[i] = v3[i]/gamma; } } __device__ flcuda get_field_e(flcuda x1, flcuda x3, flcuda* e_input, int component, Particles_struct cuda_specie) { int i_r=0; // number of particle i cell int k_z=0; // number of particle k cell int counter; flcuda pi = 3.1415926535897932; flcuda dr = cuda_specie.dr; flcuda dz = cuda_specie.dz; flcuda r1, r2, r3; // temp variables for calculation flcuda dz1, dz2; // temp var.: width of k and k+1 cell flcuda er =0; flcuda efi =0; flcuda ez =0; flcuda vol_1 =0; // volume of i cell; Q/V, V - volume of elementary cell flcuda vol_2 =0; // volume of i+1 cell; //////////////////////// r1 = x1-0.5*dr; r3 = x1+0.5*dr; /////////////////////// switch (component) { case 1: { // weighting of E_r// /////////////////////////////////////////////////// //finding number of cell. example dr=0.5, x1 = 0.7, i_r =0;!! i_r = (int)ceil((x1-0.5*dr)/dr)-1; k_z = (int)ceil((x3)/dz)-1; vol_1 = pi*dz*dr*dr*(2*i_r+1); vol_2 = pi*dz*dr*dr*(2*i_r+3); dz1 = (k_z+1)*dz-x3; dz2 = x3 - k_z*dz; r2 = (i_r+1)*dr; /////////////////////////////////////// //weighting Er[i][k]// counter = get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 1); er = e_input[counter]; er = er*(pi*dz1*(r2*r2-r1*r1))/vol_1; //weighting Er[i+1][k]// er = er + e_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 1)]*(pi*dz1*(r3*r3-r2*r2))/vol_2; //weighting Er[i][k+1]// er= er + e_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 1)]*(pi*dz2*(r2*r2-r1*r1))/vol_1; //weighting Er[i+1][k+1]// er = er + e_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 1)]*(pi*dz2*(r3*r3-r2*r2))/vol_2; /////////////////////////////////////////////////////// return er; } case 3: { // weighting of E_z// /////////////////////////////////////////////////////// //finding number of cell. example dz=0.5, x3 = 0.7, z_k =0;!! i_r = (int)ceil((x1)/dr)-1; k_z = (int)ceil((x3-0.5*dz)/dz)-1; /////////////////////////////////// if(x1>dr) { vol_1 = pi*dz*dr*dr*2.0*(flcuda)i_r; } else { vol_1 = pi*dz*dr*dr/4.0; //volume of first cell } r2 = (i_r+0.5)*dr; vol_2 = pi*dz*dr*dr*(2*i_r+2); dz1 = (k_z+1.5)*dz - x3; dz2 = x3 - (k_z+0.5)*dz; ////////////////////////////////////// //weighting Ez[i][k]// ez = ez + e_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 3)]*(pi*dz1*(r2*r2-r1*r1))/vol_1; //weighting Ez[i+1][k]// ez = ez + e_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 3)]*pi*dz1*(r3*r3-r2*r2)/vol_2; //weighting Ez[i][k+1]// ez = ez + e_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 3)]*pi*dz2*(r2*r2-r1*r1)/vol_1; //weighting Ez[i+1][k+1]// ez = ez + e_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 3)]*pi*dz2*(r3*r3-r2*r2)/vol_2; return ez; } case 2: { /////////////////////////////////////////////////////// // weighting of E_fi// /////////////////////////////////////////////////////// //finding number of cell. 
example dz=0.5, x3 = 0.7, z_k =1; i_r = (int)ceil((x1)/dr)-1; k_z = (int)ceil((x3)/dz)-1; if(x1>dr) { vol_1 = pi*dz*dr*dr*2.0*(flcuda)i_r; } else { vol_1 = pi*dz*dr*dr/4.0; //volume of first cell } r2 = (i_r+0.5)*dr; vol_2 = pi*dz*dr*dr*(2*i_r+2); dz1 = (k_z+1)*dz-x3; dz2 = x3-k_z*dz; ////////////////////////////////////// //weighting Efi[i][k]// efi = efi + e_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 2)]*pi*dz1*(r2*r2 - r1*r1)/vol_1; //weighting Efi[i+1][k]// efi = efi + e_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 2)]*pi*dz1*(r3*r3-r2*r2)/vol_2; //weighting Efi[i][k+1]// efi = efi + e_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 2)]*pi*dz2*(r2*r2-r1*r1)/vol_1; //weighting Efi[i+1][k+1]// efi =efi + e_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 2)]*pi*dz2*(r3*r3-r2*r2)/vol_2; return efi; } } return 0.0; } __device__ flcuda get_field_h(flcuda x1, flcuda x3, flcuda* h_input, int component, Particles_struct cuda_specie) { int i_r = 0, k_z = 0; flcuda r1, r2, r3, dz1, dz2, hr = 0.0, hfi = 0.0, hz = 0.0, vol_1 = 0.0, vol_2 = 0.0; // volumes of i and i+1 cell; Q/V, V - volume of elementary cell flcuda pi = 3.1415926535897932; flcuda dr = cuda_specie.dr; flcuda dz = cuda_specie.dz; //////////////////////// r1 = x1-0.5*dr; r3 = x1+0.5*dr; /////////////////////// switch (component) { case 3: { // weighting of H_z// i_r = (int)ceil((x1-0.5*dr)/dr)-1; k_z = (int)ceil((x3)/dz)-1; vol_1 = pi*dz*dr*dr*(2*i_r+1); vol_2 = pi*dz*dr*dr*(2*i_r+3); dz1 = (k_z+1)*dz-x3; dz2 = x3 - k_z*dz; r2 = (i_r+1)*dr; /////////////////////////////////////// //weighting Hz[i][k]// hz = hz + h_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 6)]*(pi*dz1*(r2*r2-r1*r1))/vol_1; //weighting Hz[i+1][k]// hz = hz + h_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 6)]*(pi*dz1*(r3*r3-r2*r2))/vol_2; //weighting Hz[i][k+1]// hz = hz + h_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 6)]*(pi*dz2*(r3*r3-r2*r2))/vol_1; //weighting Hz[i+1][k+1]// hz = hz + h_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 6)]*(pi*dz2*(r3*r3-r2*r2))/vol_2; return hz; } case 1: { // weighting of Hr// i_r = (int)ceil((x1)/dr)-1; k_z = (int)ceil((x3-0.5*dz)/dz)-1; if(x1>dr) { vol_1 = pi*dz*dr*dr*2*i_r; } else { vol_1 = pi*dz*dr*dr/4.0; //volume of first cell } r2 = (i_r+0.5)*dr; vol_2 = pi*dz*dr*dr*(2*i_r+2); dz1 = (k_z+1.5)*dz - x3; dz2 = x3 - (k_z+0.5)*dz; ////////////////////////////////////// //weighting Hr[i][k]// hr = hr + h_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 4)]*(pi*dz1*(r2*r2-r1*r1))/vol_1; //weighting Hr[i+1][k]// hr = hr + h_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 4)]*pi*dz1*(r3*r3-r2*r2)/vol_2; //weighting Hr[i][k+1]// hr = hr + h_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 4)]*pi*dz2*(r2*r2-r1*r1)/vol_1; //weighting Hr[i+1][k+1]// hr = hr + h_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 4)]*pi*dz2*(r3*r3-r2*r2)/vol_2; return hr; } case 2: { // weighting of H_fi// i_r = (int)ceil((x1-0.5*dr)/dr)-1; k_z = (int)ceil((x3-0.5*dz)/dz)-1; r2 = (i_r+1)*dr; vol_1 = pi*dz*dr*dr*(2*i_r+1); vol_2 = pi*dz*dr*dr*(2*i_r+3); dz1 = (k_z+1.5)*dz-x3; dz2 = x3-(k_z+0.5)*dz; //weighting Hfi[i][k]// hfi = hfi + h_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 5)]*pi*dz1*(r2*r2-r1*r1)/vol_1; //hfi = h_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 5)]; //hfi = h_input[1282]; //hfi = i_r; //weighting Hfi[i+1][k]// hfi = hfi + h_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 
5)]*pi*dz1*(r3*r3-r2*r2)/vol_2; //weighting Hfi[i][k+1]// hfi = hfi + h_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 5)]*dz2*pi*(r2*r2-r1*r1)/vol_1; //weighting Hfi[i+1][k+1]// hfi = hfi + h_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 5)]*pi*dz2*(r3*r3-r2*r2)/vol_2; return hfi; } } return 0.0; } __device__ int get_linear_coord(int index_r, int index_z, int ngrid_z, int component) { //index components: // 1 - er, 2 - e_phi, 3 - e_z // 4 - hr, 5 - h_phi, 6 - h_z switch (component) { case 1: case 2: case 6: return (index_r * ngrid_z + index_z); case 3: case 4: case 5: return (index_r * (ngrid_z - 1) + index_z); } return 0; } __device__ flcuda get_gamma(int i, flcuda* v1, flcuda* v2, flcuda* v3) { return pow((flcuda)1.0 - (v1[i]*v1[i] + v2[i]*v2[i] + v3[i]*v3[i])/(flcuda)300000000.0/(flcuda)300000000.0,(flcuda)-0.5); } __device__ flcuda get_gamma_inv(int i, flcuda* v1, flcuda* v2, flcuda* v3) { return pow((v1[i]*v1[i] + v2[i]*v2[i] + v3[i]*v3[i])/(flcuda)300000000.0/(flcuda)300000000.0 + (flcuda)1.0, (flcuda)0.5); }
0bcf02fe5c0b3093bdf26e2a979c10a23682f239.cu
#include <stdio.h> #include <math.h> #include "system_kern.cuh" #define TOTAL_THREADS 65536 #define BLOCK_THREADS 256 __device__ Particles_struct cuda_specie; // simulation data (on device) __device__ flcuda* x1; __device__ flcuda* x3; __device__ flcuda* v1; __device__ flcuda* v2; __device__ flcuda* v3; //electric and magnetic field __device__ flcuda* e1; __device__ flcuda* e2; __device__ flcuda* e3; __device__ flcuda* h1; __device__ flcuda* h2; __device__ flcuda* h3; //current and charge density __device__ flcuda* cur1; __device__ flcuda* cur2; __device__ flcuda* cur3; __device__ flcuda* rho; //is alive array __device__ bool* is_alive; __global__ void StepV(flcuda* x1, flcuda* x3, flcuda* v1, flcuda* v2, flcuda* v3, flcuda* e1, flcuda* e2, flcuda* e3, flcuda* h1, flcuda* h2, flcuda* h3, bool* is_alive, int number, flcuda timestep) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; flcuda gamma, gamma_inv; flcuda e1_val = 0.0, e2_val, e3_val, b1_val, b2_val, b3_val, vv1, vv2, vv3; const flcuda mu0 = (flcuda)1.0e-6; flcuda const1 = cuda_specie.charge*timestep/(flcuda)2.0/cuda_specie.mass; flcuda const2; //if (t->current_time == t->start_time) const1 = const1/2.0; if (is_alive[i] && (i < number)) { e1_val = get_field_e(x1[i], x3[i], e1, 1, cuda_specie) * const1; e2_val = get_field_e(x1[i], x3[i], e2, 2, cuda_specie) * const1; e3_val = get_field_e(x1[i], x3[i], e3, 3, cuda_specie) * const1; b1_val = get_field_h(x1[i] ,x3[i], h1, 1, cuda_specie)*mu0*const1; b2_val = get_field_h(x1[i], x3[i], h2, 2, cuda_specie)*mu0*const1; b3_val = get_field_h(x1[i], x3[i], h3, 3, cuda_specie)*mu0*const1; ////1. Multiplication by relativistic factor ////u(n-1/2) = gamma(n-1/2)*v(n-1/) gamma = get_gamma(i, v1, v2, v3); v1[i] = gamma*v1[i]; v2[i] = gamma*v2[i]; v3[i] = gamma*v3[i]; //2. Half acceleration in the electric field //u'(n) = u(n-1/2) + q*dt/2/m*E(n) v1[i] = v1[i] + e1_val; v2[i] = v2[i] + e2_val; v3[i] = v3[i] + e3_val; //3. Rotation in the magnetic field //u" = u' + 2/(1+B'^2)[(u' + [u'xB'(n)])xB'(n)] //B'(n) = B(n)*q*dt/2/mass/gamma(n) gamma_inv = get_gamma_inv(i, v1, v2, v3); b1_val = b1_val/gamma_inv; b2_val = b2_val/gamma_inv; b3_val = b3_val/gamma_inv; const2 = (flcuda)2.0/((flcuda)1.0 + b1_val*b1_val + b2_val*b2_val + b3_val*b3_val); vv1 = v1[i]; vv2 = v2[i]; vv3 = v3[i]; v1[i] = vv1 + const2*((vv2 - vv1*b3_val + vv3*b1_val)*b3_val - (vv3 + vv1*b2_val - vv2*b1_val)*b2_val); v2[i] = vv2 + const2*(-(vv1 + vv2*b3_val - vv3*b2_val)*b3_val + (vv3 + vv1*b2_val - vv2*b1_val)*b1_val); v3[i] = vv3 + const2*((vv1 + vv2*b3_val - vv3*b2_val)*b2_val - (vv2 - vv1*b3_val + vv3*b1_val)*b1_val); //4. Half acceleration in the electric field //u(n+1/2) = u(n) + q*dt/2/m*E(n) v1[i] = v1[i] + e1_val; v2[i] = v2[i] + e2_val; v3[i] = v3[i] + e3_val; //5. 
Division by relativistic factor gamma = get_gamma_inv(i, v1, v2, v3); v1[i] = v1[i]/gamma; v2[i] = v2[i]/gamma; v3[i] = v3[i]/gamma; } } __device__ flcuda get_field_e(flcuda x1, flcuda x3, flcuda* e_input, int component, Particles_struct cuda_specie) { int i_r=0; // number of particle i cell int k_z=0; // number of particle k cell int counter; flcuda pi = 3.1415926535897932; flcuda dr = cuda_specie.dr; flcuda dz = cuda_specie.dz; flcuda r1, r2, r3; // temp variables for calculation flcuda dz1, dz2; // temp var.: width of k and k+1 cell flcuda er =0; flcuda efi =0; flcuda ez =0; flcuda vol_1 =0; // volume of i cell; Q/V, V - volume of elementary cell flcuda vol_2 =0; // volume of i+1 cell; //////////////////////// r1 = x1-0.5*dr; r3 = x1+0.5*dr; /////////////////////// switch (component) { case 1: { // weighting of E_r// /////////////////////////////////////////////////// //finding number of cell. example dr=0.5, x1 = 0.7, i_r =0;!! i_r = (int)ceil((x1-0.5*dr)/dr)-1; k_z = (int)ceil((x3)/dz)-1; vol_1 = pi*dz*dr*dr*(2*i_r+1); vol_2 = pi*dz*dr*dr*(2*i_r+3); dz1 = (k_z+1)*dz-x3; dz2 = x3 - k_z*dz; r2 = (i_r+1)*dr; /////////////////////////////////////// //weighting Er[i][k]// counter = get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 1); er = e_input[counter]; er = er*(pi*dz1*(r2*r2-r1*r1))/vol_1; //weighting Er[i+1][k]// er = er + e_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 1)]*(pi*dz1*(r3*r3-r2*r2))/vol_2; //weighting Er[i][k+1]// er= er + e_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 1)]*(pi*dz2*(r2*r2-r1*r1))/vol_1; //weighting Er[i+1][k+1]// er = er + e_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 1)]*(pi*dz2*(r3*r3-r2*r2))/vol_2; /////////////////////////////////////////////////////// return er; } case 3: { // weighting of E_z// /////////////////////////////////////////////////////// //finding number of cell. example dz=0.5, x3 = 0.7, z_k =0;!! i_r = (int)ceil((x1)/dr)-1; k_z = (int)ceil((x3-0.5*dz)/dz)-1; /////////////////////////////////// if(x1>dr) { vol_1 = pi*dz*dr*dr*2.0*(flcuda)i_r; } else { vol_1 = pi*dz*dr*dr/4.0; //volume of first cell } r2 = (i_r+0.5)*dr; vol_2 = pi*dz*dr*dr*(2*i_r+2); dz1 = (k_z+1.5)*dz - x3; dz2 = x3 - (k_z+0.5)*dz; ////////////////////////////////////// //weighting Ez[i][k]// ez = ez + e_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 3)]*(pi*dz1*(r2*r2-r1*r1))/vol_1; //weighting Ez[i+1][k]// ez = ez + e_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 3)]*pi*dz1*(r3*r3-r2*r2)/vol_2; //weighting Ez[i][k+1]// ez = ez + e_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 3)]*pi*dz2*(r2*r2-r1*r1)/vol_1; //weighting Ez[i+1][k+1]// ez = ez + e_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 3)]*pi*dz2*(r3*r3-r2*r2)/vol_2; return ez; } case 2: { /////////////////////////////////////////////////////// // weighting of E_fi// /////////////////////////////////////////////////////// //finding number of cell. 
example dz=0.5, x3 = 0.7, z_k =1; i_r = (int)ceil((x1)/dr)-1; k_z = (int)ceil((x3)/dz)-1; if(x1>dr) { vol_1 = pi*dz*dr*dr*2.0*(flcuda)i_r; } else { vol_1 = pi*dz*dr*dr/4.0; //volume of first cell } r2 = (i_r+0.5)*dr; vol_2 = pi*dz*dr*dr*(2*i_r+2); dz1 = (k_z+1)*dz-x3; dz2 = x3-k_z*dz; ////////////////////////////////////// //weighting Efi[i][k]// efi = efi + e_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 2)]*pi*dz1*(r2*r2 - r1*r1)/vol_1; //weighting Efi[i+1][k]// efi = efi + e_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 2)]*pi*dz1*(r3*r3-r2*r2)/vol_2; //weighting Efi[i][k+1]// efi = efi + e_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 2)]*pi*dz2*(r2*r2-r1*r1)/vol_1; //weighting Efi[i+1][k+1]// efi =efi + e_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 2)]*pi*dz2*(r3*r3-r2*r2)/vol_2; return efi; } } return 0.0; } __device__ flcuda get_field_h(flcuda x1, flcuda x3, flcuda* h_input, int component, Particles_struct cuda_specie) { int i_r = 0, k_z = 0; flcuda r1, r2, r3, dz1, dz2, hr = 0.0, hfi = 0.0, hz = 0.0, vol_1 = 0.0, vol_2 = 0.0; // volumes of i and i+1 cell; Q/V, V - volume of elementary cell flcuda pi = 3.1415926535897932; flcuda dr = cuda_specie.dr; flcuda dz = cuda_specie.dz; //////////////////////// r1 = x1-0.5*dr; r3 = x1+0.5*dr; /////////////////////// switch (component) { case 3: { // weighting of H_z// i_r = (int)ceil((x1-0.5*dr)/dr)-1; k_z = (int)ceil((x3)/dz)-1; vol_1 = pi*dz*dr*dr*(2*i_r+1); vol_2 = pi*dz*dr*dr*(2*i_r+3); dz1 = (k_z+1)*dz-x3; dz2 = x3 - k_z*dz; r2 = (i_r+1)*dr; /////////////////////////////////////// //weighting Hz[i][k]// hz = hz + h_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 6)]*(pi*dz1*(r2*r2-r1*r1))/vol_1; //weighting Hz[i+1][k]// hz = hz + h_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 6)]*(pi*dz1*(r3*r3-r2*r2))/vol_2; //weighting Hz[i][k+1]// hz = hz + h_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 6)]*(pi*dz2*(r3*r3-r2*r2))/vol_1; //weighting Hz[i+1][k+1]// hz = hz + h_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 6)]*(pi*dz2*(r3*r3-r2*r2))/vol_2; return hz; } case 1: { // weighting of Hr// i_r = (int)ceil((x1)/dr)-1; k_z = (int)ceil((x3-0.5*dz)/dz)-1; if(x1>dr) { vol_1 = pi*dz*dr*dr*2*i_r; } else { vol_1 = pi*dz*dr*dr/4.0; //volume of first cell } r2 = (i_r+0.5)*dr; vol_2 = pi*dz*dr*dr*(2*i_r+2); dz1 = (k_z+1.5)*dz - x3; dz2 = x3 - (k_z+0.5)*dz; ////////////////////////////////////// //weighting Hr[i][k]// hr = hr + h_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 4)]*(pi*dz1*(r2*r2-r1*r1))/vol_1; //weighting Hr[i+1][k]// hr = hr + h_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 4)]*pi*dz1*(r3*r3-r2*r2)/vol_2; //weighting Hr[i][k+1]// hr = hr + h_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 4)]*pi*dz2*(r2*r2-r1*r1)/vol_1; //weighting Hr[i+1][k+1]// hr = hr + h_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 4)]*pi*dz2*(r3*r3-r2*r2)/vol_2; return hr; } case 2: { // weighting of H_fi// i_r = (int)ceil((x1-0.5*dr)/dr)-1; k_z = (int)ceil((x3-0.5*dz)/dz)-1; r2 = (i_r+1)*dr; vol_1 = pi*dz*dr*dr*(2*i_r+1); vol_2 = pi*dz*dr*dr*(2*i_r+3); dz1 = (k_z+1.5)*dz-x3; dz2 = x3-(k_z+0.5)*dz; //weighting Hfi[i][k]// hfi = hfi + h_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 5)]*pi*dz1*(r2*r2-r1*r1)/vol_1; //hfi = h_input[get_linear_coord(i_r, k_z, cuda_specie.grid_num3, 5)]; //hfi = h_input[1282]; //hfi = i_r; //weighting Hfi[i+1][k]// hfi = hfi + h_input[get_linear_coord(i_r+1, k_z, cuda_specie.grid_num3, 
5)]*pi*dz1*(r3*r3-r2*r2)/vol_2; //weighting Hfi[i][k+1]// hfi = hfi + h_input[get_linear_coord(i_r, k_z+1, cuda_specie.grid_num3, 5)]*dz2*pi*(r2*r2-r1*r1)/vol_1; //weighting Hfi[i+1][k+1]// hfi = hfi + h_input[get_linear_coord(i_r+1, k_z+1, cuda_specie.grid_num3, 5)]*pi*dz2*(r3*r3-r2*r2)/vol_2; return hfi; } } return 0.0; } __device__ int get_linear_coord(int index_r, int index_z, int ngrid_z, int component) { //index components: // 1 - er, 2 - e_phi, 3 - e_z // 4 - hr, 5 - h_phi, 6 - h_z switch (component) { case 1: case 2: case 6: return (index_r * ngrid_z + index_z); case 3: case 4: case 5: return (index_r * (ngrid_z - 1) + index_z); } return 0; } __device__ flcuda get_gamma(int i, flcuda* v1, flcuda* v2, flcuda* v3) { return pow((flcuda)1.0 - (v1[i]*v1[i] + v2[i]*v2[i] + v3[i]*v3[i])/(flcuda)300000000.0/(flcuda)300000000.0,(flcuda)-0.5); } __device__ flcuda get_gamma_inv(int i, flcuda* v1, flcuda* v2, flcuda* v3) { return pow((v1[i]*v1[i] + v2[i]*v2[i] + v3[i]*v3[i])/(flcuda)300000000.0/(flcuda)300000000.0 + (flcuda)1.0, (flcuda)0.5); }
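The numbered comments in StepV describe a relativistic Boris-type pusher; written out as equations (a sketch of what the kernel computes, with c = 3e8 m/s hard-coded in get_gamma / get_gamma_inv, const1 = q\Delta t/2m, and the magnetic term built from the weighted H field times mu0):

\gamma^{\,n-1/2} = \Bigl(1 - \tfrac{|\mathbf{v}^{\,n-1/2}|^2}{c^2}\Bigr)^{-1/2}, \qquad
\mathbf{u}^{\,n-1/2} = \gamma^{\,n-1/2}\,\mathbf{v}^{\,n-1/2}

\mathbf{u}' = \mathbf{u}^{\,n-1/2} + \frac{q\,\Delta t}{2m}\,\mathbf{E}

\mathbf{b} = \frac{q\,\Delta t}{2m\,\gamma(\mathbf{u}')}\,\mu_0\,\mathbf{H}, \qquad
\mathbf{u}'' = \mathbf{u}' + \frac{2}{1 + |\mathbf{b}|^2}\,\bigl(\mathbf{u}' + \mathbf{u}'\times\mathbf{b}\bigr)\times\mathbf{b}

\mathbf{u}^{\,n+1/2} = \mathbf{u}'' + \frac{q\,\Delta t}{2m}\,\mathbf{E}, \qquad
\mathbf{v}^{\,n+1/2} = \frac{\mathbf{u}^{\,n+1/2}}{\sqrt{1 + |\mathbf{u}^{\,n+1/2}|^2/c^2}}

with \gamma(\mathbf{u}') = \sqrt{1 + |\mathbf{u}'|^2/c^2} as returned by get_gamma_inv, matching steps 1 through 5 in the kernel's comments.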
ef09782a1c51aebb4b6e38dc9feda53c4f0bd156.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/hip/ForeachFunctors.cuh> #include <ATen/NumericUtils.h> namespace at { namespace native { template<template<class> class Op> std::vector<Tensor> foreach_pointwise_op(TensorList input, TensorList tensors1, TensorList tensors2, const Scalar& scalar) { std::vector<std::vector<at::Tensor>> tensor_lists; std::vector<at::Tensor> vec_res; vec_res.reserve(input.size()); for (const auto& t: input) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(input.vec()); tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, input[0].scalar_type(), "foreach_pointwise_op_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<4>(tensor_lists, PointwiseOpScalarFunctor<scalar_t, /* depth */ 4, /* r_args_depth */ 3, /* res_arg_index */ 3>(), Op<opmath_t>(), scalar.to<opmath_t>()); }); return tensor_lists[3]; } template<template<class> class Op> void foreach_pointwise_op_(TensorList input, TensorList tensors1, TensorList tensors2, const Scalar& scalar) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.emplace_back(input.vec()); tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, input[0].scalar_type(), "foreach_pointwise_op__cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<3>(tensor_lists, PointwiseOpScalarFunctor<scalar_t, /* depth */ 3, /* r_args_depth */ 3, /* res_arg_index */ 0>(), Op<opmath_t>(), scalar.to<opmath_t>()); }); } template<template<class> class Op> void foreach_pointwise_op_(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.reserve(3); tensor_lists.emplace_back(input.vec()); tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, input[0].scalar_type(), "foreach_pointwise_op__cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<3, opmath_t>(tensor_lists, scalars, PointwiseOpScalarListFunctor<scalar_t, /* depth */ 3, /* r_args_depth */ 3, /* res_arg_index */ 0>(), Op<opmath_t>()); }); } template<template<class> class Op> std::vector<Tensor> foreach_pointwise_op(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.reserve(4); std::vector<at::Tensor> vec_res; vec_res.reserve(input.size()); for (const auto& t: input) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(input.vec()); tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, input[0].scalar_type(), "foreach_pointwise_op_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<4, opmath_t>(tensor_lists, scalars, PointwiseOpScalarListFunctor<scalar_t, /* depth */ 4, /* r_args_depth */ 3, /* res_arg_index */ 3>(), Op<opmath_t>()); }); return tensor_lists[3]; } #define FOREACH_POINTWISE_OP_SCALAR(NAME, OP) \ std::vector<Tensor> 
foreach_tensor_##NAME##_scalar_cuda(TensorList input, TensorList tensors1, TensorList tensors2, const Scalar& scalar) { \ check_foreach_api_restrictions(input, tensors1, tensors2); \ \ if (!can_use_fast_route({input, tensors1, tensors2}, scalar) || has_integral_tensor(input, /* includeBool */ true)) { \ return at::native::foreach_tensor_##NAME##_scalar_slow(input, tensors1, tensors2, scalar); \ } \ \ return foreach_pointwise_op<OP>(input, tensors1, tensors2, scalar); \ } \ \ void foreach_tensor_##NAME##_scalar_cuda_(TensorList input, TensorList tensors1, TensorList tensors2, const Scalar& scalar) { \ check_foreach_api_restrictions(input, tensors1, tensors2); \ \ if (!can_use_fast_route({input, tensors1, tensors2}, scalar) || has_integral_tensor(input, /* includeBool */ true)) { \ return at::native::foreach_tensor_##NAME##_scalar_slow_(input, tensors1, tensors2, scalar); \ } \ \ foreach_pointwise_op_<OP>(input, tensors1, tensors2, scalar); \ } #define FOREACH_POINTWISE_OP_SCALARLIST(NAME, OP) \ std::vector<Tensor> foreach_tensor_##NAME##_scalarlist_cuda(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { \ check_foreach_api_restrictions(input, tensors1, tensors2, scalars); \ \ if (!can_use_fast_route({input, tensors1, tensors2}, scalars) || has_integral_tensor(input, /* includeBool */ true)) { \ return at::native::foreach_tensor_##NAME##_scalarlist_slow(input, tensors1, tensors2, scalars); \ } \ \ return foreach_pointwise_op<OP>(input, tensors1, tensors2, scalars); \ } \ \ void foreach_tensor_##NAME##_scalarlist_cuda_(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { \ check_foreach_api_restrictions(input, tensors1, tensors2, scalars); \ \ if (!can_use_fast_route({input, tensors1, tensors2}, scalars) || has_integral_tensor(input, /* includeBool */ true)) { \ return at::native::foreach_tensor_##NAME##_scalarlist_slow_(input, tensors1, tensors2, scalars); \ } \ \ foreach_pointwise_op_<OP>(input, tensors1, tensors2, scalars); \ } FOREACH_POINTWISE_OP_SCALAR(addcmul, std::multiplies); FOREACH_POINTWISE_OP_SCALAR(addcdiv, std::divides); FOREACH_POINTWISE_OP_SCALARLIST(addcmul, std::multiplies); FOREACH_POINTWISE_OP_SCALARLIST(addcdiv, std::divides); // Why bool tensors are pushed to slowpath? // Because `AT_DISPATCH_ALL_TYPES_AND` is used below. // TODO(mkozuki): Check whether it's possible to handle bool tensors in fastpath. #define FOREACH_MAXIMUM_MINIMUM_OP(NAME, OP) \ std::vector<Tensor> foreach_tensor_##NAME##_cuda(TensorList tensors1, TensorList tensors2) { \ check_foreach_api_restrictions(tensors1, tensors2); \ if (!can_use_fast_route({tensors1, tensors2}) || has_bool_tensor(tensors1)) { \ return at::native::foreach_tensor_##NAME##_slow(tensors1, tensors2); \ } \ \ std::vector<std::vector<at::Tensor>> tensor_lists; \ std::vector<at::Tensor> vec_res; \ vec_res.reserve(tensors1.size()); \ for (const auto& t: tensors1) { \ vec_res.emplace_back(at::native::empty_like(t)); \ } \ \ tensor_lists.emplace_back(tensors1.vec()); \ tensor_lists.emplace_back(tensors2.vec()); \ tensor_lists.emplace_back(std::move(vec_res)); \ \ AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, tensors1[0].scalar_type(), "foreach_maximum_minimum_op_cuda", [&]() { \ using opmath_t = get_opmath_t<scalar_t>::opmath_t; \ auto op = [] GPU_LAMBDA (opmath_t a, opmath_t b) -> opmath_t { \ opmath_t c = a OP b ? 
a : b; \ if (_isnan(a)) { \ c = a; \ } \ return c;}; \ multi_tensor_apply<3>(tensor_lists, \ PointwiseOpListFunctor<scalar_t, 3>(), \ op); \ }); \ \ return tensor_lists[2]; \ } \ FOREACH_MAXIMUM_MINIMUM_OP(maximum, >) FOREACH_MAXIMUM_MINIMUM_OP(minimum, <) }} // namespace at::native
ef09782a1c51aebb4b6e38dc9feda53c4f0bd156.cu
#include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/cuda/ForeachFunctors.cuh> #include <ATen/NumericUtils.h> namespace at { namespace native { template<template<class> class Op> std::vector<Tensor> foreach_pointwise_op(TensorList input, TensorList tensors1, TensorList tensors2, const Scalar& scalar) { std::vector<std::vector<at::Tensor>> tensor_lists; std::vector<at::Tensor> vec_res; vec_res.reserve(input.size()); for (const auto& t: input) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(input.vec()); tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, input[0].scalar_type(), "foreach_pointwise_op_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<4>(tensor_lists, PointwiseOpScalarFunctor<scalar_t, /* depth */ 4, /* r_args_depth */ 3, /* res_arg_index */ 3>(), Op<opmath_t>(), scalar.to<opmath_t>()); }); return tensor_lists[3]; } template<template<class> class Op> void foreach_pointwise_op_(TensorList input, TensorList tensors1, TensorList tensors2, const Scalar& scalar) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.emplace_back(input.vec()); tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, input[0].scalar_type(), "foreach_pointwise_op__cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<3>(tensor_lists, PointwiseOpScalarFunctor<scalar_t, /* depth */ 3, /* r_args_depth */ 3, /* res_arg_index */ 0>(), Op<opmath_t>(), scalar.to<opmath_t>()); }); } template<template<class> class Op> void foreach_pointwise_op_(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.reserve(3); tensor_lists.emplace_back(input.vec()); tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, input[0].scalar_type(), "foreach_pointwise_op__cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<3, opmath_t>(tensor_lists, scalars, PointwiseOpScalarListFunctor<scalar_t, /* depth */ 3, /* r_args_depth */ 3, /* res_arg_index */ 0>(), Op<opmath_t>()); }); } template<template<class> class Op> std::vector<Tensor> foreach_pointwise_op(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.reserve(4); std::vector<at::Tensor> vec_res; vec_res.reserve(input.size()); for (const auto& t: input) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(input.vec()); tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, input[0].scalar_type(), "foreach_pointwise_op_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<4, opmath_t>(tensor_lists, scalars, PointwiseOpScalarListFunctor<scalar_t, /* depth */ 4, /* r_args_depth */ 3, /* res_arg_index */ 3>(), Op<opmath_t>()); }); return tensor_lists[3]; } #define FOREACH_POINTWISE_OP_SCALAR(NAME, OP) \ std::vector<Tensor> foreach_tensor_##NAME##_scalar_cuda(TensorList input, TensorList tensors1, TensorList 
tensors2, const Scalar& scalar) { \ check_foreach_api_restrictions(input, tensors1, tensors2); \ \ if (!can_use_fast_route({input, tensors1, tensors2}, scalar) || has_integral_tensor(input, /* includeBool */ true)) { \ return at::native::foreach_tensor_##NAME##_scalar_slow(input, tensors1, tensors2, scalar); \ } \ \ return foreach_pointwise_op<OP>(input, tensors1, tensors2, scalar); \ } \ \ void foreach_tensor_##NAME##_scalar_cuda_(TensorList input, TensorList tensors1, TensorList tensors2, const Scalar& scalar) { \ check_foreach_api_restrictions(input, tensors1, tensors2); \ \ if (!can_use_fast_route({input, tensors1, tensors2}, scalar) || has_integral_tensor(input, /* includeBool */ true)) { \ return at::native::foreach_tensor_##NAME##_scalar_slow_(input, tensors1, tensors2, scalar); \ } \ \ foreach_pointwise_op_<OP>(input, tensors1, tensors2, scalar); \ } #define FOREACH_POINTWISE_OP_SCALARLIST(NAME, OP) \ std::vector<Tensor> foreach_tensor_##NAME##_scalarlist_cuda(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { \ check_foreach_api_restrictions(input, tensors1, tensors2, scalars); \ \ if (!can_use_fast_route({input, tensors1, tensors2}, scalars) || has_integral_tensor(input, /* includeBool */ true)) { \ return at::native::foreach_tensor_##NAME##_scalarlist_slow(input, tensors1, tensors2, scalars); \ } \ \ return foreach_pointwise_op<OP>(input, tensors1, tensors2, scalars); \ } \ \ void foreach_tensor_##NAME##_scalarlist_cuda_(TensorList input, TensorList tensors1, TensorList tensors2, at::ArrayRef<Scalar> scalars) { \ check_foreach_api_restrictions(input, tensors1, tensors2, scalars); \ \ if (!can_use_fast_route({input, tensors1, tensors2}, scalars) || has_integral_tensor(input, /* includeBool */ true)) { \ return at::native::foreach_tensor_##NAME##_scalarlist_slow_(input, tensors1, tensors2, scalars); \ } \ \ foreach_pointwise_op_<OP>(input, tensors1, tensors2, scalars); \ } FOREACH_POINTWISE_OP_SCALAR(addcmul, std::multiplies); FOREACH_POINTWISE_OP_SCALAR(addcdiv, std::divides); FOREACH_POINTWISE_OP_SCALARLIST(addcmul, std::multiplies); FOREACH_POINTWISE_OP_SCALARLIST(addcdiv, std::divides); // Why bool tensors are pushed to slowpath? // Because `AT_DISPATCH_ALL_TYPES_AND` is used below. // TODO(mkozuki): Check whether it's possible to handle bool tensors in fastpath. #define FOREACH_MAXIMUM_MINIMUM_OP(NAME, OP) \ std::vector<Tensor> foreach_tensor_##NAME##_cuda(TensorList tensors1, TensorList tensors2) { \ check_foreach_api_restrictions(tensors1, tensors2); \ if (!can_use_fast_route({tensors1, tensors2}) || has_bool_tensor(tensors1)) { \ return at::native::foreach_tensor_##NAME##_slow(tensors1, tensors2); \ } \ \ std::vector<std::vector<at::Tensor>> tensor_lists; \ std::vector<at::Tensor> vec_res; \ vec_res.reserve(tensors1.size()); \ for (const auto& t: tensors1) { \ vec_res.emplace_back(at::native::empty_like(t)); \ } \ \ tensor_lists.emplace_back(tensors1.vec()); \ tensor_lists.emplace_back(tensors2.vec()); \ tensor_lists.emplace_back(std::move(vec_res)); \ \ AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, tensors1[0].scalar_type(), "foreach_maximum_minimum_op_cuda", [&]() { \ using opmath_t = get_opmath_t<scalar_t>::opmath_t; \ auto op = [] GPU_LAMBDA (opmath_t a, opmath_t b) -> opmath_t { \ opmath_t c = a OP b ? 
a : b; \ if (_isnan(a)) { \ c = a; \ } \ return c;}; \ multi_tensor_apply<3>(tensor_lists, \ PointwiseOpListFunctor<scalar_t, 3>(), \ op); \ }); \ \ return tensor_lists[2]; \ } \ FOREACH_MAXIMUM_MINIMUM_OP(maximum, >) FOREACH_MAXIMUM_MINIMUM_OP(minimum, <) }} // namespace at::native
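In the FOREACH_MAXIMUM_MINIMUM_OP macro above, the GPU lambda picks a OP b ? a : b and then forces the result back to a whenever a is NaN, so a NaN in the first tensor list always propagates. A scalar sketch of that per-element rule for floating-point types (plain C++ with a hypothetical function name, not the ATen functor itself):

#include <cmath>

// Per-element semantics of the maximum fast path above: OP is '>' for
// maximum and '<' for minimum; a NaN in `a` is propagated, mirroring
// the _isnan(a) check inside the kernel lambda.
template <typename T>
T foreach_maximum_scalar(T a, T b) {
  T c = a > b ? a : b;
  if (std::isnan(a)) {
    c = a;
  }
  return c;
}

For example, foreach_maximum_scalar(NAN, 0.0f) yields NaN rather than 0.0f. Bool tensors never reach this path at all, since the has_bool_tensor check routes them to the slow implementation, as the comment above the macro notes.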
c96f7914668e02222c9f5ddb509291c63735d649.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cassert> #include <chrono> #include <cstdio> #include <cstdlib> #include <cstring> #include <iostream> #include <random> __global__ void multiplication( int * a, int * b, int * c, int dim) { int column = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; if ( (row < dim) && (column < dim) ) { int value = 0; for ( int k = 0; k < dim; ++k) { value += a[row * dim + k] * b[k * dim + column]; } c[row * dim + column] = value; } } void compute_on_device( int dim, int * host_a, int * host_b, int * host_c) { constexpr int tile_dim = 32; // allocate device memory int * device_a, * device_b, * device_c; hipMalloc( & device_a, dim * dim * sizeof( int) ); hipMalloc( & device_b, dim * dim * sizeof( int) ); hipMalloc( & device_c, dim * dim * sizeof( int) ); // copy input matrices from host to device memory hipMemcpy( device_a, host_a, dim * dim * sizeof( int), hipMemcpyHostToDevice); hipMemcpy( device_b, host_b, dim * dim * sizeof( int), hipMemcpyHostToDevice); dim3 block_dim{ tile_dim, tile_dim }; dim3 grid_dim{ static_cast< unsigned int >( ::ceil( dim/static_cast< float >( block_dim.x) ) ), static_cast< unsigned int >( ::ceil( dim/static_cast< float >( block_dim.y) ) ) }; auto start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( multiplication), dim3(grid_dim), dim3(block_dim) , 0, 0, device_a, device_b, device_c, dim); hipDeviceSynchronize(); auto duration = std::chrono::high_resolution_clock::now() - start; std::cout << "device: " << std::chrono::duration_cast< std::chrono::microseconds >( duration).count() << " ms\n"; hipMemcpy( host_c, device_c, dim * dim * sizeof( int), hipMemcpyDeviceToHost); hipFree( device_a); hipFree( device_b); hipFree( device_c); } void compute_on_host( int dim, int * a, int * b, int * c) { auto start = std::chrono::high_resolution_clock::now(); for ( int row = 0; row < dim; ++row) { for ( int column = 0; column < dim; ++column) { for ( int k = 0; k < dim; ++k) { c[row * dim + column] += a[row * dim + k] * b[k * dim + column]; } } } auto duration = std::chrono::high_resolution_clock::now() - start; std::cout << "host: " << std::chrono::duration_cast< std::chrono::microseconds >( duration).count() << " ms\n"; } bool equal( int dim, int * host, int * device) { for ( int row = 0; row < dim; ++row) { for ( int column = 0; column < dim; ++column) { if ( host[row * dim + column] != device[row * dim + column]) { return false; } } } return true; } int main() { constexpr int dim = 1024; // allocate host memory int * host_a = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) ); int * host_b = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) ); // initialize input matrices std::minstd_rand generator; std::uniform_int_distribution<> distribution{ 0, 255 }; for ( unsigned int i = 0; i < dim*dim; ++i) { host_a[i] = distribution( generator); host_b[i] = host_a[i]; } // multiplication on host int * host_c = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) ); compute_on_host( dim, host_a, host_b, host_c); // multiplication on device int * device_c = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) ); compute_on_device( dim, host_a, host_b, device_c); if ( ! equal( dim, host_c, device_c) ) { std::cout << "matrices are not equal" << std::endl; } std::free( host_a); std::free( host_b); std::free( host_c); std::free( device_c); return EXIT_SUCCESS; }
c96f7914668e02222c9f5ddb509291c63735d649.cu
#include <cassert> #include <chrono> #include <cstdio> #include <cstdlib> #include <cstring> #include <iostream> #include <random> __global__ void multiplication( int * a, int * b, int * c, int dim) { int column = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; if ( (row < dim) && (column < dim) ) { int value = 0; for ( int k = 0; k < dim; ++k) { value += a[row * dim + k] * b[k * dim + column]; } c[row * dim + column] = value; } } void compute_on_device( int dim, int * host_a, int * host_b, int * host_c) { constexpr int tile_dim = 32; // allocate device memory int * device_a, * device_b, * device_c; cudaMalloc( & device_a, dim * dim * sizeof( int) ); cudaMalloc( & device_b, dim * dim * sizeof( int) ); cudaMalloc( & device_c, dim * dim * sizeof( int) ); // copy input matrices from host to device memory cudaMemcpy( device_a, host_a, dim * dim * sizeof( int), cudaMemcpyHostToDevice); cudaMemcpy( device_b, host_b, dim * dim * sizeof( int), cudaMemcpyHostToDevice); dim3 block_dim{ tile_dim, tile_dim }; dim3 grid_dim{ static_cast< unsigned int >( std::ceil( dim/static_cast< float >( block_dim.x) ) ), static_cast< unsigned int >( std::ceil( dim/static_cast< float >( block_dim.y) ) ) }; auto start = std::chrono::high_resolution_clock::now(); multiplication<<< grid_dim, block_dim >>>( device_a, device_b, device_c, dim); cudaDeviceSynchronize(); auto duration = std::chrono::high_resolution_clock::now() - start; std::cout << "device: " << std::chrono::duration_cast< std::chrono::microseconds >( duration).count() << " ms\n"; cudaMemcpy( host_c, device_c, dim * dim * sizeof( int), cudaMemcpyDeviceToHost); cudaFree( device_a); cudaFree( device_b); cudaFree( device_c); } void compute_on_host( int dim, int * a, int * b, int * c) { auto start = std::chrono::high_resolution_clock::now(); for ( int row = 0; row < dim; ++row) { for ( int column = 0; column < dim; ++column) { for ( int k = 0; k < dim; ++k) { c[row * dim + column] += a[row * dim + k] * b[k * dim + column]; } } } auto duration = std::chrono::high_resolution_clock::now() - start; std::cout << "host: " << std::chrono::duration_cast< std::chrono::microseconds >( duration).count() << " ms\n"; } bool equal( int dim, int * host, int * device) { for ( int row = 0; row < dim; ++row) { for ( int column = 0; column < dim; ++column) { if ( host[row * dim + column] != device[row * dim + column]) { return false; } } } return true; } int main() { constexpr int dim = 1024; // allocate host memory int * host_a = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) ); int * host_b = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) ); // initialize input matrices std::minstd_rand generator; std::uniform_int_distribution<> distribution{ 0, 255 }; for ( unsigned int i = 0; i < dim*dim; ++i) { host_a[i] = distribution( generator); host_b[i] = host_a[i]; } // multiplication on host int * host_c = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) ); compute_on_host( dim, host_a, host_b, host_c); // multiplication on device int * device_c = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) ); compute_on_device( dim, host_a, host_b, device_c); if ( ! equal( dim, host_c, device_c) ) { std::cout << "matrices are not equal" << std::endl; } std::free( host_a); std::free( host_b); std::free( host_c); std::free( device_c); return EXIT_SUCCESS; }
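compute_on_device above sizes the grid with ceil(dim / (float)block_dim.x) in each dimension, so for dim = 1024 and 32 x 32 blocks it launches a 32 x 32 grid, exactly one thread per output element, and the (row < dim) && (column < dim) guard in the kernel only matters when dim is not a multiple of the tile. The same block count can be obtained with integer arithmetic; a small sketch (hypothetical helper, not part of the file above):

// Integer ceiling division: number of blocks needed to cover `n` elements
// with `block` threads per block; equals ceil(n / (float)block) for
// positive n and block, without the float round-trip.
constexpr unsigned int ceil_div(unsigned int n, unsigned int block) {
  return (n + block - 1) / block;
}

// ceil_div(1024, 32) == 32; ceil_div(1000, 32) == 32, covering 1024 threads
// per dimension, with the kernel guard masking the extra 24 rows and columns.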
e4d29319ca87c60429be0e69df9f854b4a83e8e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<iostream> #include <cstdlib> #include <sys/time.h> #include<limits.h> #include<algorithm> using namespace std; #define maxVertices 8192 #define INF INT_MAX-1 #define NS 64 #define THREADSPB 1024 float dist[maxVertices * maxVertices]; float *device_matrix; float *result_matrix; int vertices; int tilesize[3]; size_t tot; __global__ void FloydWarshall(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, float *matrix, int n, int na) { int j = blockIdx.x * blockDim.x + threadIdx.x + Xj; int i = blockIdx.y * blockDim.y + threadIdx.y + Xi; if (j >= na || (i >= na)) return; __shared__ float thisrowkthcolumn; if (n > NS) { for (int via = Vi; via < (Vi + n); via++) { if (threadIdx.x == 0) thisrowkthcolumn = matrix[i * na + via]; __syncthreads(); if (i != j && i != via && j != via) matrix[i * na + j] = min(matrix[i * na + j], thisrowkthcolumn + matrix[via * na + j]); } } else { __shared__ float work[NS]; work[i * na + j] = matrix[i * na + j]; for (int via = Vi; via < (Vi + n); via++) { work[i * na + via] = matrix[i *na + via]; work[via * na + j] = matrix[via *na + j]; } __syncthreads(); for (int via = Vi; via < (Vi + n); via++) { if (i != j && j != via && i != via) work[i * na + j] = min(work[i * na + j], work[i * na + via] + work[via * na + j]); } __syncthreads(); for (int via = Vi; via < (Vi + n); via++) matrix[i * na + j] = work[i * na + j]; } } void F_loop_FW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n) { dim3 blocks_per_grid((n + THREADSPB - 1) / THREADSPB, n); hipLaunchKernelGGL(( FloydWarshall), dim3(blocks_per_grid), dim3(THREADSPB), 0, 0, Xi, Xj, Ui, Uj, Vi, Vj, device_matrix, n, vertices); hipDeviceSynchronize(); } __global__ void A_FloydWarshall(int via, int from, int to, float *matrix, int n) { matrix[from * n + to] = min(matrix[from * n + to], matrix[from * n + via] + matrix[via * n + to]); } void A_F_loop_FW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n) { for(int via = Uj; via < Uj + n; via++) { for(int from = Xi; from < Xi + n; from++) { for(int to = Xj; to < Xj + n ; to++) { if(from!=to && from!=via && to!=via) { hipLaunchKernelGGL(( A_FloydWarshall), dim3(1), dim3(1), 0, 0, via, from, to, device_matrix, vertices); } } } } } /* void F_loop_FW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n) { for(int via = Uj; via < Uj + n; via++) { for(int from = Xi; from < Xi + n; from++) { for(int to = Xj; to < Xj + n ; to++) { if(from!=to && from!=via && to!=via) { dist[from * vertices + to] = min(dist[from * vertices + to], dist[from * vertices + via]+dist[via * vertices + to]); } } } } printarray(vertices); } */ void DFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) { int r = tilesize[d]; if (n < r) F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n); else { for (int via = 0; via < r; via++) { int p = via * (n/r); for (int i = 0; i < r; i++) for (int j = 0; j < r; j++) { int ip = i * (n/r); int jp = j * (n/r); if (i != via && j != via) DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1); } } } } void BFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) { int r = tilesize[d]; if (n < r) F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n); else { for (int via = 0; via < r; via++) { int p = via * (n/r); for (int j = 0; j < r; j++) { int ip = j * (n/r); if (j != via) BFW(Xi + p, Xj + ip , Ui + p, Uj + p, Vi + p, Vj + ip, n/r, d + 1); } for (int i = 0; i < r; i++) for (int j = 0; j < r; j++) { int ip = i * (n/r); int jp = j * (n/r); if (i != via && j 
!= via) DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1); } } } } void CFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) { int r = tilesize[d]; if (n < r) F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n); else { for (int via = 0; via < r; via++) { int p = via * (n/r); for (int j = 0; j < r; j++) { int ip = j * (n/r); if (j != via) CFW(Xi + ip, Xj + p , Ui + ip, Uj + p, Vi + p, Vj + p, n/r, d + 1); } for (int i = 0; i < r; i++) for (int j = 0; j < r; j++) { int ip = i * (n/r); int jp = j * (n/r); if (i != via && j != via) DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1); } } } } void AFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) { int r = tilesize[d]; if (n < r) A_F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n); else { for (int via = 0; via < r; via++) { int p = via * (n/r); AFW(Xi + p, Xj + p, Ui + p, Uj + p, Vi + p, Vj + p, n/r, d + 1); for (int j = 0; j < r; j++) { int ip = j * (n/r); if (j != via) BFW(Xi + p, Xj + ip , Ui + p, Uj + p, Vi + p, Vj + ip, n/r, d + 1); } for (int j = 0; j < r; j++) { int ip = j * (n/r); if (j != via) CFW(Xi + ip, Xj + p , Ui + ip, Uj + p, Vi + p, Vj + p, n/r, d + 1); } for (int i = 0; i < r; i++) for (int j = 0; j < r; j++) { int ip = i * (n/r); int jp = j * (n/r); if (i != via && j != via) DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1); } } } } int main(int argc, char *argv[]) { char *arg_vertices = getenv("N_VERTICES"); vertices = atoi(arg_vertices); tilesize[0] = 2; tilesize[1] = vertices/NS; tilesize[2] = INF; for(int i = 0 ; i < vertices ; i++ ) { for(int j = 0 ; j< vertices; j++ ) { if( i == j ) dist[i * vertices + j] = 0; else { int num = i + j; if (num % 3 == 0) dist[i * vertices + j] = num / 2; else if (num % 2 == 0) dist[i * vertices + j] = num * 2; else dist[i * vertices + j] = num; } } } struct timeval tvalBefore, tvalAfter; tot = vertices * vertices * sizeof(float); device_matrix = NULL; hipMalloc((float **)&device_matrix, tot); hipMemcpy(device_matrix, dist, tot, hipMemcpyHostToDevice); result_matrix =(float *)malloc( vertices * vertices * sizeof(float)); gettimeofday (&tvalBefore, NULL); AFW(0, 0, 0, 0, 0, 0, vertices, 0); hipMemcpy(result_matrix, device_matrix, tot, hipMemcpyDeviceToHost); gettimeofday (&tvalAfter, NULL); printf("Time: %ld microseconds\n", ((tvalAfter.tv_sec - tvalBefore.tv_sec)*1000000L +tvalAfter.tv_usec) - tvalBefore.tv_usec ); for(int i = 0 ; i < vertices; i++ ) { cout << "\n"; for(int j = 0 ; j< vertices ;j++ ) cout << result_matrix[i * vertices + j] << " " ; } return 0; }
e4d29319ca87c60429be0e69df9f854b4a83e8e2.cu
#include<stdio.h> #include<iostream> #include <cstdlib> #include <sys/time.h> #include<limits.h> #include<algorithm> using namespace std; #define maxVertices 8192 #define INF INT_MAX-1 #define NS 64 #define THREADSPB 1024 float dist[maxVertices * maxVertices]; float *device_matrix; float *result_matrix; int vertices; int tilesize[3]; size_t tot; __global__ void FloydWarshall(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, float *matrix, int n, int na) { int j = blockIdx.x * blockDim.x + threadIdx.x + Xj; int i = blockIdx.y * blockDim.y + threadIdx.y + Xi; if (j >= na || (i >= na)) return; __shared__ float thisrowkthcolumn; if (n > NS) { for (int via = Vi; via < (Vi + n); via++) { if (threadIdx.x == 0) thisrowkthcolumn = matrix[i * na + via]; __syncthreads(); if (i != j && i != via && j != via) matrix[i * na + j] = min(matrix[i * na + j], thisrowkthcolumn + matrix[via * na + j]); } } else { __shared__ float work[NS]; work[i * na + j] = matrix[i * na + j]; for (int via = Vi; via < (Vi + n); via++) { work[i * na + via] = matrix[i *na + via]; work[via * na + j] = matrix[via *na + j]; } __syncthreads(); for (int via = Vi; via < (Vi + n); via++) { if (i != j && j != via && i != via) work[i * na + j] = min(work[i * na + j], work[i * na + via] + work[via * na + j]); } __syncthreads(); for (int via = Vi; via < (Vi + n); via++) matrix[i * na + j] = work[i * na + j]; } } void F_loop_FW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n) { dim3 blocks_per_grid((n + THREADSPB - 1) / THREADSPB, n); FloydWarshall<<<blocks_per_grid, THREADSPB>>>(Xi, Xj, Ui, Uj, Vi, Vj, device_matrix, n, vertices); cudaThreadSynchronize(); } __global__ void A_FloydWarshall(int via, int from, int to, float *matrix, int n) { matrix[from * n + to] = min(matrix[from * n + to], matrix[from * n + via] + matrix[via * n + to]); } void A_F_loop_FW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n) { for(int via = Uj; via < Uj + n; via++) { for(int from = Xi; from < Xi + n; from++) { for(int to = Xj; to < Xj + n ; to++) { if(from!=to && from!=via && to!=via) { A_FloydWarshall<<<1, 1>>>(via, from, to, device_matrix, vertices); } } } } } /* void F_loop_FW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n) { for(int via = Uj; via < Uj + n; via++) { for(int from = Xi; from < Xi + n; from++) { for(int to = Xj; to < Xj + n ; to++) { if(from!=to && from!=via && to!=via) { dist[from * vertices + to] = min(dist[from * vertices + to], dist[from * vertices + via]+dist[via * vertices + to]); } } } } printarray(vertices); } */ void DFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) { int r = tilesize[d]; if (n < r) F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n); else { for (int via = 0; via < r; via++) { int p = via * (n/r); for (int i = 0; i < r; i++) for (int j = 0; j < r; j++) { int ip = i * (n/r); int jp = j * (n/r); if (i != via && j != via) DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1); } } } } void BFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) { int r = tilesize[d]; if (n < r) F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n); else { for (int via = 0; via < r; via++) { int p = via * (n/r); for (int j = 0; j < r; j++) { int ip = j * (n/r); if (j != via) BFW(Xi + p, Xj + ip , Ui + p, Uj + p, Vi + p, Vj + ip, n/r, d + 1); } for (int i = 0; i < r; i++) for (int j = 0; j < r; j++) { int ip = i * (n/r); int jp = j * (n/r); if (i != via && j != via) DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1); } } } } void CFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) { int 
r = tilesize[d]; if (n < r) F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n); else { for (int via = 0; via < r; via++) { int p = via * (n/r); for (int j = 0; j < r; j++) { int ip = j * (n/r); if (j != via) CFW(Xi + ip, Xj + p , Ui + ip, Uj + p, Vi + p, Vj + p, n/r, d + 1); } for (int i = 0; i < r; i++) for (int j = 0; j < r; j++) { int ip = i * (n/r); int jp = j * (n/r); if (i != via && j != via) DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1); } } } } void AFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) { int r = tilesize[d]; if (n < r) A_F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n); else { for (int via = 0; via < r; via++) { int p = via * (n/r); AFW(Xi + p, Xj + p, Ui + p, Uj + p, Vi + p, Vj + p, n/r, d + 1); for (int j = 0; j < r; j++) { int ip = j * (n/r); if (j != via) BFW(Xi + p, Xj + ip , Ui + p, Uj + p, Vi + p, Vj + ip, n/r, d + 1); } for (int j = 0; j < r; j++) { int ip = j * (n/r); if (j != via) CFW(Xi + ip, Xj + p , Ui + ip, Uj + p, Vi + p, Vj + p, n/r, d + 1); } for (int i = 0; i < r; i++) for (int j = 0; j < r; j++) { int ip = i * (n/r); int jp = j * (n/r); if (i != via && j != via) DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1); } } } } int main(int argc, char *argv[]) { char *arg_vertices = getenv("N_VERTICES"); vertices = atoi(arg_vertices); tilesize[0] = 2; tilesize[1] = vertices/NS; tilesize[2] = INF; for(int i = 0 ; i < vertices ; i++ ) { for(int j = 0 ; j< vertices; j++ ) { if( i == j ) dist[i * vertices + j] = 0; else { int num = i + j; if (num % 3 == 0) dist[i * vertices + j] = num / 2; else if (num % 2 == 0) dist[i * vertices + j] = num * 2; else dist[i * vertices + j] = num; } } } struct timeval tvalBefore, tvalAfter; tot = vertices * vertices * sizeof(float); device_matrix = NULL; cudaMalloc((float **)&device_matrix, tot); cudaMemcpy(device_matrix, dist, tot, cudaMemcpyHostToDevice); result_matrix =(float *)malloc( vertices * vertices * sizeof(float)); gettimeofday (&tvalBefore, NULL); AFW(0, 0, 0, 0, 0, 0, vertices, 0); cudaMemcpy(result_matrix, device_matrix, tot, cudaMemcpyDeviceToHost); gettimeofday (&tvalAfter, NULL); printf("Time: %ld microseconds\n", ((tvalAfter.tv_sec - tvalBefore.tv_sec)*1000000L +tvalAfter.tv_usec) - tvalBefore.tv_usec ); for(int i = 0 ; i < vertices; i++ ) { cout << "\n"; for(int j = 0 ; j< vertices ;j++ ) cout << result_matrix[i * vertices + j] << " " ; } return 0; }
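Every leaf of the recursive decomposition above (AFW, BFW, CFW, DFW) applies the same relaxation, dist[i][j] = min(dist[i][j], dist[i][via] + dist[via][j]), just restricted to different tiles and via-ranges. A plain sequential version of that recurrence, sketched here as a reference one could diff against result_matrix (same row-major layout, hypothetical function name):

#include <algorithm>

// Reference Floyd-Warshall on a row-major n x n distance matrix d,
// relaxing every (i, j) pair through every intermediate vertex k.
void floyd_warshall_ref(float *d, int n) {
  for (int k = 0; k < n; ++k)
    for (int i = 0; i < n; ++i)
      for (int j = 0; j < n; ++j)
        d[i * n + j] = std::min(d[i * n + j], d[i * n + k] + d[k * n + j]);
}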
a2c24b38b12cfd0a575c20122a16cf1c8d6d7e18.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <functional> #include <layers/batch_norm_layer.hpp> #include <string> #include <utils.hpp> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { template <typename T> using ToStringType = typename std::conditional<std::is_same<T, __half>::value, float, T>::type; } template <typename T> BatchNormLayer<T>::BatchNormLayer(const std::shared_ptr<BufferBlock2<float>>& weight_buff, const std::shared_ptr<BufferBlock2<float>>& wgrad_buff, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blob_buff, const Tensor2<T>& in_tensor, const Tensor2<T>& out_tensor, const Params& params, const std::shared_ptr<GPUResource>& gpu_resource, std::vector<Initializer_t> initializer_types) : Layer(gpu_resource, initializer_types), params_(params), mode_(CUDNN_BATCHNORM_PER_ACTIVATION) { CudaDeviceContext context(get_device_id()); const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); assert(get_size_from_dims(in_tensor_dim) == get_size_from_dims(out_tensor_dim)); assert(in_tensor_dim.size() == 2 && out_tensor_dim.size() == 2); assert(in_tensor_dim[0] == out_tensor_dim[0]); assert(in_tensor_dim[1] == out_tensor_dim[1]); CK_CUDNN_THROW_(cudnnCreateTensorDescriptor(&in_out_desc_)); size_t num_feature = in_tensor_dim[1]; int batch_size = in_tensor_dim[0]; cudnnDataType_t data_type = std::is_same<T, __half>::value ? 
CUDNN_DATA_HALF : CUDNN_DATA_FLOAT; int n_stride = num_feature; int w_stride = 1; CK_CUDNN_THROW_(cudnnSetTensor4dDescriptorEx(in_out_desc_, data_type, batch_size, 1, 1, num_feature, n_stride, 1, 1, w_stride)); in_tensors_.push_back(in_tensor); out_tensors_.push_back(out_tensor); CK_CUDNN_THROW_(cudnnCreateTensorDescriptor(&gamma_beta_desc_)); CK_CUDNN_THROW_(cudnnDeriveBNTensorDescriptor(gamma_beta_desc_, in_out_desc_, mode_)); std::vector<size_t> gamma_dim = {num_feature, 1}; // gamma & beta weight_buff->reserve(gamma_dim, &gamma_); weight_buff->reserve(gamma_dim, &beta_); weights_.push_back(gamma_); weights_.push_back(beta_); // gamma grad & beta grad wgrad_buff->reserve(gamma_dim, &gamma_grad_); wgrad_buff->reserve(gamma_dim, &beta_grad_); wgrad_.push_back(gamma_grad_); wgrad_.push_back(beta_grad_); blob_buff->reserve(in_tensor_dim, &temp_in_tensor_); // result running mean & var blob_buff->reserve(gamma_dim, &result_running_mean_); blob_buff->reserve(gamma_dim, &result_running_var_); // save running mean & var (cache) blob_buff->reserve(gamma_dim, &result_save_mean_); blob_buff->reserve(gamma_dim, &result_save_inv_var_); } template <typename T> BatchNormLayer<T>::~BatchNormLayer() { try { CK_CUDNN_THROW_(cudnnDestroyTensorDescriptor(in_out_desc_)); CK_CUDNN_THROW_(cudnnDestroyTensorDescriptor(gamma_beta_desc_)); } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; } } template <typename T> void BatchNormLayer<T>::initialize() { // host array to get running mean & var size_t num_feature = in_tensors_[0].get_dimensions()[1]; std::shared_ptr<GeneralBuffer2<HostAllocator>> internal_host_buf = GeneralBuffer2<HostAllocator>::create(); internal_host_buf->reserve({num_feature}, &h_result_running_mean_); internal_host_buf->reserve({num_feature}, &h_result_running_var_); internal_host_buf->allocate(); } template <typename T> void BatchNormLayer<T>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); float one = 1.0f, zero = 0.0f; Tensor2<T>& in_tensor = in_tensors_[0]; Tensor2<T>& out_tensor = out_tensors_[0]; T* in = in_tensor.get_ptr(); T* out = out_tensor.get_ptr(); float* gamma = gamma_.get_ptr(); float* beta = beta_.get_ptr(); float* result_running_mean = result_running_mean_.get_ptr(); float* result_running_var = result_running_var_.get_ptr(); float* result_save_mean = result_save_mean_.get_ptr(); float* result_save_inv_var = result_save_inv_var_.get_ptr(); if (is_train) { CK_CUDNN_THROW_(cudnnBatchNormalizationForwardTraining( get_gpu().get_cudnn_handle(), mode_, &one, &zero, in_out_desc_, in, in_out_desc_, out, gamma_beta_desc_, gamma, beta, params_.factor, result_running_mean, result_running_var, params_.eps, result_save_mean, result_save_inv_var)); } else { CK_CUDNN_THROW_(cudnnBatchNormalizationForwardInference( get_gpu().get_cudnn_handle(), mode_, &one, &zero, in_out_desc_, in, in_out_desc_, out, gamma_beta_desc_, gamma, beta, result_running_mean, result_running_var, params_.eps)); } } template <typename T> void BatchNormLayer<T>::bprop() { CudaDeviceContext context(get_device_id()); float one = 1.0f, zero = 0.0f; Tensor2<T>& in_tensor = in_tensors_[0]; Tensor2<T>& out_tensor = out_tensors_[0]; T* in = in_tensor.get_ptr(); T* out = out_tensor.get_ptr(); float* gamma = gamma_.get_ptr(); float* gamma_grad = gamma_grad_.get_ptr(); float* beta_grad = beta_grad_.get_ptr(); float* result_save_mean = result_save_mean_.get_ptr(); float* result_save_inv_var = result_save_inv_var_.get_ptr(); T* temp_in = temp_in_tensor_.get_ptr(); size_t n_byte 
= temp_in_tensor_.get_size_in_bytes(); CK_CUDA_THROW_( hipMemcpyAsync(temp_in, in, n_byte, hipMemcpyDeviceToDevice, get_gpu().get_stream())); CK_CUDNN_THROW_(cudnnBatchNormalizationBackward( get_gpu().get_cudnn_handle(), mode_, &one, &zero, &one, &zero, in_out_desc_, temp_in, in_out_desc_, out, in_out_desc_, in, gamma_beta_desc_, gamma, gamma_grad, beta_grad, params_.eps, result_save_mean, result_save_inv_var)); } template <typename T> std::string BatchNormLayer<T>::get_no_trained_params_in_string() { float* d_result_running_mean = result_running_mean_.get_ptr(); float* d_result_running_var = result_running_var_.get_ptr(); size_t n_byte = result_running_mean_.get_size_in_bytes(); size_t n_elem = n_byte / sizeof(T); CK_CUDA_THROW_(hipMemcpy(h_result_running_mean_.get_ptr(), d_result_running_mean, n_byte, hipMemcpyDeviceToHost)); CK_CUDA_THROW_(hipMemcpy(h_result_running_var_.get_ptr(), d_result_running_var, n_byte, hipMemcpyDeviceToHost)); std::string result = " \"type\": \"BatchNorm\",\n"; result += " \"mean\": ["; for (size_t i = 0; i < n_elem; i++) { result += std::to_string(ToStringType<T>(h_result_running_mean_.get_ptr()[i])); if (i != (n_elem - 1)) result += ", "; } result += "],\n"; result += " \"var\": ["; for (size_t i = 0; i < n_elem; i++) { result += std::to_string(ToStringType<T>(h_result_running_var_.get_ptr()[i])); if (i != (n_elem - 1)) result += ", "; } result += "]"; return result; } template <typename T> std::unique_ptr<DataSimulator> BatchNormLayer<T>::get_default_initializer(const int index) { std::unique_ptr<DataSimulator> simu; if (0 == index) { simu.reset(new ConstantDataSimulator(1.0f)); } else if (1 == index) { simu.reset(new ConstantDataSimulator(0.0f)); } else { CK_THROW_(Error_t::OutOfBound, "index != {0, 1}."); } return simu; } template class BatchNormLayer<float>; template class BatchNormLayer<__half>; } // namespace HugeCTR
a2c24b38b12cfd0a575c20122a16cf1c8d6d7e18.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <functional> #include <layers/batch_norm_layer.hpp> #include <string> #include <utils.hpp> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { template <typename T> using ToStringType = typename std::conditional<std::is_same<T, __half>::value, float, T>::type; } template <typename T> BatchNormLayer<T>::BatchNormLayer(const std::shared_ptr<BufferBlock2<float>>& weight_buff, const std::shared_ptr<BufferBlock2<float>>& wgrad_buff, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blob_buff, const Tensor2<T>& in_tensor, const Tensor2<T>& out_tensor, const Params& params, const std::shared_ptr<GPUResource>& gpu_resource, std::vector<Initializer_t> initializer_types) : Layer(gpu_resource, initializer_types), params_(params), mode_(CUDNN_BATCHNORM_PER_ACTIVATION) { CudaDeviceContext context(get_device_id()); const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); assert(get_size_from_dims(in_tensor_dim) == get_size_from_dims(out_tensor_dim)); assert(in_tensor_dim.size() == 2 && out_tensor_dim.size() == 2); assert(in_tensor_dim[0] == out_tensor_dim[0]); assert(in_tensor_dim[1] == out_tensor_dim[1]); CK_CUDNN_THROW_(cudnnCreateTensorDescriptor(&in_out_desc_)); size_t num_feature = in_tensor_dim[1]; int batch_size = in_tensor_dim[0]; cudnnDataType_t data_type = std::is_same<T, __half>::value ? 
CUDNN_DATA_HALF : CUDNN_DATA_FLOAT; int n_stride = num_feature; int w_stride = 1; CK_CUDNN_THROW_(cudnnSetTensor4dDescriptorEx(in_out_desc_, data_type, batch_size, 1, 1, num_feature, n_stride, 1, 1, w_stride)); in_tensors_.push_back(in_tensor); out_tensors_.push_back(out_tensor); CK_CUDNN_THROW_(cudnnCreateTensorDescriptor(&gamma_beta_desc_)); CK_CUDNN_THROW_(cudnnDeriveBNTensorDescriptor(gamma_beta_desc_, in_out_desc_, mode_)); std::vector<size_t> gamma_dim = {num_feature, 1}; // gamma & beta weight_buff->reserve(gamma_dim, &gamma_); weight_buff->reserve(gamma_dim, &beta_); weights_.push_back(gamma_); weights_.push_back(beta_); // gamma grad & beta grad wgrad_buff->reserve(gamma_dim, &gamma_grad_); wgrad_buff->reserve(gamma_dim, &beta_grad_); wgrad_.push_back(gamma_grad_); wgrad_.push_back(beta_grad_); blob_buff->reserve(in_tensor_dim, &temp_in_tensor_); // result running mean & var blob_buff->reserve(gamma_dim, &result_running_mean_); blob_buff->reserve(gamma_dim, &result_running_var_); // save running mean & var (cache) blob_buff->reserve(gamma_dim, &result_save_mean_); blob_buff->reserve(gamma_dim, &result_save_inv_var_); } template <typename T> BatchNormLayer<T>::~BatchNormLayer() { try { CK_CUDNN_THROW_(cudnnDestroyTensorDescriptor(in_out_desc_)); CK_CUDNN_THROW_(cudnnDestroyTensorDescriptor(gamma_beta_desc_)); } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; } } template <typename T> void BatchNormLayer<T>::initialize() { // host array to get running mean & var size_t num_feature = in_tensors_[0].get_dimensions()[1]; std::shared_ptr<GeneralBuffer2<HostAllocator>> internal_host_buf = GeneralBuffer2<HostAllocator>::create(); internal_host_buf->reserve({num_feature}, &h_result_running_mean_); internal_host_buf->reserve({num_feature}, &h_result_running_var_); internal_host_buf->allocate(); } template <typename T> void BatchNormLayer<T>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); float one = 1.0f, zero = 0.0f; Tensor2<T>& in_tensor = in_tensors_[0]; Tensor2<T>& out_tensor = out_tensors_[0]; T* in = in_tensor.get_ptr(); T* out = out_tensor.get_ptr(); float* gamma = gamma_.get_ptr(); float* beta = beta_.get_ptr(); float* result_running_mean = result_running_mean_.get_ptr(); float* result_running_var = result_running_var_.get_ptr(); float* result_save_mean = result_save_mean_.get_ptr(); float* result_save_inv_var = result_save_inv_var_.get_ptr(); if (is_train) { CK_CUDNN_THROW_(cudnnBatchNormalizationForwardTraining( get_gpu().get_cudnn_handle(), mode_, &one, &zero, in_out_desc_, in, in_out_desc_, out, gamma_beta_desc_, gamma, beta, params_.factor, result_running_mean, result_running_var, params_.eps, result_save_mean, result_save_inv_var)); } else { CK_CUDNN_THROW_(cudnnBatchNormalizationForwardInference( get_gpu().get_cudnn_handle(), mode_, &one, &zero, in_out_desc_, in, in_out_desc_, out, gamma_beta_desc_, gamma, beta, result_running_mean, result_running_var, params_.eps)); } } template <typename T> void BatchNormLayer<T>::bprop() { CudaDeviceContext context(get_device_id()); float one = 1.0f, zero = 0.0f; Tensor2<T>& in_tensor = in_tensors_[0]; Tensor2<T>& out_tensor = out_tensors_[0]; T* in = in_tensor.get_ptr(); T* out = out_tensor.get_ptr(); float* gamma = gamma_.get_ptr(); float* gamma_grad = gamma_grad_.get_ptr(); float* beta_grad = beta_grad_.get_ptr(); float* result_save_mean = result_save_mean_.get_ptr(); float* result_save_inv_var = result_save_inv_var_.get_ptr(); T* temp_in = temp_in_tensor_.get_ptr(); size_t n_byte 
= temp_in_tensor_.get_size_in_bytes(); CK_CUDA_THROW_( cudaMemcpyAsync(temp_in, in, n_byte, cudaMemcpyDeviceToDevice, get_gpu().get_stream())); CK_CUDNN_THROW_(cudnnBatchNormalizationBackward( get_gpu().get_cudnn_handle(), mode_, &one, &zero, &one, &zero, in_out_desc_, temp_in, in_out_desc_, out, in_out_desc_, in, gamma_beta_desc_, gamma, gamma_grad, beta_grad, params_.eps, result_save_mean, result_save_inv_var)); } template <typename T> std::string BatchNormLayer<T>::get_no_trained_params_in_string() { float* d_result_running_mean = result_running_mean_.get_ptr(); float* d_result_running_var = result_running_var_.get_ptr(); size_t n_byte = result_running_mean_.get_size_in_bytes(); size_t n_elem = n_byte / sizeof(T); CK_CUDA_THROW_(cudaMemcpy(h_result_running_mean_.get_ptr(), d_result_running_mean, n_byte, cudaMemcpyDeviceToHost)); CK_CUDA_THROW_(cudaMemcpy(h_result_running_var_.get_ptr(), d_result_running_var, n_byte, cudaMemcpyDeviceToHost)); std::string result = " \"type\": \"BatchNorm\",\n"; result += " \"mean\": ["; for (size_t i = 0; i < n_elem; i++) { result += std::to_string(ToStringType<T>(h_result_running_mean_.get_ptr()[i])); if (i != (n_elem - 1)) result += ", "; } result += "],\n"; result += " \"var\": ["; for (size_t i = 0; i < n_elem; i++) { result += std::to_string(ToStringType<T>(h_result_running_var_.get_ptr()[i])); if (i != (n_elem - 1)) result += ", "; } result += "]"; return result; } template <typename T> std::unique_ptr<DataSimulator> BatchNormLayer<T>::get_default_initializer(const int index) { std::unique_ptr<DataSimulator> simu; if (0 == index) { simu.reset(new ConstantDataSimulator(1.0f)); } else if (1 == index) { simu.reset(new ConstantDataSimulator(0.0f)); } else { CK_THROW_(Error_t::OutOfBound, "index != {0, 1}."); } return simu; } template class BatchNormLayer<float>; template class BatchNormLayer<__half>; } // namespace HugeCTR
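Comparing the two sides of the BatchNormLayer pair above, the cuDNN calls (cudnnSetTensor4dDescriptorEx, cudnnBatchNormalizationForwardTraining, cudnnBatchNormalizationBackward) are left untouched by hipify; only the runtime calls change, e.g. cudaMemcpyAsync/cudaMemcpy with cudaMemcpyDeviceToDevice/cudaMemcpyDeviceToHost become hipMemcpyAsync/hipMemcpy with hip-prefixed enum values. Below is a minimal CUDA sketch of that memcpy pattern; the CK_CUDA error-check macro and the buffer names are illustrative, not taken from the files above.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Illustrative error-check macro (the files above use their own CK_CUDA_THROW_ / CK_CUDNN_THROW_).
#define CK_CUDA(expr)                                              \
  do {                                                             \
    cudaError_t err_ = (expr);                                     \
    if (err_ != cudaSuccess)                                       \
      std::printf("CUDA error: %s\n", cudaGetErrorString(err_));   \
  } while (0)

int main() {
  const size_t n_byte = 256 * sizeof(float);
  float *src = nullptr, *dst = nullptr;
  float *host = static_cast<float*>(std::malloc(n_byte));
  CK_CUDA(cudaMalloc(&src, n_byte));
  CK_CUDA(cudaMalloc(&dst, n_byte));

  cudaStream_t stream;
  CK_CUDA(cudaStreamCreate(&stream));

  // CUDA form, as on the .cu side of the pair:
  CK_CUDA(cudaMemcpyAsync(dst, src, n_byte, cudaMemcpyDeviceToDevice, stream));
  // hipify rewrites this to:
  //   hipMemcpyAsync(dst, src, n_byte, hipMemcpyDeviceToDevice, stream);
  CK_CUDA(cudaMemcpy(host, dst, n_byte, cudaMemcpyDeviceToHost));
  // ... which becomes hipMemcpy(host, dst, n_byte, hipMemcpyDeviceToHost);

  CK_CUDA(cudaStreamSynchronize(stream));
  CK_CUDA(cudaStreamDestroy(stream));
  CK_CUDA(cudaFree(src));
  CK_CUDA(cudaFree(dst));
  std::free(host);
  return 0;
}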
351db6283634b941fd9dcb7981d538405766bad1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <cfloat> #include <string> #include <vector> #include "hipcub/hipcub.hpp" #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/batch_norm_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/float16.h" // CUDNN_BATCHNORM_SPATIAL_PERSISTENT in batchnorm. This mode can be faster in // some tasks because an optimized path may be selected for CUDNN_DATA_FLOAT // and CUDNN_DATA_HALF data types, compute capability 6.0 or higher. The // reason we set it to false by default is that this mode may use scaled // atomic integer reduction that may cause a numerical overflow for certain // input data range. DEFINE_bool(cudnn_batchnorm_spatial_persistent, false, "Whether enable CUDNN_BATCHNORM_SPATIAL_PERSISTENT mode for cudnn " "batch_norm, default is False."); namespace paddle { namespace operators { using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T> class BatchNormKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use CUDAPlace."); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); const float momentum = ctx.Attr<float>("momentum"); const bool is_test = ctx.Attr<bool>("is_test"); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); // Get the size for each dimension. // NCHW [batch_size, in_channels, in_height, in_width] const auto *x = ctx.Input<Tensor>("X"); const auto &x_dims = x->dims(); PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, "The Input dim size should be between 2 and 5"); int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); auto *y = ctx.Output<Tensor>("Y"); y->mutable_data<T>(ctx.GetPlace()); // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; CUDNN_ENFORCE(platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); CUDNN_ENFORCE( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. 
Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON); #if CUDNN_VERSION_MIN(7, 0, 0) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else mode_ = CUDNN_BATCHNORM_SPATIAL; #endif VLOG(3) << "Setting descriptors."; std::vector<int> dims; std::vector<int> strides; if (data_layout == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * D * C, 1, W * D * C, D * C, C}; } CUDNN_ENFORCE(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); // Note: PERSISTENT not implemented for inference CUDNN_ENFORCE(platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, is_test ? CUDNN_BATCHNORM_SPATIAL : mode_)); const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto handle = dev_ctx.cudnn_handle(); // Now, depending on whether we are running test or not, we have two paths. if (is_test || use_global_stats) { // only when test we use input to do computation. const auto *est_mean = ctx.Input<Tensor>("Mean"); const auto *est_var = ctx.Input<Tensor>("Variance"); // Run inference mode. PADDLE_ENFORCE_EQ(est_mean->dims().size(), 1UL); PADDLE_ENFORCE_EQ(est_var->dims().size(), 1UL); PADDLE_ENFORCE_EQ(est_mean->dims()[0], C); PADDLE_ENFORCE_EQ(est_var->dims()[0], C); CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardInference( handle, // Note: PERSISTENT not implemented for inference CUDNN_BATCHNORM_SPATIAL, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(), data_desc_, y->template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), est_mean->template data<BatchNormParamType<T>>(), est_var->template data<BatchNormParamType<T>>(), epsilon)); } else { // Run training mode. // obtain running mean and running inv var, and see if we need to // initialize them. auto *mean_out = ctx.Output<Tensor>("MeanOut"); auto *variance_out = ctx.Output<Tensor>("VarianceOut"); mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); auto *saved_mean = ctx.Output<Tensor>("SavedMean"); auto *saved_variance = ctx.Output<Tensor>("SavedVariance"); saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>> functor; functor(dev_ctx, saved_mean, static_cast<BatchNormParamType<T>>(0)); functor(dev_ctx, saved_variance, static_cast<BatchNormParamType<T>>(0)); if ((N * H * W * D) == 1) { LOG(WARNING) << "Only 1 element in normalization dimension, " << "we skip the batch norm calculation, let y = x."; framework::TensorCopy(*x, ctx.GetPlace(), y); } else { double this_factor = 1. 
- momentum; CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardTraining( handle, mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(), data_desc_, y->template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, mean_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), variance_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), saved_variance->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()))); } } // clean when exit. CUDNN_ENFORCE(platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); CUDNN_ENFORCE( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; template <typename T, int BlockDim, framework::DataLayout layout> static __global__ void KeBNBackwardScaleBias( const T *dy, const T *x, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, const double epsilon, const int N, const int C, const int HxW, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon); BatchNormParamType<T> mean_i = mean[i]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); db_sum += static_cast<BatchNormParamType<T>>(dy[index]); } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum()); if (threadIdx.x == 0) { dscale[i] = ds_sum * inv_var_i; dbias[i] = db_sum; } __syncthreads(); } } template <typename T, framework::DataLayout layout> static __global__ void KeBNBackwardData(const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *variance, const double epsilon, const int C, const int HxW, const int num, T *dx) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == framework::DataLayout::kNCHW ? 
i / HxW % C : i % C; BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon); dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) * scale[c] * inv_var); } } template <typename T> class BatchNormGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use CUDAPlace."); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const auto *x = ctx.Input<Tensor>("X"); const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); const auto *scale = ctx.Input<Tensor>("Scale"); const auto &x_dims = x->dims(); PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, "The Input dim size should be between 2 and 5"); int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); // init output auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale")); auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias")); d_x->mutable_data<T>(ctx.GetPlace()); if (d_scale && d_bias) { d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); } PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL); PADDLE_ENFORCE_EQ(scale->dims()[0], C); std::vector<int> dims; std::vector<int> strides; if (data_layout == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * C * D, 1, W * D * C, D * C, C}; } auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); if (!use_global_stats) { if ((N * H * W * D) == 1) { framework::TensorCopy(*d_y, ctx.GetPlace(), d_x); math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>> functor; functor(dev_ctx, d_scale, static_cast<BatchNormParamType<T>>(0)); functor(dev_ctx, d_bias, static_cast<BatchNormParamType<T>>(0)); return; } // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; CUDNN_ENFORCE( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); CUDNN_ENFORCE( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON); #if CUDNN_VERSION_MIN(7, 0, 0) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else mode_ = CUDNN_BATCHNORM_SPATIAL; #endif CUDNN_ENFORCE(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? 
x_dims.size() : 4, dims.data(), strides.data())); CUDNN_ENFORCE(platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); const auto *saved_mean = ctx.Input<Tensor>("SavedMean"); const auto *saved_var = ctx.Input<Tensor>("SavedVariance"); const void *saved_mean_data = saved_mean->template data<BatchNormParamType<T>>(); const void *saved_var_data = saved_var->template data<BatchNormParamType<T>>(); CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationBackward( dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(), data_desc_, d_y->template data<T>(), data_desc_, d_x->template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), d_scale->template mutable_data<BatchNormParamType<T>>(ctx.GetPlace()), d_bias->template mutable_data<BatchNormParamType<T>>(ctx.GetPlace()), epsilon, saved_mean_data, saved_var_data)); // clean when exit. CUDNN_ENFORCE( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); CUDNN_ENFORCE( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } else { const auto *running_mean = ctx.Input<Tensor>("Mean"); const auto *running_var = ctx.Input<Tensor>("Variance"); const auto *running_mean_data = running_mean->template data<BatchNormParamType<T>>(); const auto *running_var_data = running_var->template data<BatchNormParamType<T>>(); const int num = x->numel(); const int block = 512; int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = ::max(max_threads / block, 1); int grid1 = (num + block - 1) / block; int grid2 = ::min(C, max_blocks); if (data_layout == framework::DataLayout::kNCHW) { if (d_x) { hipLaunchKernelGGL(( KeBNBackwardData<T, framework::DataLayout::kNCHW>), dim3(grid1), dim3(block), 0, dev_ctx.stream(), d_y->data<T>(), scale->data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, framework::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, dev_ctx.stream(), d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { hipLaunchKernelGGL(( KeBNBackwardData<T, framework::DataLayout::kNHWC>), dim3(grid1), dim3(block), 0, dev_ctx.stream(), d_y->data<T>(), scale->data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, framework::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, dev_ctx.stream(), d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>, ops::BatchNormKernel<plat::CUDADeviceContext, double>, ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>, ops::BatchNormGradKernel<plat::CUDADeviceContext, double>, ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>);
351db6283634b941fd9dcb7981d538405766bad1.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <cfloat> #include <string> #include <vector> #include "cub/cub.cuh" #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/batch_norm_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/float16.h" // CUDNN_BATCHNORM_SPATIAL_PERSISTENT in batchnorm. This mode can be faster in // some tasks because an optimized path may be selected for CUDNN_DATA_FLOAT // and CUDNN_DATA_HALF data types, compute capability 6.0 or higher. The // reason we set it to false by default is that this mode may use scaled // atomic integer reduction that may cause a numerical overflow for certain // input data range. DEFINE_bool(cudnn_batchnorm_spatial_persistent, false, "Whether enable CUDNN_BATCHNORM_SPATIAL_PERSISTENT mode for cudnn " "batch_norm, default is False."); namespace paddle { namespace operators { using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T> class BatchNormKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use CUDAPlace."); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); const float momentum = ctx.Attr<float>("momentum"); const bool is_test = ctx.Attr<bool>("is_test"); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); // Get the size for each dimension. // NCHW [batch_size, in_channels, in_height, in_width] const auto *x = ctx.Input<Tensor>("X"); const auto &x_dims = x->dims(); PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, "The Input dim size should be between 2 and 5"); int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); auto *y = ctx.Output<Tensor>("Y"); y->mutable_data<T>(ctx.GetPlace()); // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; CUDNN_ENFORCE(platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); CUDNN_ENFORCE( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. 
Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); #if CUDNN_VERSION_MIN(7, 0, 0) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else mode_ = CUDNN_BATCHNORM_SPATIAL; #endif VLOG(3) << "Setting descriptors."; std::vector<int> dims; std::vector<int> strides; if (data_layout == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * D * C, 1, W * D * C, D * C, C}; } CUDNN_ENFORCE(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); // Note: PERSISTENT not implemented for inference CUDNN_ENFORCE(platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, is_test ? CUDNN_BATCHNORM_SPATIAL : mode_)); const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto handle = dev_ctx.cudnn_handle(); // Now, depending on whether we are running test or not, we have two paths. if (is_test || use_global_stats) { // only when test we use input to do computation. const auto *est_mean = ctx.Input<Tensor>("Mean"); const auto *est_var = ctx.Input<Tensor>("Variance"); // Run inference mode. PADDLE_ENFORCE_EQ(est_mean->dims().size(), 1UL); PADDLE_ENFORCE_EQ(est_var->dims().size(), 1UL); PADDLE_ENFORCE_EQ(est_mean->dims()[0], C); PADDLE_ENFORCE_EQ(est_var->dims()[0], C); CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardInference( handle, // Note: PERSISTENT not implemented for inference CUDNN_BATCHNORM_SPATIAL, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(), data_desc_, y->template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), est_mean->template data<BatchNormParamType<T>>(), est_var->template data<BatchNormParamType<T>>(), epsilon)); } else { // Run training mode. // obtain running mean and running inv var, and see if we need to // initialize them. auto *mean_out = ctx.Output<Tensor>("MeanOut"); auto *variance_out = ctx.Output<Tensor>("VarianceOut"); mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); auto *saved_mean = ctx.Output<Tensor>("SavedMean"); auto *saved_variance = ctx.Output<Tensor>("SavedVariance"); saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>> functor; functor(dev_ctx, saved_mean, static_cast<BatchNormParamType<T>>(0)); functor(dev_ctx, saved_variance, static_cast<BatchNormParamType<T>>(0)); if ((N * H * W * D) == 1) { LOG(WARNING) << "Only 1 element in normalization dimension, " << "we skip the batch norm calculation, let y = x."; framework::TensorCopy(*x, ctx.GetPlace(), y); } else { double this_factor = 1. 
- momentum; CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardTraining( handle, mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(), data_desc_, y->template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, mean_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), variance_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), saved_variance->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()))); } } // clean when exit. CUDNN_ENFORCE(platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); CUDNN_ENFORCE( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; template <typename T, int BlockDim, framework::DataLayout layout> static __global__ void KeBNBackwardScaleBias( const T *dy, const T *x, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, const double epsilon, const int N, const int C, const int HxW, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon); BatchNormParamType<T> mean_i = mean[i]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); db_sum += static_cast<BatchNormParamType<T>>(dy[index]); } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum()); if (threadIdx.x == 0) { dscale[i] = ds_sum * inv_var_i; dbias[i] = db_sum; } __syncthreads(); } } template <typename T, framework::DataLayout layout> static __global__ void KeBNBackwardData(const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *variance, const double epsilon, const int C, const int HxW, const int num, T *dx) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == framework::DataLayout::kNCHW ? 
i / HxW % C : i % C; BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon); dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) * scale[c] * inv_var); } } template <typename T> class BatchNormGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use CUDAPlace."); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const auto *x = ctx.Input<Tensor>("X"); const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); const auto *scale = ctx.Input<Tensor>("Scale"); const auto &x_dims = x->dims(); PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, "The Input dim size should be between 2 and 5"); int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); // init output auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale")); auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias")); d_x->mutable_data<T>(ctx.GetPlace()); if (d_scale && d_bias) { d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); } PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL); PADDLE_ENFORCE_EQ(scale->dims()[0], C); std::vector<int> dims; std::vector<int> strides; if (data_layout == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * C * D, 1, W * D * C, D * C, C}; } auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); if (!use_global_stats) { if ((N * H * W * D) == 1) { framework::TensorCopy(*d_y, ctx.GetPlace(), d_x); math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>> functor; functor(dev_ctx, d_scale, static_cast<BatchNormParamType<T>>(0)); functor(dev_ctx, d_bias, static_cast<BatchNormParamType<T>>(0)); return; } // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; CUDNN_ENFORCE( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); CUDNN_ENFORCE( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); #if CUDNN_VERSION_MIN(7, 0, 0) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else mode_ = CUDNN_BATCHNORM_SPATIAL; #endif CUDNN_ENFORCE(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? 
x_dims.size() : 4, dims.data(), strides.data())); CUDNN_ENFORCE(platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); const auto *saved_mean = ctx.Input<Tensor>("SavedMean"); const auto *saved_var = ctx.Input<Tensor>("SavedVariance"); const void *saved_mean_data = saved_mean->template data<BatchNormParamType<T>>(); const void *saved_var_data = saved_var->template data<BatchNormParamType<T>>(); CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationBackward( dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(), data_desc_, d_y->template data<T>(), data_desc_, d_x->template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), d_scale->template mutable_data<BatchNormParamType<T>>(ctx.GetPlace()), d_bias->template mutable_data<BatchNormParamType<T>>(ctx.GetPlace()), epsilon, saved_mean_data, saved_var_data)); // clean when exit. CUDNN_ENFORCE( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); CUDNN_ENFORCE( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } else { const auto *running_mean = ctx.Input<Tensor>("Mean"); const auto *running_var = ctx.Input<Tensor>("Variance"); const auto *running_mean_data = running_mean->template data<BatchNormParamType<T>>(); const auto *running_var_data = running_var->template data<BatchNormParamType<T>>(); const int num = x->numel(); const int block = 512; int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(max_threads / block, 1); int grid1 = (num + block - 1) / block; int grid2 = std::min(C, max_blocks); if (data_layout == framework::DataLayout::kNCHW) { if (d_x) { KeBNBackwardData<T, framework::DataLayout::kNCHW><<< grid1, block, 0, dev_ctx.stream()>>>( d_y->data<T>(), scale->data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias<T, block, framework::DataLayout::kNCHW><<< grid2, block, 0, dev_ctx.stream()>>>( d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { KeBNBackwardData<T, framework::DataLayout::kNHWC><<< grid1, block, 0, dev_ctx.stream()>>>( d_y->data<T>(), scale->data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias<T, block, framework::DataLayout::kNHWC><<< grid2, block, 0, dev_ctx.stream()>>>( d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>, ops::BatchNormKernel<plat::CUDADeviceContext, double>, ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>, ops::BatchNormGradKernel<plat::CUDADeviceContext, double>, ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>);
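Two conversions distinguish the batch_norm_op pair above: the header and namespace swap from cub to hipcub (cub/cub.cuh becomes hipcub/hipcub.hpp, cub::BlockReduce and cub::Sum become hipcub::BlockReduce and hipcub::Sum), and the rewrite of every triple-chevron launch into hipLaunchKernelGGL. The following is a minimal CUDA sketch of the block-reduction idiom used in KeBNBackwardScaleBias; the kernel name block_sum and its buffers are illustrative only.

#include <cub/cub.cuh>

// Each block reduces BlockDim values to a single partial sum, mirroring the
// ds_sum/db_sum reductions in KeBNBackwardScaleBias above.
template <int BlockDim>
__global__ void block_sum(const float* in, float* out, int n) {
  using BlockReduce = cub::BlockReduce<float, BlockDim>;  // hipcub::BlockReduce on the .hip side
  __shared__ typename BlockReduce::TempStorage temp_storage;
  int i = blockIdx.x * BlockDim + threadIdx.x;
  float v = (i < n) ? in[i] : 0.0f;
  // Every thread in the block must call Reduce; only thread 0 receives the valid result.
  float sum = BlockReduce(temp_storage).Reduce(v, cub::Sum());
  if (threadIdx.x == 0) out[blockIdx.x] = sum;
}
// Launched as e.g. block_sum<256><<<grid, 256>>>(d_in, d_out, n) on the CUDA side;
// hipify turns the launch into
//   hipLaunchKernelGGL((block_sum<256>), dim3(grid), dim3(256), 0, 0, d_in, d_out, n);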
9b3c003ba38dc1e4e272552f68c7cdcce8de86fb.hip
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=256 --gridDim=1 --no-inline

#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
//#include <hiprand/hiprand_kernel.h>
//#include <hiprand/hiprand_mtgp32_host.h>
#include <stdio.h>

#define N 256

__global__ void curand_test(hiprandStateMtgp32_t *state, float *A) {
  A[threadIdx.x] = hiprand(&state[threadIdx.x]);
}

int main() {
  typedef hiprandStateMtgp32_t tipo; // Mtgp32_t

  float *a;
  float *dev_a;
  tipo *dev_state;
  mtgp32_kernel_params_t *devKernelParams;
  int size = N*sizeof(float);

  a = (float*)malloc(size);
  hipMalloc ((void**) &dev_a, size);

  printf("old a: ");
  for (int i = 0; i < N; i++)
    printf("%f ", a[i]);

  hipMalloc ( (void**) &dev_state, N*sizeof( tipo ) );
  hipMalloc((void**)&devKernelParams,sizeof(mtgp32_kernel_params_t));

  hiprandMakeMTGP32Constants(mtgp32dc_params_fast_11213, devKernelParams);
  /* Set up constant parameters for the mtgp32 generator */
  /* mtgp32dc_params_fast_11213 is a constant of the type mtgp32_params_fast, it is a system constant */
  /* devKernelParams is the destination*/

  hiprandMakeMTGP32KernelState(dev_state, mtgp32dc_params_fast_11213, devKernelParams,N, 1234);
  /* Set up initial states for the mtgp32 generator */
  /*
   * \param s - pointer to an array of states in device memory
   * \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
   * \param k - pointer to a structure of type mtgp32_kernel_params_t in device memory
   * \param n - number of parameter sets/states to initialize
   * \param seed - seed value
   *
   * */

  hipLaunchKernelGGL(( curand_test), dim3(1),dim3(N), 0, 0, dev_state, dev_a);
  //ESBMC_verify_kernel(curand_test,1,N,dev_state, dev_a);

  hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);

  printf("\nnew a: ");
  for (int i = 0; i < N; i++)
    printf("%f ", a[i]);

  free(a);
  hipFree(&dev_a);
  hipFree(&dev_state);
  hipFree(&devKernelParams);

  return 0;
}
9b3c003ba38dc1e4e272552f68c7cdcce8de86fb.cu
//pass
//--blockDim=256 --gridDim=1 --no-inline

#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
//#include <curand_precalc.h>
//#include <curand_mtgp32_host.h>
#include <stdio.h>

#define N 256

__global__ void curand_test(curandStateMtgp32_t *state, float *A) {
  A[threadIdx.x] = curand(&state[threadIdx.x]);
}

int main() {
  typedef curandStateMtgp32_t tipo; // Mtgp32_t

  float *a;
  float *dev_a;
  tipo *dev_state;
  mtgp32_kernel_params *devKernelParams;
  int size = N*sizeof(float);

  a = (float*)malloc(size);
  cudaMalloc ((void**) &dev_a, size);

  printf("old a: ");
  for (int i = 0; i < N; i++)
    printf("%f ", a[i]);

  cudaMalloc ( (void**) &dev_state, N*sizeof( tipo ) );
  cudaMalloc((void**)&devKernelParams,sizeof(mtgp32_kernel_params));

  curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, devKernelParams);
  /* Set up constant parameters for the mtgp32 generator */
  /* mtgp32dc_params_fast_11213 is a constant of the type mtgp32_params_fast, it is a system constant */
  /* devKernelParams is the destination*/

  curandMakeMTGP32KernelState(dev_state, mtgp32dc_params_fast_11213, devKernelParams,N, 1234);
  /* Set up initial states for the mtgp32 generator */
  /*
   * \param s - pointer to an array of states in device memory
   * \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
   * \param k - pointer to a structure of type mtgp32_kernel_params_t in device memory
   * \param n - number of parameter sets/states to initialize
   * \param seed - seed value
   *
   * */

  curand_test<<<1,N>>>(dev_state, dev_a);
  //ESBMC_verify_kernel(curand_test,1,N,dev_state, dev_a);

  cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);

  printf("\nnew a: ");
  for (int i = 0; i < N; i++)
    printf("%f ", a[i]);

  free(a);
  cudaFree(&dev_a);
  cudaFree(&dev_state);
  cudaFree(&devKernelParams);

  return 0;
}
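The pair above exercises the cuRAND/hipRAND MTGP32 host setup: curandMakeMTGP32Constants and curandMakeMTGP32KernelState map to hiprandMakeMTGP32Constants and hiprandMakeMTGP32KernelState, and the parameter type mtgp32_kernel_params is renamed to mtgp32_kernel_params_t. A small CUDA sketch of the same setup follows; it uses the documented one-state-per-block pattern (state indexed by blockIdx.x) rather than the per-thread indexing of the test above, and the names fill, d_out, d_state, d_params are illustrative, not from the corpus.

#include <cuda_runtime.h>
#include <cstdio>
#include <curand_kernel.h>
#include <curand_mtgp32_host.h>        // curandMakeMTGP32Constants / curandMakeMTGP32KernelState
#include <curand_mtgp32dc_p_11213.h>   // mtgp32dc_params_fast_11213

__global__ void fill(curandStateMtgp32* state, float* out) {
  // One MTGP32 state per block; up to 256 threads may draw from it concurrently.
  out[blockIdx.x * blockDim.x + threadIdx.x] = curand(&state[blockIdx.x]);
}

int main() {
  const int n = 256;
  float* d_out;
  curandStateMtgp32* d_state;
  mtgp32_kernel_params* d_params;  // hipify renames the type to mtgp32_kernel_params_t
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMalloc(&d_state, sizeof(curandStateMtgp32));
  cudaMalloc(&d_params, sizeof(mtgp32_kernel_params));

  // Host-side generator setup, as in the pair above (hiprand* calls on the .hip side).
  curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, d_params);
  curandMakeMTGP32KernelState(d_state, mtgp32dc_params_fast_11213, d_params, 1, 1234);

  fill<<<1, n>>>(d_state, d_out);
  cudaDeviceSynchronize();

  cudaFree(d_out);
  cudaFree(d_state);
  cudaFree(d_params);
  return 0;
}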
5cd4a40255b59f39bba2aaf3ac2f9a1c2805a602.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <clover_field.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <clover_field_order.h> namespace quda { #ifdef GPU_CLOVER_DIRAC template<typename Float, typename Clover1, typename Clover2, typename Gauge> struct CloverTraceArg { Clover1 clover1; Clover2 clover2; Gauge gauge; Float coeff; CloverTraceArg(Clover1 &clover1, Clover2 &clover2, Gauge &gauge, Float coeff) : clover1(clover1), clover2(clover2), gauge(gauge), coeff(coeff) {} }; template <typename Float, typename Arg> __device__ __host__ void cloverSigmaTraceCompute(Arg & arg, const int x, int parity) { Float A[72]; if (parity==0) arg.clover1.load(A,x,parity); else arg.clover2.load(A,x,parity); // load the clover term into memory for (int mu=0; mu<4; mu++) { for (int nu=0; nu<mu; nu++) { Matrix<complex<Float>,3> mat; setZero(&mat); Float diag[2][6]; complex<Float> tri[2][15]; const int idtab[15]={0,1,3,6,10,2,4,7,11,5,8,12,9,13,14}; complex<Float> ctmp; for (int ch=0; ch<2; ++ch) { // factor of two is inherent to QUDA clover storage for (int i=0; i<6; i++) diag[ch][i] = 2.0*A[ch*36+i]; for (int i=0; i<15; i++) tri[ch][idtab[i]] = complex<Float>(2.0*A[ch*36+6+2*i], 2.0*A[ch*36+6+2*i+1]); } // X, Y if (nu == 0) { if (mu == 1) { for (int j=0; j<3; ++j) { mat(j,j).y = diag[0][j+3] + diag[1][j+3] - diag[0][j] - diag[1][j]; } // triangular part int jk=0; for (int j=1; j<3; ++j) { int jk2 = (j+3)*(j+2)/2 + 3; for (int k=0; k<j; ++k) { ctmp = tri[0][jk2] + tri[1][jk2] - tri[0][jk] - tri[1][jk]; mat(j,k).x = -ctmp.imag(); mat(j,k).y = ctmp.real(); mat(k,j).x = ctmp.imag(); mat(k,j).y = ctmp.real(); jk++; jk2++; } } // X Y } else if (mu == 2) { for (int j=0; j<3; ++j) { int jk = (j+3)*(j+2)/2; for (int k=0; k<3; ++k) { int kj = (k+3)*(k+2)/2 + j; mat(j,k) = conj(tri[0][kj]) - tri[0][jk] + conj(tri[1][kj]) - tri[1][jk]; jk++; } } // X Z } else if (mu == 3) { for (int j=0; j<3; ++j) { int jk = (j+3)*(j+2)/2; for (int k=0; k<3; ++k) { int kj = (k+3)*(k+2)/2 + j; ctmp = conj(tri[0][kj]) + tri[0][jk] - conj(tri[1][kj]) - tri[1][jk]; mat(j,k).x = -ctmp.imag(); mat(j,k).y = ctmp.real(); jk++; } } } // mu == 3 // X T } else if (nu == 1) { if (mu == 2) { // Y Z for (int j=0; j<3; ++j) { int jk = (j+3)*(j+2)/2; for (int k=0; k<3; ++k) { int kj = (k+3)*(k+2)/2 + j; ctmp = conj(tri[0][kj]) + tri[0][jk] + conj(tri[1][kj]) + tri[1][jk]; mat(j,k).x = ctmp.imag(); mat(j,k).y = -ctmp.real(); jk++; } } } else if (mu == 3){ // Y T for (int j=0; j<3; ++j) { int jk = (j+3)*(j+2)/2; for (int k=0; k<3; ++k) { int kj = (k+3)*(k+2)/2 + j; mat(j,k) = conj(tri[0][kj]) - tri[0][jk] - conj(tri[1][kj]) + tri[1][jk]; jk++; } } } // mu == 3 } // nu == 1 else if (nu == 2){ if (mu == 3) { for (int j=0; j<3; ++j) { mat(j,j).y = diag[0][j] - diag[0][j+3] - diag[1][j] + diag[1][j+3]; } int jk=0; for (int j=1; j<3; ++j) { int jk2 = (j+3)*(j+2)/2 + 3; for (int k=0; k<j; ++k) { ctmp = tri[0][jk] - tri[0][jk2] - tri[1][jk] + tri[1][jk2]; mat(j,k).x = -ctmp.imag(); mat(j,k).y = ctmp.real(); mat(k,j).x = ctmp.imag(); mat(k,j).y = ctmp.real(); jk++; jk2++; } } } } mat *= arg.coeff; arg.gauge((mu-1)*mu/2 + nu, x, parity) = mat; } // nu } // mu return; } template<typename Float, typename Arg> void cloverSigmaTrace(Arg &arg) { for (int x=0; x<arg.clover1.volumeCB; x++) { cloverSigmaTraceCompute<Float,Arg>(arg, x, 1); } return; } template<typename Float, typename Arg> __global__ void 
cloverSigmaTraceKernel(Arg arg) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= arg.clover1.volumeCB) return; // odd parity cloverSigmaTraceCompute<Float,Arg>(arg, idx, 1); } template<typename Float, typename Arg> class CloverSigmaTrace : Tunable { Arg &arg; const GaugeField &meta; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune the shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.clover1.volumeCB; } public: CloverSigmaTrace(Arg &arg, const GaugeField &meta) : arg(arg), meta(meta) { writeAuxString("stride=%d", arg.clover1.stride); } virtual ~CloverSigmaTrace() {;} void apply(const hipStream_t &stream){ if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); hipLaunchKernelGGL(( cloverSigmaTraceKernel<Float,Arg>), dim3(tp.grid),dim3(tp.block),0, 0, arg); } else { cloverSigmaTrace<Float,Arg>(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 0; } // Fix this long long bytes() const { return (arg.clover1.Bytes() + 6*arg.gauge.Bytes()) * arg.clover1.volumeCB; } }; // CloverSigmaTrace template<typename Float, typename Clover1, typename Clover2, typename Gauge> void computeCloverSigmaTrace(Clover1 clover1, Clover2 clover2, Gauge gauge, const GaugeField &meta, Float coeff) { typedef CloverTraceArg<Float, Clover1, Clover2, Gauge> Arg; Arg arg(clover1, clover2, gauge, coeff); CloverSigmaTrace<Float, Arg> traceCompute(arg, meta); traceCompute.apply(0); return; } template<typename Float> void computeCloverSigmaTrace(GaugeField& gauge, const CloverField& clover, Float coeff){ if(clover.isNative()) { typedef typename clover_mapper<Float>::type C; if (gauge.isNative()) { if (gauge.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; computeCloverSigmaTrace<Float>( C(clover,0), C(clover,1), G(gauge), gauge, coeff); } else if(gauge.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; computeCloverSigmaTrace<Float>( C(clover,0), C(clover,1), G(gauge), gauge, coeff); } else { errorQuda("Reconstruction type %d not supported", gauge.Reconstruct()); } } else { errorQuda("Gauge order %d not supported", gauge.Order()); } } else { errorQuda("clover order %d not supported", clover.Order()); } // clover order } #endif void computeCloverSigmaTrace(GaugeField& output, const CloverField& clover, double coeff) { #ifdef GPU_CLOVER_DIRAC if (clover.Precision() == QUDA_SINGLE_PRECISION) { computeCloverSigmaTrace<float>(output, clover, static_cast<float>(coeff)); } else if (clover.Precision() == QUDA_DOUBLE_PRECISION){ computeCloverSigmaTrace<double>(output, clover, coeff); } else { errorQuda("Precision %d not supported", clover.Precision()); } #else errorQuda("Clover has not been built"); #endif } } // namespace quda
5cd4a40255b59f39bba2aaf3ac2f9a1c2805a602.cu
#include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <clover_field.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <clover_field_order.h> namespace quda { #ifdef GPU_CLOVER_DIRAC template<typename Float, typename Clover1, typename Clover2, typename Gauge> struct CloverTraceArg { Clover1 clover1; Clover2 clover2; Gauge gauge; Float coeff; CloverTraceArg(Clover1 &clover1, Clover2 &clover2, Gauge &gauge, Float coeff) : clover1(clover1), clover2(clover2), gauge(gauge), coeff(coeff) {} }; template <typename Float, typename Arg> __device__ __host__ void cloverSigmaTraceCompute(Arg & arg, const int x, int parity) { Float A[72]; if (parity==0) arg.clover1.load(A,x,parity); else arg.clover2.load(A,x,parity); // load the clover term into memory for (int mu=0; mu<4; mu++) { for (int nu=0; nu<mu; nu++) { Matrix<complex<Float>,3> mat; setZero(&mat); Float diag[2][6]; complex<Float> tri[2][15]; const int idtab[15]={0,1,3,6,10,2,4,7,11,5,8,12,9,13,14}; complex<Float> ctmp; for (int ch=0; ch<2; ++ch) { // factor of two is inherent to QUDA clover storage for (int i=0; i<6; i++) diag[ch][i] = 2.0*A[ch*36+i]; for (int i=0; i<15; i++) tri[ch][idtab[i]] = complex<Float>(2.0*A[ch*36+6+2*i], 2.0*A[ch*36+6+2*i+1]); } // X, Y if (nu == 0) { if (mu == 1) { for (int j=0; j<3; ++j) { mat(j,j).y = diag[0][j+3] + diag[1][j+3] - diag[0][j] - diag[1][j]; } // triangular part int jk=0; for (int j=1; j<3; ++j) { int jk2 = (j+3)*(j+2)/2 + 3; for (int k=0; k<j; ++k) { ctmp = tri[0][jk2] + tri[1][jk2] - tri[0][jk] - tri[1][jk]; mat(j,k).x = -ctmp.imag(); mat(j,k).y = ctmp.real(); mat(k,j).x = ctmp.imag(); mat(k,j).y = ctmp.real(); jk++; jk2++; } } // X Y } else if (mu == 2) { for (int j=0; j<3; ++j) { int jk = (j+3)*(j+2)/2; for (int k=0; k<3; ++k) { int kj = (k+3)*(k+2)/2 + j; mat(j,k) = conj(tri[0][kj]) - tri[0][jk] + conj(tri[1][kj]) - tri[1][jk]; jk++; } } // X Z } else if (mu == 3) { for (int j=0; j<3; ++j) { int jk = (j+3)*(j+2)/2; for (int k=0; k<3; ++k) { int kj = (k+3)*(k+2)/2 + j; ctmp = conj(tri[0][kj]) + tri[0][jk] - conj(tri[1][kj]) - tri[1][jk]; mat(j,k).x = -ctmp.imag(); mat(j,k).y = ctmp.real(); jk++; } } } // mu == 3 // X T } else if (nu == 1) { if (mu == 2) { // Y Z for (int j=0; j<3; ++j) { int jk = (j+3)*(j+2)/2; for (int k=0; k<3; ++k) { int kj = (k+3)*(k+2)/2 + j; ctmp = conj(tri[0][kj]) + tri[0][jk] + conj(tri[1][kj]) + tri[1][jk]; mat(j,k).x = ctmp.imag(); mat(j,k).y = -ctmp.real(); jk++; } } } else if (mu == 3){ // Y T for (int j=0; j<3; ++j) { int jk = (j+3)*(j+2)/2; for (int k=0; k<3; ++k) { int kj = (k+3)*(k+2)/2 + j; mat(j,k) = conj(tri[0][kj]) - tri[0][jk] - conj(tri[1][kj]) + tri[1][jk]; jk++; } } } // mu == 3 } // nu == 1 else if (nu == 2){ if (mu == 3) { for (int j=0; j<3; ++j) { mat(j,j).y = diag[0][j] - diag[0][j+3] - diag[1][j] + diag[1][j+3]; } int jk=0; for (int j=1; j<3; ++j) { int jk2 = (j+3)*(j+2)/2 + 3; for (int k=0; k<j; ++k) { ctmp = tri[0][jk] - tri[0][jk2] - tri[1][jk] + tri[1][jk2]; mat(j,k).x = -ctmp.imag(); mat(j,k).y = ctmp.real(); mat(k,j).x = ctmp.imag(); mat(k,j).y = ctmp.real(); jk++; jk2++; } } } } mat *= arg.coeff; arg.gauge((mu-1)*mu/2 + nu, x, parity) = mat; } // nu } // mu return; } template<typename Float, typename Arg> void cloverSigmaTrace(Arg &arg) { for (int x=0; x<arg.clover1.volumeCB; x++) { cloverSigmaTraceCompute<Float,Arg>(arg, x, 1); } return; } template<typename Float, typename Arg> __global__ void cloverSigmaTraceKernel(Arg arg) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= 
arg.clover1.volumeCB) return; // odd parity cloverSigmaTraceCompute<Float,Arg>(arg, idx, 1); } template<typename Float, typename Arg> class CloverSigmaTrace : Tunable { Arg &arg; const GaugeField &meta; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune the shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.clover1.volumeCB; } public: CloverSigmaTrace(Arg &arg, const GaugeField &meta) : arg(arg), meta(meta) { writeAuxString("stride=%d", arg.clover1.stride); } virtual ~CloverSigmaTrace() {;} void apply(const cudaStream_t &stream){ if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); cloverSigmaTraceKernel<Float,Arg><<<tp.grid,tp.block,0>>>(arg); } else { cloverSigmaTrace<Float,Arg>(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 0; } // Fix this long long bytes() const { return (arg.clover1.Bytes() + 6*arg.gauge.Bytes()) * arg.clover1.volumeCB; } }; // CloverSigmaTrace template<typename Float, typename Clover1, typename Clover2, typename Gauge> void computeCloverSigmaTrace(Clover1 clover1, Clover2 clover2, Gauge gauge, const GaugeField &meta, Float coeff) { typedef CloverTraceArg<Float, Clover1, Clover2, Gauge> Arg; Arg arg(clover1, clover2, gauge, coeff); CloverSigmaTrace<Float, Arg> traceCompute(arg, meta); traceCompute.apply(0); return; } template<typename Float> void computeCloverSigmaTrace(GaugeField& gauge, const CloverField& clover, Float coeff){ if(clover.isNative()) { typedef typename clover_mapper<Float>::type C; if (gauge.isNative()) { if (gauge.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; computeCloverSigmaTrace<Float>( C(clover,0), C(clover,1), G(gauge), gauge, coeff); } else if(gauge.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; computeCloverSigmaTrace<Float>( C(clover,0), C(clover,1), G(gauge), gauge, coeff); } else { errorQuda("Reconstruction type %d not supported", gauge.Reconstruct()); } } else { errorQuda("Gauge order %d not supported", gauge.Order()); } } else { errorQuda("clover order %d not supported", clover.Order()); } // clover order } #endif void computeCloverSigmaTrace(GaugeField& output, const CloverField& clover, double coeff) { #ifdef GPU_CLOVER_DIRAC if (clover.Precision() == QUDA_SINGLE_PRECISION) { computeCloverSigmaTrace<float>(output, clover, static_cast<float>(coeff)); } else if (clover.Precision() == QUDA_DOUBLE_PRECISION){ computeCloverSigmaTrace<double>(output, clover, coeff); } else { errorQuda("Precision %d not supported", clover.Precision()); } #else errorQuda("Clover has not been built"); #endif } } // namespace quda
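In the CloverSigmaTrace pair above, the substantive code is identical on both sides; hipify only changes the stream type in apply() from cudaStream_t to hipStream_t and rewrites the launch cloverSigmaTraceKernel<Float,Arg><<<tp.grid, tp.block, 0>>>(arg) into the hipLaunchKernelGGL form. A tiny CUDA sketch of that launch rewrite follows; the kernel scale_kernel and its arguments are illustrative only.

#include <cuda_runtime.h>

__global__ void scale_kernel(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n, cudaStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA form, as on the .cu side of the pair:
  scale_kernel<<<grid, block, 0, stream>>>(d_x, a, n);
  // hipify-generated form, as on the .hip side (the stream parameter becomes hipStream_t):
  //   hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream, d_x, a, n);
}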
1f7d108ee58c45239d37d981d21c69e8f4439e09.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common.hpp" #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/scalar/scalar.hpp> #include <rmm/device_uvector.hpp> #include <hip/hip_runtime.h> #include <nvtx3/roctracer/roctx.h> /** * @brief Reserve CUDA malloc heap size * * Call this function to change the CUDA malloc heap size limit. * This value depends on the total size of all the malloc() * calls needed for redact_kernel. * * @param heap_size Number of bytes to reserve * Default is 1GB */ void set_malloc_heap_size(size_t heap_size = 1073741824) // 1GB { size_t max_malloc_heap_size = 0; hipDeviceGetLimit(&max_malloc_heap_size, hipLimitMallocHeapSize); if (max_malloc_heap_size < heap_size) { max_malloc_heap_size = heap_size; if (hipDeviceSetLimit(hipLimitMallocHeapSize, max_malloc_heap_size) != hipSuccess) { fprintf(stderr, "could not set malloc heap size to %ldMB\n", (heap_size / (1024 * 1024))); throw std::runtime_error(""); } } } /** * @brief Builds the output for each row * * This thread is called once per row in d_names. * * Note: This uses malloc() in a device kernel which works great * but is not very efficient. This can be useful for prototyping * on functions where performance is not yet important. * All calls to malloc() must have a corresponding free() call. * The separate free_kernel is launched for this purpose. 
* * @param d_names Column of names * @param d_visibilities Column of visibilities * @param redaction Redacted string replacement * @param d_output Output array of string_view objects */ __global__ void redact_kernel(cudf::column_device_view const d_names, cudf::column_device_view const d_visibilities, cudf::string_view redaction, cudf::string_view* d_output) { // The row index is resolved from the CUDA thread/block objects auto index = threadIdx.x + blockIdx.x * blockDim.x; // There may be more threads than actual rows if (index >= d_names.size()) return; auto const visible = cudf::string_view("public", 6); auto const name = d_names.element<cudf::string_view>(index); auto const vis = d_visibilities.element<cudf::string_view>(index); if (vis == visible) { auto const space_idx = name.find(' '); auto const first = name.substr(0, space_idx); auto const last_initial = name.substr(space_idx + 1, 1); auto const output_size = first.size_bytes() + last_initial.size_bytes() + 1; char* output_ptr = static_cast<char*>(malloc(output_size)); d_output[index] = cudf::string_view{output_ptr, output_size}; // build output string memcpy(output_ptr, last_initial.data(), last_initial.size_bytes()); output_ptr += last_initial.size_bytes(); *output_ptr++ = ' '; memcpy(output_ptr, first.data(), first.size_bytes()); } else { d_output[index] = cudf::string_view{redaction.data(), redaction.size_bytes()}; } } /** * @brief Frees the temporary individual string objects created in the * redact_kernel * * Like malloc(), free() is not very efficient but must be called for * each malloc() to return the memory to the CUDA malloc heap. * * @param redaction Redacted string replacement (not to be freed) * @param d_output Output array of string_view objects to free */ __global__ void free_kernel(cudf::string_view redaction, cudf::string_view* d_output, int count) { auto index = threadIdx.x + blockIdx.x * blockDim.x; if (index >= count) return; auto ptr = const_cast<char*>(d_output[index].data()); if (ptr != redaction.data()) { free(ptr); } } std::unique_ptr<cudf::column> redact_strings(cudf::column_view const& names, cudf::column_view const& visibilities) { // all device memory operations and kernel functions will run on this stream auto stream = rmm::cuda_stream_default; set_malloc_heap_size(); // to illustrate adjusting the malloc heap auto const d_names = cudf::column_device_view::create(names, stream); auto const d_visibilities = cudf::column_device_view::create(visibilities, stream); auto const d_redaction = cudf::string_scalar(std::string("X X"), true, stream); constexpr int block_size = 128; // this arbitrary size should be a power of 2 auto const blocks = (names.size() + block_size - 1) / block_size; roctxRangePushA("redact_strings"); // create a vector for the output strings' pointers auto str_ptrs = new rmm::device_uvector<cudf::string_view>(names.size(), stream); auto result = [&] { // build the output strings hipLaunchKernelGGL(( redact_kernel), dim3(blocks), dim3(block_size), 0, stream.value(), *d_names, *d_visibilities, d_redaction.value(), str_ptrs->data()); // create strings column from the string_view vector // this copies all the individual strings into a single output column return cudf::make_strings_column(*str_ptrs, cudf::string_view{nullptr, 0}, stream); }(); // free the individual temporary memory pointers hipLaunchKernelGGL(( free_kernel), dim3(blocks), dim3(block_size), 0, stream.value(), d_redaction.value(), str_ptrs->data(), names.size()); delete str_ptrs; // wait for all of the above to finish 
stream.synchronize(); roctxRangePop(); return result; }
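The example above relies on device-side malloc()/free(), so it raises the CUDA malloc heap limit before launching anything. A stripped-down, self-contained sketch of that pattern in plain CUDA (no cudf; the kernel, sizes, and heap value are placeholders) is below — the limit must be set before the first kernel that allocates, and a device-side allocation can still fail and should be checked:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Each thread allocates a small scratch buffer from the device heap,
// uses it, and frees it -- the same malloc()/free() pairing the
// redact_kernel/free_kernel pair above depends on.
__global__ void scratch_kernel(int* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    int* tmp = static_cast<int*>(malloc(4 * sizeof(int)));
    if (tmp == nullptr) { out[i] = -1; return; }   // device heap exhausted
    for (int k = 0; k < 4; ++k) tmp[k] = i + k;
    out[i] = tmp[0] + tmp[1] + tmp[2] + tmp[3];
    free(tmp);                                     // every device malloc needs a matching free
}

int main() {
    const int n = 1 << 16;
    size_t heap = 0;
    cudaDeviceGetLimit(&heap, cudaLimitMallocHeapSize);
    if (heap < (64u << 20))                        // grow the heap before any allocating kernel runs
        cudaDeviceSetLimit(cudaLimitMallocHeapSize, 64u << 20);
    int* d_out = nullptr;
    cudaMalloc(&d_out, n * sizeof(int));
    scratch_kernel<<<(n + 127) / 128, 128>>>(d_out, n);
    cudaDeviceSynchronize();
    cudaFree(d_out);
    printf("done\n");
    return 0;
}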
1f7d108ee58c45239d37d981d21c69e8f4439e09.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common.hpp" #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/scalar/scalar.hpp> #include <rmm/device_uvector.hpp> #include <cuda_runtime.h> #include <nvtx3/nvToolsExt.h> /** * @brief Reserve CUDA malloc heap size * * Call this function to change the CUDA malloc heap size limit. * This value depends on the total size of all the malloc() * calls needed for redact_kernel. * * @param heap_size Number of bytes to reserve * Default is 1GB */ void set_malloc_heap_size(size_t heap_size = 1073741824) // 1GB { size_t max_malloc_heap_size = 0; cudaDeviceGetLimit(&max_malloc_heap_size, cudaLimitMallocHeapSize); if (max_malloc_heap_size < heap_size) { max_malloc_heap_size = heap_size; if (cudaDeviceSetLimit(cudaLimitMallocHeapSize, max_malloc_heap_size) != cudaSuccess) { fprintf(stderr, "could not set malloc heap size to %ldMB\n", (heap_size / (1024 * 1024))); throw std::runtime_error(""); } } } /** * @brief Builds the output for each row * * This thread is called once per row in d_names. * * Note: This uses malloc() in a device kernel which works great * but is not very efficient. This can be useful for prototyping * on functions where performance is not yet important. * All calls to malloc() must have a corresponding free() call. * The separate free_kernel is launched for this purpose. 
* * @param d_names Column of names * @param d_visibilities Column of visibilities * @param redaction Redacted string replacement * @param d_output Output array of string_view objects */ __global__ void redact_kernel(cudf::column_device_view const d_names, cudf::column_device_view const d_visibilities, cudf::string_view redaction, cudf::string_view* d_output) { // The row index is resolved from the CUDA thread/block objects auto index = threadIdx.x + blockIdx.x * blockDim.x; // There may be more threads than actual rows if (index >= d_names.size()) return; auto const visible = cudf::string_view("public", 6); auto const name = d_names.element<cudf::string_view>(index); auto const vis = d_visibilities.element<cudf::string_view>(index); if (vis == visible) { auto const space_idx = name.find(' '); auto const first = name.substr(0, space_idx); auto const last_initial = name.substr(space_idx + 1, 1); auto const output_size = first.size_bytes() + last_initial.size_bytes() + 1; char* output_ptr = static_cast<char*>(malloc(output_size)); d_output[index] = cudf::string_view{output_ptr, output_size}; // build output string memcpy(output_ptr, last_initial.data(), last_initial.size_bytes()); output_ptr += last_initial.size_bytes(); *output_ptr++ = ' '; memcpy(output_ptr, first.data(), first.size_bytes()); } else { d_output[index] = cudf::string_view{redaction.data(), redaction.size_bytes()}; } } /** * @brief Frees the temporary individual string objects created in the * redact_kernel * * Like malloc(), free() is not very efficient but must be called for * each malloc() to return the memory to the CUDA malloc heap. * * @param redaction Redacted string replacement (not to be freed) * @param d_output Output array of string_view objects to free */ __global__ void free_kernel(cudf::string_view redaction, cudf::string_view* d_output, int count) { auto index = threadIdx.x + blockIdx.x * blockDim.x; if (index >= count) return; auto ptr = const_cast<char*>(d_output[index].data()); if (ptr != redaction.data()) { free(ptr); } } std::unique_ptr<cudf::column> redact_strings(cudf::column_view const& names, cudf::column_view const& visibilities) { // all device memory operations and kernel functions will run on this stream auto stream = rmm::cuda_stream_default; set_malloc_heap_size(); // to illustrate adjusting the malloc heap auto const d_names = cudf::column_device_view::create(names, stream); auto const d_visibilities = cudf::column_device_view::create(visibilities, stream); auto const d_redaction = cudf::string_scalar(std::string("X X"), true, stream); constexpr int block_size = 128; // this arbitrary size should be a power of 2 auto const blocks = (names.size() + block_size - 1) / block_size; nvtxRangePushA("redact_strings"); // create a vector for the output strings' pointers auto str_ptrs = new rmm::device_uvector<cudf::string_view>(names.size(), stream); auto result = [&] { // build the output strings redact_kernel<<<blocks, block_size, 0, stream.value()>>>( *d_names, *d_visibilities, d_redaction.value(), str_ptrs->data()); // create strings column from the string_view vector // this copies all the individual strings into a single output column return cudf::make_strings_column(*str_ptrs, cudf::string_view{nullptr, 0}, stream); }(); // free the individual temporary memory pointers free_kernel<<<blocks, block_size, 0, stream.value()>>>( d_redaction.value(), str_ptrs->data(), names.size()); delete str_ptrs; // wait for all of the above to finish stream.synchronize(); nvtxRangePop(); return result; }
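Both variants of redact_strings bracket the whole redaction step with an NVTX range (nvtxRangePushA/nvtxRangePop) so it shows up as one named region in a profiler such as Nsight Systems, and they pop only after synchronizing so the region spans the GPU work rather than just the launches. A minimal standalone use of that pattern is sketched below; the kernel body and the region name are placeholders:

#include <cuda_runtime.h>
#include <nvtx3/nvToolsExt.h>

__global__ void dummy_kernel(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;   // placeholder work
}

int main() {
    const int n = 1 << 20;
    float* d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    nvtxRangePushA("dummy_region");          // named region visible in the profiler timeline
    dummy_kernel<<<(n + 255) / 256, 256>>>(d_x, n);
    cudaDeviceSynchronize();                 // close the range only after the GPU work finishes
    nvtxRangePop();

    cudaFree(d_x);
    return 0;
}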
bf76b73ffbf573fa8ef4d438e4e612b6095e8bf1.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorFactories.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <ATen/NumericUtils.h> #include <c10/hip/HIPMathCompat.h> #include <ATen/NumericUtils.h> #include <c10/util/complex.h> namespace at { namespace native { void bitwise_not_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a) { return !a; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ~a; }); }); } } void exp_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exp_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::exp(a); }); }); } void exp2_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "exp2_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::exp2(a); }); }); } void expm1_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "expm1_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::expm1(a); }); }); } void i0_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "i0_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_i0(a); }); }); } // We manually overload rsqrt because std::rsqrt does not work with complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) { return ::rsqrt(v); } template<typename T> __host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) { const c10::complex<T> one = c10::complex<T>(1.0, 0); // std::sqrt for c10::complex is overloaded in c10/util/complex_math.h return one / ::sqrt(v); } void rsqrt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "rsqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { // In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float. 
return rsqrt_wrapper(a); }); }); } void sqrt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sqrt(a); }); }); } void sigmoid_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "sigmoid_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { scalar_t one = scalar_t(1); return one / (one + ::exp(- a)); }); }); } void sinc_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sinc_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { if (a == scalar_t(0)) { return scalar_t(1); } else { scalar_t product = scalar_t(M_PI) * a; return std::sin(product) / product; } }); }); } void logit_kernel_cuda(TensorIterator& iter, Scalar eps_scalar) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "logit_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC eps = eps_scalar.to<T_ACC>(); if (eps < T_ACC(0)) { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { const T_ACC x_acc = static_cast<T_ACC>(x); return c10::hip::compat::log(x_acc / (T_ACC(1) - x_acc)); }); } else { const T_ACC lo = eps; const T_ACC hi = T_ACC(1) - eps; gpu_kernel( iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t { const T_ACC x_acc = static_cast<T_ACC>(x); T_ACC z = x_acc < lo ? lo : (x_acc > hi ? hi : x_acc); return c10::hip::compat::log(z / (T_ACC(1) - z)); }); } }); } void erf_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::erf(a); }); }); } void erfc_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfc_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::erfc(a); }); }); } void erfinv_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfinv_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::erfinv(a); }); }); } void clamp_kernel_cuda(TensorIterator& iter, Scalar min_value, Scalar max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() { auto lower = min_value.to<scalar_t>(); auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_min_kernel_cuda(TensorIterator& iter, Scalar min_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() { auto lower = min_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_max_kernel_cuda(TensorIterator& iter, Scalar max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() { auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, upper); } 
}); }); } void nan_to_num_kernel_cuda( TensorIterator& iter, c10::optional<double> nan, c10::optional<double> pos_inf, c10::optional<double> neg_inf) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "nan_to_num_cuda", [&]() { scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.)); scalar_t pos_inf_replacement = pos_inf.has_value() ? static_cast<scalar_t>(pos_inf.value()) : std::numeric_limits<scalar_t>::max(); scalar_t neg_inf_replacement = neg_inf.has_value() ? static_cast<scalar_t>(neg_inf.value()) : std::numeric_limits<scalar_t>::lowest(); gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t { return ( at::_isnan(a) ? nan_replacement : (a == std::numeric_limits<scalar_t>::infinity() ? pos_inf_replacement : (a == -std::numeric_limits<scalar_t>::infinity() ? neg_inf_replacement : a))); }); }); } void kaiser_window_kernel_cuda(TensorIterator& iter, int64_t window_length, double beta_){ AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){ using T_ACC = acc_type<scalar_t, true>; const T_ACC inv_alpha = static_cast<T_ACC>(2.0 / (window_length - 1)); const T_ACC beta = static_cast<T_ACC>(beta_); const T_ACC inv_i0_beta = 1.0 / calc_i0(beta); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t { T_ACC x = static_cast<T_ACC>(a) * inv_alpha - 1; T_ACC y = std::max<T_ACC>(0, 1 - x * x); return calc_i0(beta * ::sqrt(y)) * inv_i0_beta; }); }); } REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda); REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda); REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda); REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda); REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda); REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda); REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda); REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda); REGISTER_DISPATCH(sinc_stub, &sinc_kernel_cuda); REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda); REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda); REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda); REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda); REGISTER_DISPATCH(clamp_stub, &clamp_kernel_cuda); REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_cuda); REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_cuda); REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda); REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda); } // namespace native } // namespace at
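The kernels in this file are all instances of one pattern: an AT_DISPATCH macro picks scalar_t for the iterator's dtype, and gpu_kernel applies a small device lambda elementwise. The sketch below shows the bare pattern in plain CUDA with a functor instead of ATen's GPU_LAMBDA machinery (unary_kernel and Sigmoid are illustrative names, not PyTorch internals); the sigmoid body matches the 1/(1 + exp(-a)) formula used above:

#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

// One elementwise kernel, templated on the unary functor.
template <typename F>
__global__ void unary_kernel(const float* in, float* out, int n, F op) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = op(in[i]);
}

struct Sigmoid {
    __device__ float operator()(float a) const { return 1.0f / (1.0f + expf(-a)); }
};

int main() {
    const int n = 1024;
    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemset(d_in, 0, n * sizeof(float));          // sigmoid(0) == 0.5
    unary_kernel<<<(n + 255) / 256, 256>>>(d_in, d_out, n, Sigmoid{});
    cudaDeviceSynchronize();
    float h = 0.0f;
    cudaMemcpy(&h, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sigmoid(0) = %f\n", h);                  // expect 0.5
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}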
bf76b73ffbf573fa8ef4d438e4e612b6095e8bf1.cu
#include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorFactories.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/NumericUtils.h> #include <c10/cuda/CUDAMathCompat.h> #include <ATen/NumericUtils.h> #include <c10/util/complex.h> namespace at { namespace native { void bitwise_not_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a) { return !a; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ~a; }); }); } } void exp_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exp_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::exp(a); }); }); } void exp2_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "exp2_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::exp2(a); }); }); } void expm1_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "expm1_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::expm1(a); }); }); } void i0_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "i0_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_i0(a); }); }); } // We manually overload rsqrt because std::rsqrt does not work with complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) { return ::rsqrt(v); } template<typename T> __host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) { const c10::complex<T> one = c10::complex<T>(1.0, 0); // std::sqrt for c10::complex is overloaded in c10/util/complex_math.h return one / ::sqrt(v); } void rsqrt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "rsqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { // In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float. 
return rsqrt_wrapper(a); }); }); } void sqrt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sqrt(a); }); }); } void sigmoid_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "sigmoid_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { scalar_t one = scalar_t(1); return one / (one + std::exp(- a)); }); }); } void sinc_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sinc_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { if (a == scalar_t(0)) { return scalar_t(1); } else { scalar_t product = scalar_t(M_PI) * a; return std::sin(product) / product; } }); }); } void logit_kernel_cuda(TensorIterator& iter, Scalar eps_scalar) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "logit_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC eps = eps_scalar.to<T_ACC>(); if (eps < T_ACC(0)) { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { const T_ACC x_acc = static_cast<T_ACC>(x); return c10::cuda::compat::log(x_acc / (T_ACC(1) - x_acc)); }); } else { const T_ACC lo = eps; const T_ACC hi = T_ACC(1) - eps; gpu_kernel( iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t { const T_ACC x_acc = static_cast<T_ACC>(x); T_ACC z = x_acc < lo ? lo : (x_acc > hi ? hi : x_acc); return c10::cuda::compat::log(z / (T_ACC(1) - z)); }); } }); } void erf_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::erf(a); }); }); } void erfc_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfc_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::erfc(a); }); }); } void erfinv_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfinv_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::erfinv(a); }); }); } void clamp_kernel_cuda(TensorIterator& iter, Scalar min_value, Scalar max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() { auto lower = min_value.to<scalar_t>(); auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_min_kernel_cuda(TensorIterator& iter, Scalar min_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() { auto lower = min_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_max_kernel_cuda(TensorIterator& iter, Scalar max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() { auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, 
upper); } }); }); } void nan_to_num_kernel_cuda( TensorIterator& iter, c10::optional<double> nan, c10::optional<double> pos_inf, c10::optional<double> neg_inf) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "nan_to_num_cuda", [&]() { scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.)); scalar_t pos_inf_replacement = pos_inf.has_value() ? static_cast<scalar_t>(pos_inf.value()) : std::numeric_limits<scalar_t>::max(); scalar_t neg_inf_replacement = neg_inf.has_value() ? static_cast<scalar_t>(neg_inf.value()) : std::numeric_limits<scalar_t>::lowest(); gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t { return ( at::_isnan(a) ? nan_replacement : (a == std::numeric_limits<scalar_t>::infinity() ? pos_inf_replacement : (a == -std::numeric_limits<scalar_t>::infinity() ? neg_inf_replacement : a))); }); }); } void kaiser_window_kernel_cuda(TensorIterator& iter, int64_t window_length, double beta_){ AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){ using T_ACC = acc_type<scalar_t, true>; const T_ACC inv_alpha = static_cast<T_ACC>(2.0 / (window_length - 1)); const T_ACC beta = static_cast<T_ACC>(beta_); const T_ACC inv_i0_beta = 1.0 / calc_i0(beta); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t { T_ACC x = static_cast<T_ACC>(a) * inv_alpha - 1; T_ACC y = std::max<T_ACC>(0, 1 - x * x); return calc_i0(beta * ::sqrt(y)) * inv_i0_beta; }); }); } REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda); REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda); REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda); REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda); REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda); REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda); REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda); REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda); REGISTER_DISPATCH(sinc_stub, &sinc_kernel_cuda); REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda); REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda); REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda); REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda); REGISTER_DISPATCH(clamp_stub, &clamp_kernel_cuda); REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_cuda); REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_cuda); REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda); REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda); } // namespace native } // namespace at
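As a cross-check on the arithmetic in kaiser_window_kernel_cuda above: each output element is I0(beta * sqrt(max(0, 1 - x^2))) / I0(beta) with x = n * 2/(N-1) - 1. A host-only reference is sketched below in C++17, using std::cyl_bessel_i in place of calc_i0 (kaiser_window_ref is an illustrative name, and N is assumed to be at least 2 so the 1/(N-1) factor is well defined):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Host reference for the per-element formula in kaiser_window_kernel_cuda:
//   x    = n * (2 / (N - 1)) - 1
//   w[n] = I0(beta * sqrt(max(0, 1 - x*x))) / I0(beta)
// std::cyl_bessel_i(0, x) (C++17) stands in for calc_i0.
std::vector<double> kaiser_window_ref(int N, double beta) {
    std::vector<double> w(N);
    const double inv_alpha   = 2.0 / (N - 1);
    const double inv_i0_beta = 1.0 / std::cyl_bessel_i(0.0, beta);
    for (int n = 0; n < N; ++n) {
        const double x = n * inv_alpha - 1.0;
        const double y = std::max(0.0, 1.0 - x * x);
        w[n] = std::cyl_bessel_i(0.0, beta * std::sqrt(y)) * inv_i0_beta;
    }
    return w;
}

int main() {
    for (double v : kaiser_window_ref(8, 12.0))
        printf("%.6f ", v);   // tapers from ~1/I0(beta) at the ends toward 1 near the middle
    printf("\n");
    return 0;
}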
77d6e768729d77562406f0733ccaf8ee7fe337df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <iostream> #include <assert.h> #include <nvmatrix_kernels.cuh> #include <nvmatrix.cuh> #include <conv_util.cuh> using namespace std; __device__ inline float square(const float a) { return a * a; } /* * blockIdx.y determines module in batches of B_Y * blockIdx.x determines filter in batches of B_X * filtersPerThread * * weights: (numModules, numColors, filterPixels, numFilters) * Not fully coalesced if B_X < 32, so use cache. */ template <int B_Y, int B_X, int filtersPerThread> __global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) { const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y; const uint filterIdx = B_X * blockIdx.x + threadIdx.x; float prod[filtersPerThread]; #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = 0; } if (moduleIdx < numModules) { weights += moduleIdx * weightsPerFilter * numFilters + filterIdx; for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] += square(weights[p * numFilters + i * B_X]); } } #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = sqrtf(prod[i]); prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f; } for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { weights[p * numFilters + i * B_X] *= prod[i]; } } } } /* * weights: (numModules, numColors, filterPixels, numFilters) */ void normalizeLocalWeights(NVMatrix& weights, int numModules, float norm) { int numFilters = weights.getNumCols(); int weightsPerFilter = weights.getNumRows() / numModules; assert(numModules * weightsPerFilter == weights.getNumRows()); assert(!weights.isTrans()); assert(weights.isContiguous()); assert(numFilters % 16 == 0); int bx = numFilters % 32 == 0 ? 32 : 16; int by = bx == 32 ? 4 : 8; int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 
2 : 1; dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by)); dim3 threads(bx, by); if (filtersPerThread == 4) { hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 4>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else if (filtersPerThread == 2) { hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 2>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 1>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else { hipFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<8, 16, 1>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } } } /* * Block size 4x32 * blockIdx.x determines img idx in batches of 32*imgsPerThread * blockIdx.y determines channel idx, pixel idx in batches of 4 * * threadIdx.x determins case idx * threadIdx.y determines pixel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride, const uint imgSize, const uint tgtSize, const uint startY, const uint startX) { const uint imgPixels = imgSize * imgSize; const uint tgtPixels = tgtSize * tgtSize; const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4); const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y; const uint tgtPxY = tgtPixelIdx / tgtSize; const uint tgtPxX = tgtPixelIdx % tgtSize; const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX; if (tgtPixelIdx < tgtPixels) { imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx; target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx; #pragma unroll for (uint i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) { target[i * 32] = imgs[i * 32]; } } } } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * Each thread produces (y,u,v) values for a particular (r,g,b) pixel * * The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV): * * [Y] [0.2126 0.7152 0.0722 ][R] * [U] = [-0.09991 -0.33609 0.436 ][G] * [V] [0.615 -0.55861 -0.05639][B] */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + caseIdx; #pragma unroll for 
(int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const float R = imgs[0 * imgChannelStride + i * 32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; target[0 * tgtChannelStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B; // Y target[1 * tgtChannelStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B; // U target[2 * tgtChannelStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B; // V } } } } __device__ inline float labf(const float x) { if (x > 0.0088564517f) { return __powf(x, 0.3333f); } return 7.787037f * x + 0.13793103f; } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * This proceeds in two steps. * * - First, RGB values are linearly transformed to XYZ as per * http://en.wikipedia.org/wiki/CIE_XYZ_color_space * - Second, XYZ values are nonlinearly transformed to L*a*b* as per * http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation * * Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel * * The RGB --> XYZ transform is: * * [X] [0.49 0.31 0.2 ][R] * [Y] = 5.6506753 * [0.17697 0.8124 0.01063 ][G] * [Z] [0 0.01 0.99 ][B] * * NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand. * * Then X_max, Y_max, Z_max = 5.6506753. * * The range of the L* values is [0, 100]. * If the center flag is given, the range will be [-50, 50]. * */ template <int imgsPerThread, bool checkCaseBounds, bool center> __global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const float R = imgs[0 * imgChannelStride + i * 32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; const float X = (0.49f * R + 0.31f * G + 0.2f * B); const float Y = (0.17697f * R + 0.8124f * G + 0.01063f * B); const float Z = (0.01f * G + 0.99f * B); const float labX = labf(X); const float labY = labf(Y); const float labZ = labf(Z); target[0 * tgtChannelStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0); // L* target[1 * tgtChannelStride + i * 32] = 500.0f * (labX - labY); // a* target[2 * tgtChannelStride + i * 32] = 200.0f * (labY - labZ); // b* } } } } /* * Block size 16x32. * Each block produces a 4x4 chunk of the output image. * threadIdx.y determines pixel idx in 4x4 chunk. * threadIdx.x determines case idx. * blockIdx.x determines case idx in batches of 32*imgsPerThread. * blockIdx.y determines 4x4 chunk idx, channel idx. * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize (roughly) * * This is a rather naive kernel that relies on cache for speed. But all it's doing * is basic texture manipulation, which is very local in nature, so it should be ok. 
* Also, it will in practice be a tiny fraction of the runtime of a large convnet. * * So that is my justification for being lazy here. */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize, const int numImages, const int imgStride, const float scale, const float centerScale) { const int numChunksX = DIVUP(tgtSize, 4); const int numChunks = numChunksX * numChunksX; const int channelIdx = blockIdx.y / numChunks; const int chunkIdx = blockIdx.y % numChunks; const int chunkIdxX = chunkIdx % numChunksX; const int chunkIdxY = chunkIdx / numChunksX; const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int imgPixels = imgSize * imgSize; const int tgtPixels = tgtSize * tgtSize; const int pxX = 4 * chunkIdxX + threadIdx.y % 4; const int pxY = 4 * chunkIdxY + threadIdx.y / 4; if (pxY < tgtSize && pxX < tgtSize) { const int pxIdx = pxY * tgtSize + pxX; imgs += channelIdx * imgPixels * imgStride + caseIdx; target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx; // This will cause slight distortions at the edges when upsampling in some cases. // But I think that's not a big deal. const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale)); const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale)); const float u = floorf(srcPxX + 1) - srcPxX; const float w = srcPxY - floorf(srcPxY); // Consider doing max(0, min(imgSize, x)) here const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left const int srcPx1 = srcPx0 + 1; // top-right const int srcPx2 = srcPx0 + imgSize; // bottom-left const int srcPx3 = srcPx2 + 1; // bottom-right #pragma unroll for (int c = 0; c < imgsPerThread; ++c) { if (!checkCaseBounds || caseIdx + c * 32 < numImages) { const float val0 = imgs[srcPx0 * imgStride + c * 32]; const float val1 = imgs[srcPx1 * imgStride + c * 32]; const float val2 = imgs[srcPx2 * imgStride + c * 32]; const float val3 = imgs[srcPx3 * imgStride + c * 32]; const float c0 = u * (val0 - val1) + val1; const float c1 = u * (val2 - val3) + val3; target[32 * c] = w * (c1 - c0) + c0; } } } } /* * Block size B_YxB_X. * B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx * B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) * * target can be the same matrix as imgs. * radius must be one of 3, 5, 7, 9. * * Tried imgsPerThread, slower. 
*/ template<int B_Y, int B_X, int radius> __global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize, const int numImages, const int imgStride, const bool horiz, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilter[radius]; const int imgPixels = imgSize * imgSize; const int ty = B_Y * blockIdx.y + threadIdx.y; const int channelIdx = ty / imgSize; const int rowIdx = ty % imgSize; const int imgIdx = B_X*blockIdx.x + threadIdx.x; const int filterWidth = 2*radius+1; // const int tidx = B_Y * threadIdx.y + threadIdx.x; if (horiz) { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx; } else { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx; } float outputs[filterWidth-1]; #pragma unroll for (int r = 0; r < filterWidth-1; r++) { outputs[r] = 0; } if (threadIdx.x < filterWidth-1) { shFilter[threadIdx.x] = filter[threadIdx.x]; } __syncthreads(); if (imgIdx < numImages) { // This writes radius*2 = filterWidth - 1 values to outputs #pragma unroll for (int col = 0; col < radius; col++) { float px = imgs[0]; #pragma unroll for (int r = 0; r < radius + 1 + col; r++) { outputs[r] += px * shFilter[radius + col - r]; } imgs += horiz ? imgStride : imgStride * imgSize; } // Unfortunately this has to be at this level of granularity if (scaleTargets != 0) { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { float* t = &target[0]; t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } else { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { target[0] = scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } } } /* * Block size B_YxB_X * blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, numOutputs, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds> __global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels, const int numImages, const int startX, const int strideX, const int outputsX, const bool reverse, const float scaleTargets, const float scaleOutput) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread); const int outputIdxX = blockIdx.x / numImgBlocks; const int outputIdxY = blockIdx.y / numChanBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread; const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread); if (myChanIdx >= numChannels) { return; } // if (blockIdx.x != 0 || blockIdx.y != 0) { // return; // } const int outputIdx = outputIdxY * outputsX + outputIdxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startImgPxX = startX + outputIdxX * strideX; const int startImgPxY = startX + outputIdxY * strideX; const int imgIdx = blockImgIdx + threadIdx.x; const int imgPx = startImgPxY * imgSize + startImgPxX; imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx; target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx; if (scaleTargets != 0) { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } else { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } } /* * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, outputs, numImages) */ void _convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, bool reverse, float scaleTargets, float scaleOutput) { int numImages = reverse ? 
target.getNumCols() : images.getNumCols(); int imgPixels = imgSize * imgSize; assert(!images.isTrans()); assert(!target.isTrans()); assert(images.isContiguous()); assert(target.isContiguous()); assert(strideX > 1); int outputsX = DIVUP(imgSize, strideX); int outputs = outputsX * outputsX; if (reverse) { assert(target.getNumRows() == numChannels * outputs); } else { assert(images.getNumRows() == numChannels * imgPixels); } if (scaleTargets == 0) { if (reverse) { images.resize(numChannels * imgPixels, numImages); images.apply(NVMatrixOps::Zero()); } else { target.resize(numChannels*outputs, numImages); } } else { if (reverse) { assert(images.getNumRows() == numChannels * outputs); assert(images.getNumCols() == numImages); } else { assert(target.getNumRows() == numChannels * outputs); assert(target.getNumCols() == numImages); } } int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; int chansPerThread = numChannels % 8 == 0 ? 2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX); if (imgsPerThread == 4) { if (chansPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (chansPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, 
scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } else { if (chansPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } } void convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput); } void convBedOfNailsUndo(NVMatrix& actsGrad, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) */ void convGaussianBlur(NVMatrix& images, NVMatrix& filter, NVMatrix& target, bool horiz, int numChannels, float scaleTargets, float scaleOutputs) { int numImages = images.getNumCols(); int radius = filter.getNumCols() / 2; int imgPixels = images.getNumRows() / numChannels; int imgSize = int(sqrt(imgPixels)); assert(imgPixels == imgSize * imgSize); assert(radius >= 1 && radius <= 4); assert(imgSize >= 2 * radius + 1); assert(filter.getNumRows() == 1); assert(images.getNumRows() == numChannels * imgPixels); assert(!images.isTrans()); assert(!filter.isTrans()); assert(!target.isTrans()); assert(target.isContiguous()); if (scaleTargets == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y)); if (radius == 1) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 1>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 2) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, 
hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 2>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 3) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 3>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 4) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 4>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } } /* * Block size 1x128 * blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread * blockIdx.y determines pixel.y * * So each block does one output for some number of images and all the fliters. * * threadIdx.x determines img idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int imgsPerThread, int numFilters, bool checkCaseBounds> __global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y; const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += pxIdx * numImages + imgIdx; denoms += pxIdx * numImages + imgIdx; meanDiffs += imgIdx; target += pxIdx * numImages + imgIdx; float prod[numFilters][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * 128] = prod[f][i]; target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches 
of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region of pixels for some number of images/filters. 
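 *
 * Each 4x4 pixel tile cooperatively stages meanDiffs into the shared array
 * shDiffs, so the overlapping sizeX-by-sizeX normalization windows of the 16
 * pixels in the tile reuse each global load instead of re-reading it.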
* * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. * Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { __shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -sizeX/2 + blockPxX); const int startPxY = MAX(0, -sizeX/2 + blockPxY); const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3); const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -sizeX/2 + myPxY; const int myStartPxX = -sizeX/2 + myPxX; const int myEndPxY = myPxY + DIVUP(sizeX, 2); const int myEndPxX = myPxX + DIVUP(sizeX, 2); const int imgIdx = blockImgIdx + threadIdx.x; imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; 
i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]); } } } } __syncthreads(); } } // imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX; // imgs += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y */ template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked> __global__ void kFCNorm(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += pxIdx * numImages + imgIdx; denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = 0; } } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx; const int loopStartF = blocked ? startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += square(meanDiffs[f * imgPixels * numImages + i * B_X]); } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = 1 + addScale * prod[i]; denoms[i * B_X] = prod[i]; target[i * B_X] = imgs[i * B_X] * __powf(prod[i], -powScale); } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one output pixel for some number of images/filters. 
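 *
 * This is the cross-map response-norm gradient: prod accumulates acts over a
 * window of sizeF filters, and the output is
 *   target = inputs * prod + outGrads * denoms^(-powScale).
 * acts is expected to have been pre-scaled by kRNormUndoPrelims (below),
 * typically with scale = -2*addScale*powScale as in convResponseNormUndo.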
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked> __global__ void kFRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int imgPixels = imgSize * imgSize; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; acts += pxIdx * numImages + imgIdx; inputs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; outGrads += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; // if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) { // return; // } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[i] = 0; } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx; const int loopStartF = blocked ? startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += acts[f * imgPixels * numImages + i * B_X]; } } } // printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF); if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = inputs[i * B_X]; const float out = outGrads[i * B_X]; const float den = denoms[i * B_X]; prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = prod[i]; } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = inputs[i * B_X]; const float out = outGrads[i * B_X]; const float den = denoms[i * B_X]; prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i]; } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread * * sizeX should be something like 3 or 5 for this function. Not much more. 
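 *
 * prod accumulates the sum of squared activations over the sizeX-by-sizeX
 * spatial window around each pixel, and the output is
 *   target = scaleOutput * 1 / (0.001 + sqrt(prod))
 * (optionally accumulated into scaleTarget * target).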
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2). */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kTICA_manyfilter(float* imgs, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float scaleTarget, const float scaleOutput) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(imgs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } imgs += pxIdx * numImages; if (scaleTarget == 0) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * __fdividef(1.0f, 0.001 + sqrtf(prod[f][i])); } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * __fdividef(1.0f, 0.001 + sqrtf(prod[f][i])); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * ticas: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread * * sizeX should be something like 3 or 5 for this function. Not much more. * TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2). 
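 *
 * ticas is expected to hold the 1/(0.001 + sqrt(.)) terms produced by
 * kTICA_manyfilter; prod sums them over the spatial window, and the output is
 * roughly target = scaleOutput * -imgs * prod (see the two scaleTarget branches,
 * which differ in whether sqrt is applied to the accumulated sum).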
*/ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kTICAGrad_manyfilter(float* imgs, float* ticas, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float scaleTarget, const float scaleOutput) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; ticas += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { // adding 1/S values prod[f][i] += ticas[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]; } } } } } if (scaleTarget == 0) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * sqrtf(prod[f][i]); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
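 *
 * This routes average-pooling gradients back to the input image: for every
 * output window that contains this pixel, the window's gradient is added
 * after dividing by the (border-clipped) window area.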
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { for (int my = startOutputY; my < endOutputY; my++) { const float regionStartY = fmaxf(0, startX + my * strideX); const float regionEndY = fminf(imgSize, startX + my * strideX + subsX); const float regionSizeY = regionEndY - regionStartY; for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; const float regionStartX = fmaxf(0, startX + mx * strideX); const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX); const float regionSizeX = regionEndX - regionStartX; // It's important to do the division here, because pushing division into the below // loops makes the code 4x slower. 
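                // regionStart/End are clipped to the image border above, so windows that
                // hang over the edge divide by their actual (smaller) area.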
const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * maxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { __shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 
0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X]; } } } for (int my = startOutputY; my < endOutputY; my++) { for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i]; prod[f][i] += (img == ma) * mg; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalProbMaxUndo(float* maxout_h, float* maxout_p, float* hGrads, float* pGrads, float* target_z, float* target_t, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = DIVUP(numFilters, B_Y*filtersPerThread); const int outputIdxX = blockIdx.x / numImgBlocks; const int outputIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int myFilterIdx = (blockFilterIdx + threadIdx.y*filtersPerThread); if (myFilterIdx >= numFilters) { return; } const int outputIdx = outputIdxY * outputsX + outputIdxX; //const int outputIdx = outputIdxY * imgSize + outputIdxX; const int numOutputs = outputsX * outputsX; //const int numOutputs = imgSize * imgSize; const int imgPixels = imgSize * imgSize; const int startImgPxX = startX + outputIdxX * strideX; const int startImgPxY = startX + outputIdxY * strideX; 
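    // Each block handles one pooling output (outputIdxX, outputIdxY); startImgPxX/Y
    // is the top-left corner of its subsX-by-subsX input window, which the
    // loopStart/loopEnd bounds below clip to the image.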
const int imgIdx = blockImgIdx + threadIdx.x; maxout_h += myFilterIdx * imgPixels * numImages + imgIdx; hGrads += myFilterIdx * imgPixels * numImages + imgIdx; target_z += myFilterIdx * imgPixels * numImages + imgIdx; maxout_p += (myFilterIdx * numOutputs + outputIdx) * numImages + imgIdx; pGrads += (myFilterIdx * numOutputs + outputIdx) * numImages + imgIdx; target_t += (myFilterIdx * numOutputs + outputIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; for (int f = 0; f < filtersPerThread; f++) { for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } const int loopStartY = MAX(0, startImgPxY); const int loopStartX = MAX(0, startImgPxX); const int loopEndY = MIN(imgSize, startImgPxY + subsX); const int loopEndX = MIN(imgSize, startImgPxX + subsX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { for (int f = 0; f < filtersPerThread; f++) { const float ma = maxout_h[(f * imgPixels + imgPx) * numImages + i * B_X]; const float mg = hGrads[(f * imgPixels + imgPx) * numImages + i * B_X]; prod[f][i] += ma * mg; } } } } } for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { for (int f = 0; f < filtersPerThread; f++) { prod[f][i] -= (1 - maxout_p[f*numOutputs*numImages + i * B_X]) * pGrads[f*numOutputs*numImages + i * B_X]; } } } for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { for (int f = 0; f < filtersPerThread; f++) { const float ma = maxout_h[(f * imgPixels + imgPx) * numImages + i * B_X]; const float mg = hGrads[(f * imgPixels + imgPx) * numImages + i * B_X]; target_z[(f*imgPixels + imgPx) * numImages + i * B_X] = ma * mg - (prod[f][i] * ma); } } } } } for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { for (int f = 0; f < filtersPerThread; f++) { const float ma = -maxout_p[f*numOutputs*numImages + i * B_X]; const float mg = pGrads[f*numOutputs*numImages + i * B_X]; target_t[f*numOutputs*numImages + i * B_X] = -(ma * mg - (prod[f][i] * ma)); } } } } /* * acts := -2 x scale x acts x outGrads / denoms */ template<int B_X, int eltsPerThread> __global__ void kRNormUndoPrelims(float* acts, float* denoms, float* outGrads, const uint numElements, const float scale) { const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x; const uint numThreads = B_X * gridDim.x; for (uint i = e; i < numElements; i += numThreads*eltsPerThread) { #pragma unroll for (uint k = 0; k < eltsPerThread; k++) { if (i + k * B_X < numElements) { acts[i + k * B_X] = __fdividef(scale*outGrads[i + k * B_X] * acts[i + k * B_X], denoms[i + k * B_X]); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
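 *
 * prod accumulates acts over the sizeX-by-sizeX spatial window centered on this
 * pixel, and the output is
 *   target = inputs * prod + outGrads * denoms^(-powScale).
 * convResponseNormUndo (below) first runs kRNormUndoPrelims with
 * scale = -2*addScale*powScale, so acts already carries that factor here.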
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int imgPixels = imgSize * imgSize; const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1); const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1); const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1); const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1); const int imgIdx = blockImgIdx + threadIdx.x; acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int sy = startY; sy < endY; sy++) { for (int sx = startX; sx < endX; sx++) { const int outPx = sy * imgSize + sx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X]; } } } } } // outGrads += blockPx * numImages; if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, 
-powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. * Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { __shared__ float shActs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1); const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1); const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4); const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1; const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1; const int myEndPxY = myPxY + sizeX/2 + 1; const int myEndPxX = myPxX + sizeX/2 + 1; const int imgIdx = blockImgIdx + threadIdx.x; acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from 
memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += shActs[f][threadIdx.x + i * B_X]; } } } } __syncthreads(); } } acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX; acts += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } } void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target, int subsX, int startX, int strideX, int outputsX) { convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX, 0, 1); } /* * imgs: (numFilters * imgPixels, numImages) * maxGrads: (numFilters * numOutputs, numImages) * maxActs: (numFilters * numOutputs, numImages) * target: (numFilters * imgPixels, numImages) */ void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) { int outputs = outputsX * outputsX; int numImages = images.getNumCols(); int numFilters = maxGrads.getNumRows() / outputs; int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(maxGrads.getNumRows() == numFilters * outputs); assert(maxGrads.getNumCols() == numImages); assert(!images.isTrans()); assert(!target.isTrans()); assert(!maxGrads.isTrans()); assert(!maxActs.isTrans()); assert(images.isContiguous()); assert(maxGrads.isContiguous()); assert(maxActs.isContiguous()); assert(maxGrads.isSameDims(maxActs)); assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); target.resize(images); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, false>), dim3(blocks), dim3(threads), 
0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } cutilCheckMsg("convLocalMaxUndo: kernel execution failed"); } void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize) { convLocalAvgUndo(avgGrads, target, subsX, startX, strideX, outputsX, imgSize, 0, 1); } /* * avgGrads: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize, float scaleTargets, float scaleOutput) { int numImages = avgGrads.getNumCols(); int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numFilters = avgGrads.getNumRows() / outputs; assert(avgGrads.getNumRows() == numFilters * outputs); assert(!target.isTrans()); assert(!avgGrads.isTrans()); assert(avgGrads.isContiguous()); assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); target.resize(numFilters * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, 
false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } cutilCheckMsg("convLocalAvgUndo: kernel execution failed"); } /* prob max undo */ void localProbMaxUndo(NVMatrix& maxout_h, NVMatrix& maxout_p, NVMatrix& hGrads, NVMatrix& pGrads, NVMatrix& target_z, NVMatrix& target_t, int subsX, int startX, int strideX, int outputsX, int imgSize) { int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numImages = maxout_h.getNumCols(); int numFilters = maxout_h.getNumRows() / imgPixels; assert(maxout_h.getNumRows() / numFilters == imgPixels); assert(maxout_h.getNumRows() == numFilters * imgPixels); assert(imgSize * imgSize == imgPixels); assert(hGrads.getNumRows() == numFilters * imgPixels); assert(hGrads.getNumCols() == numImages); assert(target_z.getNumRows() == numFilters * imgPixels); assert(target_z.getNumCols() == numImages); assert(maxout_p.getNumRows() == numFilters * outputs); assert(maxout_p.getNumCols() == numImages); assert(pGrads.getNumRows() == numFilters * outputs); assert(pGrads.getNumCols() == numImages); assert(target_t.getNumRows() == numFilters * outputs); assert(target_t.getNumCols() == numImages); assert(!maxout_h.isTrans()); assert(!maxout_p.isTrans()); assert(!target_t.isTrans()); assert(!target_z.isTrans()); assert(!hGrads.isTrans()); assert(!pGrads.isTrans()); assert(maxout_h.isContiguous()); assert(maxout_p.isContiguous()); assert(hGrads.isContiguous()); assert(pGrads.isContiguous()); assert(target_z.isContiguous()); assert(target_t.isContiguous()); assert(numFilters % 16 == 0); assert(strideX <= subsX); target_z.resize(maxout_h); target_t.resize(maxout_p); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { hipLaunchKernelGGL(( kLocalProbMaxUndo<4, 32, 4, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } else { hipLaunchKernelGGL(( kLocalProbMaxUndo<4, 32, 4, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { hipLaunchKernelGGL(( kLocalProbMaxUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } else { hipLaunchKernelGGL(( kLocalProbMaxUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } } else { if (checkCaseBounds) { hipLaunchKernelGGL(( kLocalProbMaxUndo<4, 32, 1, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } else { hipLaunchKernelGGL(( kLocalProbMaxUndo<4, 32, 1, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } } cutilCheckMsg("convLocalMaxUndo: kernel execution failed"); } void convResponseNorm(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) */ void convContrastNorm(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(meanDiffs.isSameDims(images)); assert(!meanDiffs.isTrans()); assert(!images.isTrans()); assert(images.isContiguous()); assert(meanDiffs.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); target.resize(images); denoms.resize(images); assert(target.isContiguous()); if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) 
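        // Large-window path: kCNorm2 processes a 4x4 pixel tile per block with
        // bx*imgsPerThread = 64 images and filtersPerThread = 4 filters, staging
        // meanDiffs in shared memory. Both paths compute
        //   denoms = 1 + addScale * sum(meanDiffs^2 over the sizeX window)
        //   target = images * denoms^(-powScale)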
int imgsPerThread = 8; int filtersPerThread = 4; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); assert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, hipFuncCachePreferL1); // L1 faster here hipLaunchKernelGGL(( kCNorm2<8, 8, 4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, hipFuncCachePreferL1); // L1 faster here hipLaunchKernelGGL(( kCNorm2<8, 8, 4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } else { bool checkCaseBounds = numImages % 128 != 0; if (numFilters <= 8) { dim3 threads(128); dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize); if (numFilters == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 3) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, 
hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 5) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 6) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 7) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 8) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } } else { dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, 
images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } } cutilCheckMsg("convResponseNorm: kernel execution failed"); } void convContrastNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& meanDiffs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. */ void convResponseNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { int numImages = outGrads.getNumCols(); int imgPixels = outGrads.getNumRows() / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(outGrads.getNumRows() == numFilters * imgPixels); assert(denoms.isSameDims(outGrads)); assert(acts.isSameDims(denoms)); assert(!denoms.isTrans()); assert(!outGrads.isTrans()); assert(!acts.isTrans()); assert(!target.isTrans()); assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); target.resize(outGrads); assert(target.isContiguous()); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread)))); hipLaunchKernelGGL(( kRNormUndoPrelims<128, 4>), dim3(blocks), dim3(threads), 0, 0, acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale); // Now the main routine if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 
4 : 2; int filtersPerThread = 4; int bx = 16; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); threads = dim3(bx, 16); blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (imgsPerThread == 8) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && 
scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } } else { int imgsPerThread = numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; threads = dim3(32, 4); blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( 
kRNormUndo<4, 32, 1, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } } cutilCheckMsg("kRNormUndo: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize */ void convResizeBilinear(NVMatrix& images, NVMatrix& target, int imgSize, int tgtSize, float scale) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int numChannels = images.getNumRows() / imgPixels; int numImages = images.getNumCols(); assert(images.getNumRows() == numChannels * imgPixels); target.resize(numChannels * tgtPixels, numImages); assert(target.isContiguous()); int numChunksX = DIVUP(tgtSize, 4); int numChunks = numChunksX * numChunksX; double imgCenter = imgSize * 0.5; double tgtCenter = tgtSize * 0.5; double centerScale = imgCenter - tgtCenter * scale; int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 16); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks); if (imgsPerThread == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kResizeBilinear<4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { hipFuncSetCacheConfig(kResizeBilinear<4, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kResizeBilinear<2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { hipFuncSetCacheConfig(kResizeBilinear<2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kResizeBilinear<1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { hipFuncSetCacheConfig(kResizeBilinear<1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } cutilCheckMsg("convResizeBilinear: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToYUV(NVMatrix& images, NVMatrix& target) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = images.getNumRows() / 3; int numImages = images.getNumCols(); assert(images.getNumRows() == 3 * imgPixels); target.resize(3 * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToYUV<4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToYUV<4, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToYUV<2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToYUV<2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToYUV<1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToYUV<1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } cutilCheckMsg("convRGBToYUV: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToLAB(NVMatrix& images, NVMatrix& target, bool center) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = images.getNumRows() / 3; int numImages = images.getNumCols(); assert(images.getNumRows() == 3 * imgPixels); target.resize(3 * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (center) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } else if (imgsPerThread == 2) { if (center) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<2, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<2, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<2, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<2, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } else { if (center) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<1, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<1, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<1, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { hipFuncSetCacheConfig(kRGBToLAB<1, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } cutilCheckMsg("convRGBToLAB: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, 
tgtPixels, numImages) */ void convCrop(NVMatrix& imgs, NVMatrix& target, int imgSize, int tgtSize, int startY, int startX) { int numImages = imgs.getNumCols(); int imgPixels = imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int numChannels = imgs.getNumRows() / imgPixels; assert(imgs.getNumRows() == imgPixels * numChannels); assert(imgPixels == imgSize * imgSize); assert(imgSize - startY >= tgtSize); assert(imgSize - startX >= tgtSize); assert(startY >= 0); assert(startX >= 0); target.resize(numChannels * tgtPixels, numImages); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 blocks(DIVUP(numImages, 32 * imgsPerThread), numChannels * DIVUP(tgtPixels, 4)); dim3 threads(32, 4); if (imgsPerThread == 4) { if (checkCaseBounds) { hipLaunchKernelGGL(( kCrop<4, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { hipLaunchKernelGGL(( kCrop<4, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { hipLaunchKernelGGL(( kCrop<2, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { hipLaunchKernelGGL(( kCrop<2, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } else { if (checkCaseBounds) { hipLaunchKernelGGL(( kCrop<1, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { hipLaunchKernelGGL(( kCrop<1, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } cutilCheckMsg("convCrop: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * ticas: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) (out) * * Computes TICA-style gradient for given feature maps * f(x) = exp(-(sum_i{x_i^2}^(1/2))) * dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps) * * eps added for numerical stability */ void convTICAGrad(NVMatrix& images, NVMatrix& ticas, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(!images.isTrans()); assert(images.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); assert(ticas.isSameDims(images)); assert(ticas.isContiguous()); if (scaleTarget == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } assert(target.isContiguous()); // TEMPORARY assert(numFilters > 8); assert(sizeX < 6); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (checkCaseBounds) { hipFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kTICAGrad_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), ticas.getDevData(), target.getDevData(), imgSize, numFilters, 
numImages, sizeX, scaleTarget, scaleOutput); } else { hipFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kTICAGrad_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), ticas.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } cutilCheckMsg("convTICAGrad: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) (out) * * Computes TICA-style gradient for given feature maps * f(x) = exp(-(sum_i{x_i^2}^(1/2))) * dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps) * * eps added for numerical stability */ void convTICA(NVMatrix& images, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(!images.isTrans()); assert(images.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); if (scaleTarget == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } assert(target.isContiguous()); // TEMPORARY assert(numFilters > 8); assert(sizeX < 6); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (checkCaseBounds) { hipFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kTICA_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } else { hipFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kTICA_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } cutilCheckMsg("convTICA: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * Note: at present, I have no code to compute the meanDiffs. So it should be set * to be equal to images. In other words, this isn't really doing contrast normalization, * just response normalization. 
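 *
 * Editor's sketch (added; an assumed typical call, not from the original author), using only
 * the signature declared just below and the shapes documented above:
 *
 *   NVMatrix images, denoms, target;   // each (numFilters, imgPixels, numImages)
 *   // meanDiffs passed as `images` itself, per the note above, giving plain response norm:
 *   convContrastNormCrossMap(images, images, denoms, target,
 *                            numFilters, sizeF, addScale, powScale, blocked);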
*/ void convContrastNormCrossMap(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(meanDiffs.isSameDims(images)); assert(sizeF > 0 && sizeF <= numFilters); assert(!meanDiffs.isTrans()); assert(!images.isTrans()); assert(images.isContiguous()); assert(meanDiffs.isContiguous()); assert(numFilters % 16 == 0); target.resize(images); denoms.resize(images); assert(target.isContiguous()); bool checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); if (blocked) { if (checkCaseBounds) { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } else { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } else { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } } cutilCheckMsg("convContrastNormCrossMap: kernel execution failed"); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. */ void convResponseNormCrossMapUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked, float scaleTargets, float scaleOutput) { int numImages = outGrads.getNumCols(); int imgPixels = outGrads.getNumRows() / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(sizeF > 0 && sizeF <= numFilters); assert(outGrads.getNumRows() == numFilters * imgPixels); assert(denoms.isSameDims(outGrads)); assert(acts.isSameDims(denoms)); assert(!denoms.isTrans()); assert(!outGrads.isTrans()); assert(!acts.isTrans()); assert(!target.isTrans()); assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); target.resize(outGrads); assert(target.isContiguous()); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. 
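    // Editor's note (added, hedged): the prelim launch below caps the grid at
    // MIN(512, DIVUP(numElements, 128 * prelimEltsPerThread)) blocks of 128 threads and passes
    // -2*addScale*powScale as the scale argument, i.e. it performs the
    // "acts := -2 x scale x acts x outGrads / denoms" step described in the comment above
    // before the main kFRNormUndo kernels run.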
int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread)))); hipLaunchKernelGGL(( kRNormUndoPrelims<128, 4>), dim3(blocks), dim3(threads), 0, 0, acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale); // Now the main routine dim3 threads2 = dim3(32, 4); dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (blocked) { if (scaleTargets == 0 && scaleOutput == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, true, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, false, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, true, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, false, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } } else { if (scaleTargets == 0 && scaleOutput == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, true, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, false, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, true, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, false, false>), dim3(blocks2), 
dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } } cutilCheckMsg("convResponseNormCrossMapUndo: kernel execution failed"); } void convResponseNormCrossMap(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) { convContrastNormCrossMap(images, images, denoms, target, numFilters, sizeF, addScale, powScale, blocked); }
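// ---------------------------------------------------------------------------
// Editor's usage sketch (added; hedged). Not part of the original file: it only
// strings together functions whose signatures appear above; the matrix shapes
// follow the header comments, and the variable names are placeholders.
//
//   NVMatrix images, denoms, target;      // (numFilters, imgPixels, numImages)
//   // forward cross-map response normalization
//   convResponseNormCrossMap(images, denoms, target,
//                            numFilters, sizeF, addScale, powScale, blocked);
//   // backward pass -- note the header comment: this overwrites `acts`
//   convResponseNormCrossMapUndo(outGrads, denoms, images, acts, inGrads,
//                                numFilters, sizeF, addScale, powScale,
//                                blocked, 0, 1);
// ---------------------------------------------------------------------------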
77d6e768729d77562406f0733ccaf8ee7fe337df.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <iostream> #include <assert.h> #include <nvmatrix_kernels.cuh> #include <nvmatrix.cuh> #include <conv_util.cuh> using namespace std; __device__ inline float square(const float a) { return a * a; } /* * blockIdx.y determines module in batches of B_Y * blockIdx.x determines filter in batches of B_X * filtersPerThread * * weights: (numModules, numColors, filterPixels, numFilters) * Not fully coalesced if B_X < 32, so use cache. */ template <int B_Y, int B_X, int filtersPerThread> __global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) { const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y; const uint filterIdx = B_X * blockIdx.x + threadIdx.x; float prod[filtersPerThread]; #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = 0; } if (moduleIdx < numModules) { weights += moduleIdx * weightsPerFilter * numFilters + filterIdx; for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] += square(weights[p * numFilters + i * B_X]); } } #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = sqrtf(prod[i]); prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f; } for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { weights[p * numFilters + i * B_X] *= prod[i]; } } } } /* * weights: (numModules, numColors, filterPixels, numFilters) */ void normalizeLocalWeights(NVMatrix& weights, int numModules, float norm) { int numFilters = weights.getNumCols(); int weightsPerFilter = weights.getNumRows() / numModules; assert(numModules * weightsPerFilter == weights.getNumRows()); assert(!weights.isTrans()); assert(weights.isContiguous()); assert(numFilters % 16 == 0); int bx = numFilters % 32 == 0 ? 32 : 16; int by = bx == 32 ? 4 : 8; int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 
2 : 1; dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by)); dim3 threads(bx, by); if (filtersPerThread == 4) { cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, cudaFuncCachePreferL1); kNormalizeLCWeights<4, 32, 4><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else if (filtersPerThread == 2) { cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, cudaFuncCachePreferL1); kNormalizeLCWeights<4, 32, 2><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, cudaFuncCachePreferL1); kNormalizeLCWeights<4, 32, 1><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } else { cudaFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, cudaFuncCachePreferL1); kNormalizeLCWeights<8, 16, 1><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm); } } } /* * Block size 4x32 * blockIdx.x determines img idx in batches of 32*imgsPerThread * blockIdx.y determines channel idx, pixel idx in batches of 4 * * threadIdx.x determins case idx * threadIdx.y determines pixel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride, const uint imgSize, const uint tgtSize, const uint startY, const uint startX) { const uint imgPixels = imgSize * imgSize; const uint tgtPixels = tgtSize * tgtSize; const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4); const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y; const uint tgtPxY = tgtPixelIdx / tgtSize; const uint tgtPxX = tgtPixelIdx % tgtSize; const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX; if (tgtPixelIdx < tgtPixels) { imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx; target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx; #pragma unroll for (uint i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) { target[i * 32] = imgs[i * 32]; } } } } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * Each thread produces (y,u,v) values for a particular (r,g,b) pixel * * The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV): * * [Y] [0.2126 0.7152 0.0722 ][R] * [U] = [-0.09991 -0.33609 0.436 ][G] * [V] [0.615 -0.55861 -0.05639][B] */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const float R = imgs[0 * imgChannelStride + i * 
32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; target[0 * tgtChannelStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B; // Y target[1 * tgtChannelStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B; // U target[2 * tgtChannelStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B; // V } } } } __device__ inline float labf(const float x) { if (x > 0.0088564517f) { return __powf(x, 0.3333f); } return 7.787037f * x + 0.13793103f; } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * This proceeds in two steps. * * - First, RGB values are linearly transformed to XYZ as per * http://en.wikipedia.org/wiki/CIE_XYZ_color_space * - Second, XYZ values are nonlinearly transformed to L*a*b* as per * http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation * * Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel * * The RGB --> XYZ transform is: * * [X] [0.49 0.31 0.2 ][R] * [Y] = 5.6506753 * [0.17697 0.8124 0.01063 ][G] * [Z] [0 0.01 0.99 ][B] * * NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand. * * Then X_max, Y_max, Z_max = 5.6506753. * * The range of the L* values is [0, 100]. * If the center flag is given, the range will be [-50, 50]. * */ template <int imgsPerThread, bool checkCaseBounds, bool center> __global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const float R = imgs[0 * imgChannelStride + i * 32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; const float X = (0.49f * R + 0.31f * G + 0.2f * B); const float Y = (0.17697f * R + 0.8124f * G + 0.01063f * B); const float Z = (0.01f * G + 0.99f * B); const float labX = labf(X); const float labY = labf(Y); const float labZ = labf(Z); target[0 * tgtChannelStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0); // L* target[1 * tgtChannelStride + i * 32] = 500.0f * (labX - labY); // a* target[2 * tgtChannelStride + i * 32] = 200.0f * (labY - labZ); // b* } } } } /* * Block size 16x32. * Each block produces a 4x4 chunk of the output image. * threadIdx.y determines pixel idx in 4x4 chunk. * threadIdx.x determines case idx. * blockIdx.x determines case idx in batches of 32*imgsPerThread. * blockIdx.y determines 4x4 chunk idx, channel idx. * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize (roughly) * * This is a rather naive kernel that relies on cache for speed. But all it's doing * is basic texture manipulation, which is very local in nature, so it should be ok. * Also, it will in practice be a tiny fraction of the runtime of a large convnet. * * So that is my justification for being lazy here. 
*/ template <int imgsPerThread, bool checkCaseBounds> __global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize, const int numImages, const int imgStride, const float scale, const float centerScale) { const int numChunksX = DIVUP(tgtSize, 4); const int numChunks = numChunksX * numChunksX; const int channelIdx = blockIdx.y / numChunks; const int chunkIdx = blockIdx.y % numChunks; const int chunkIdxX = chunkIdx % numChunksX; const int chunkIdxY = chunkIdx / numChunksX; const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int imgPixels = imgSize * imgSize; const int tgtPixels = tgtSize * tgtSize; const int pxX = 4 * chunkIdxX + threadIdx.y % 4; const int pxY = 4 * chunkIdxY + threadIdx.y / 4; if (pxY < tgtSize && pxX < tgtSize) { const int pxIdx = pxY * tgtSize + pxX; imgs += channelIdx * imgPixels * imgStride + caseIdx; target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx; // This will cause slight distortions at the edges when upsampling in some cases. // But I think that's not a big deal. const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale)); const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale)); const float u = floorf(srcPxX + 1) - srcPxX; const float w = srcPxY - floorf(srcPxY); // Consider doing max(0, min(imgSize, x)) here const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left const int srcPx1 = srcPx0 + 1; // top-right const int srcPx2 = srcPx0 + imgSize; // bottom-left const int srcPx3 = srcPx2 + 1; // bottom-right #pragma unroll for (int c = 0; c < imgsPerThread; ++c) { if (!checkCaseBounds || caseIdx + c * 32 < numImages) { const float val0 = imgs[srcPx0 * imgStride + c * 32]; const float val1 = imgs[srcPx1 * imgStride + c * 32]; const float val2 = imgs[srcPx2 * imgStride + c * 32]; const float val3 = imgs[srcPx3 * imgStride + c * 32]; const float c0 = u * (val0 - val1) + val1; const float c1 = u * (val2 - val3) + val3; target[32 * c] = w * (c1 - c0) + c0; } } } } /* * Block size B_YxB_X. * B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx * B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) * * target can be the same matrix as imgs. * radius must be one of 3, 5, 7, 9. * * Tried imgsPerThread, slower. 
*/ template<int B_Y, int B_X, int radius> __global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize, const int numImages, const int imgStride, const bool horiz, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilter[radius]; const int imgPixels = imgSize * imgSize; const int ty = B_Y * blockIdx.y + threadIdx.y; const int channelIdx = ty / imgSize; const int rowIdx = ty % imgSize; const int imgIdx = B_X*blockIdx.x + threadIdx.x; const int filterWidth = 2*radius+1; // const int tidx = B_Y * threadIdx.y + threadIdx.x; if (horiz) { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx; } else { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx; } float outputs[filterWidth-1]; #pragma unroll for (int r = 0; r < filterWidth-1; r++) { outputs[r] = 0; } if (threadIdx.x < filterWidth-1) { shFilter[threadIdx.x] = filter[threadIdx.x]; } __syncthreads(); if (imgIdx < numImages) { // This writes radius*2 = filterWidth - 1 values to outputs #pragma unroll for (int col = 0; col < radius; col++) { float px = imgs[0]; #pragma unroll for (int r = 0; r < radius + 1 + col; r++) { outputs[r] += px * shFilter[radius + col - r]; } imgs += horiz ? imgStride : imgStride * imgSize; } // Unfortunately this has to be at this level of granularity if (scaleTargets != 0) { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { float* t = &target[0]; t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } else { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { target[0] = scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } } } /* * Block size B_YxB_X * blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, numOutputs, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds> __global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels, const int numImages, const int startX, const int strideX, const int outputsX, const bool reverse, const float scaleTargets, const float scaleOutput) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread); const int outputIdxX = blockIdx.x / numImgBlocks; const int outputIdxY = blockIdx.y / numChanBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread; const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread); if (myChanIdx >= numChannels) { return; } // if (blockIdx.x != 0 || blockIdx.y != 0) { // return; // } const int outputIdx = outputIdxY * outputsX + outputIdxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startImgPxX = startX + outputIdxX * strideX; const int startImgPxY = startX + outputIdxY * strideX; const int imgIdx = blockImgIdx + threadIdx.x; const int imgPx = startImgPxY * imgSize + startImgPxX; imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx; target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx; if (scaleTargets != 0) { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } else { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } } /* * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, outputs, numImages) */ void _convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, bool reverse, float scaleTargets, float scaleOutput) { int numImages = reverse ? 
target.getNumCols() : images.getNumCols(); int imgPixels = imgSize * imgSize; assert(!images.isTrans()); assert(!target.isTrans()); assert(images.isContiguous()); assert(target.isContiguous()); assert(strideX > 1); int outputsX = DIVUP(imgSize, strideX); int outputs = outputsX * outputsX; if (reverse) { assert(target.getNumRows() == numChannels * outputs); } else { assert(images.getNumRows() == numChannels * imgPixels); } if (scaleTargets == 0) { if (reverse) { images.resize(numChannels * imgPixels, numImages); images.apply(NVMatrixOps::Zero()); } else { target.resize(numChannels*outputs, numImages); } } else { if (reverse) { assert(images.getNumRows() == numChannels * outputs); assert(images.getNumCols() == numImages); } else { assert(target.getNumRows() == numChannels * outputs); assert(target.getNumCols() == numImages); } } int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; int chansPerThread = numChannels % 8 == 0 ? 2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX); if (imgsPerThread == 4) { if (chansPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (chansPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, 
reverse, scaleTargets, scaleOutput); } } } else { if (chansPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } } void convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput); } void convBedOfNailsUndo(NVMatrix& actsGrad, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) */ void convGaussianBlur(NVMatrix& images, NVMatrix& filter, NVMatrix& target, bool horiz, int numChannels, float scaleTargets, float scaleOutputs) { int numImages = images.getNumCols(); int radius = filter.getNumCols() / 2; int imgPixels = images.getNumRows() / numChannels; int imgSize = int(sqrt(imgPixels)); assert(imgPixels == imgSize * imgSize); assert(radius >= 1 && radius <= 4); assert(imgSize >= 2 * radius + 1); assert(filter.getNumRows() == 1); assert(images.getNumRows() == numChannels * imgPixels); assert(!images.isTrans()); assert(!filter.isTrans()); assert(!target.isTrans()); assert(target.isContiguous()); if (scaleTargets == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y)); if (radius == 1) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 1><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 2) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 2><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 3) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 3><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, 
        scaleTargets, scaleOutputs);
    } else if (radius == 4) {
        cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, cudaFuncCachePreferL1);
        kGaussianBlur<4, 32, 4><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages,
                                                     images.getStride(), horiz, scaleTargets, scaleOutputs);
    }
}

/*
 * Block size 1x128
 * blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread
 * blockIdx.y determines pixel.y
 *
 * So each block does one output for some number of images and all the filters.
 *
 * threadIdx.x determines img idx
 *
 * imgs:        (numFilters, imgPixels, numImages)
 * meanDiffs:   (numFilters, imgPixels, numImages)
 * denoms:      (numFilters, imgPixels, numImages) (out)
 * target:      (numFilters, imgPixels, numImages) (out)
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
 * numFilters must be divisible by B_Y*filtersPerThread
 */
template<int imgsPerThread, int numFilters, bool checkCaseBounds>
__global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
                                 const int numImages, const int sizeX, const float addScale, const float powScale) {
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread);
    const int pxIdxX = blockIdx.x / numImgBlocks;
    const int pxIdxY = blockIdx.y;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread;

    const int pxIdx = pxIdxY * imgSize + pxIdxX;

    const int startPxX = -sizeX/2 + pxIdxX;
    const int startPxY = -sizeX/2 + pxIdxY;
    const int imgIdx = blockImgIdx + threadIdx.x;

    imgs += pxIdx * numImages + imgIdx;
    denoms += pxIdx * numImages + imgIdx;
    meanDiffs += imgIdx;
    target += pxIdx * numImages + imgIdx;

    float prod[numFilters][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
            #pragma unroll
            for (int f = 0; f < numFilters; f++) {
                prod[f][i] = 0;
            }
        }
    }

    const int loopStartY = MAX(0, startPxY);
    const int loopStartX = MAX(0, startPxX);
    const int loopEndY = MIN(imgSize, startPxY + sizeX);
    const int loopEndX = MIN(imgSize, startPxX + sizeX);

    for (int y = loopStartY; y < loopEndY; y++) {
        for (int x = loopStartX; x < loopEndX; x++) {
            const int imgPx = y * imgSize + x;
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
                    #pragma unroll
                    for (int f = 0; f < numFilters; f++) {
                        prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]);
                    }
                }
            }
        }
    }

    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
            #pragma unroll
            for (int f = 0; f < numFilters; f++) {
                prod[f][i] = 1 + addScale * prod[f][i];
                denoms[f * imgPixels * numImages + i * 128] = prod[f][i];
                target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale);
            }
        }
    }
}

/*
 * Block size B_YxB_X
 * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
 * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
 *
 * So each block does one pixel for some number of images/filters.
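 * (Like kCNorm_fewfilter above, each output is
 *      denoms = 1 + addScale * sum(meanDiffs^2 over the sizeX x sizeX window),
 *      target = imgs * denoms^(-powScale);
 *  here the filters are tiled over blockIdx.y/threadIdx.y instead of a template parameter.)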
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region of pixels for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. 
* Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { __shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -sizeX/2 + blockPxX); const int startPxY = MAX(0, -sizeX/2 + blockPxY); const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3); const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -sizeX/2 + myPxY; const int myStartPxX = -sizeX/2 + myPxX; const int myEndPxY = myPxY + DIVUP(sizeX, 2); const int myEndPxX = myPxX + DIVUP(sizeX, 2); const int imgIdx = blockImgIdx + threadIdx.x; imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]); } } } } __syncthreads(); } } // imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX; // imgs += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { #pragma unroll for (int i = 0; i < 
imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y */ template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked> __global__ void kFCNorm(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += pxIdx * numImages + imgIdx; denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = 0; } } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx; const int loopStartF = blocked ? startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += square(meanDiffs[f * imgPixels * numImages + i * B_X]); } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = 1 + addScale * prod[i]; denoms[i * B_X] = prod[i]; target[i * B_X] = imgs[i * B_X] * __powf(prod[i], -powScale); } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one output pixel for some number of images/filters. 
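 * (Each thread accumulates acts over the sizeF-wide filter window -- blocked or sliding,
 *  depending on the 'blocked' template flag -- and writes
 *      target = inputs * sum(acts) + outGrads * denoms^(-powScale),
 *  optionally blended with the existing target via scaleTargets/scaleOutputs.)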
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked> __global__ void kFRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int imgPixels = imgSize * imgSize; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; acts += pxIdx * numImages + imgIdx; inputs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; outGrads += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; // if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) { // return; // } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[i] = 0; } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx; const int loopStartF = blocked ? startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += acts[f * imgPixels * numImages + i * B_X]; } } } // printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF); if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = inputs[i * B_X]; const float out = outGrads[i * B_X]; const float den = denoms[i * B_X]; prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = prod[i]; } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = inputs[i * B_X]; const float out = outGrads[i * B_X]; const float den = denoms[i * B_X]; prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i]; } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread * * sizeX should be something like 3 or 5 for this function. Not much more. 
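 *
 * (Each output is scaleOutput / (0.001 + sqrt(sum of imgs^2 over the sizeX x sizeX window)),
 *  optionally added to scaleTarget * target.)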
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2). */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kTICA_manyfilter(float* imgs, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float scaleTarget, const float scaleOutput) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(imgs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } imgs += pxIdx * numImages; if (scaleTarget == 0) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * __fdividef(1.0f, 0.001 + sqrtf(prod[f][i])); } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * __fdividef(1.0f, 0.001 + sqrtf(prod[f][i])); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * ticas: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread * * sizeX should be something like 3 or 5 for this function. Not much more. * TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2). 
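 *
 * (Each thread sums the precomputed 1/S values in 'ticas' over the sizeX x sizeX window
 *  around its pixel and uses that sum to scale -imgs into target.)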
*/ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kTICAGrad_manyfilter(float* imgs, float* ticas, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float scaleTarget, const float scaleOutput) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; ticas += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { // adding 1/S values prod[f][i] += ticas[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]; } } } } } if (scaleTarget == 0) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * sqrtf(prod[f][i]); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
 *
 * threadIdx.x determines img idx
 * threadIdx.y determines filter idx
 *
 * avgGrads:    (numFilters, numOutputs, numImages)
 * target:      (numFilters, imgPixels, numImages)
 *
 * numImages must be divisible by B_X*imgsPerThread
 * numFilters must be divisible by B_Y*filtersPerThread
 */
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters,
                              const int numImages, const int subsX, const int startX, const int strideX,
                              const int outputsX, const float scaleTargets, const float scaleOutputs) {
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockPxX = blockIdx.x / numImgBlocks;
    const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));

    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;

    const int blockPx = blockPxY * imgSize + blockPxX;
    const int numOutputs = outputsX * outputsX;
    const int imgPixels = imgSize * imgSize;

    const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
    const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
    const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
    const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);

    const int imgIdx = blockImgIdx + threadIdx.x;

    avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
    target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;

    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[f][i] = 0;
        }
    }

    if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX &&
        blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
        for (int my = startOutputY; my < endOutputY; my++) {
            const float regionStartY = fmaxf(0, startX + my * strideX);
            const float regionEndY = fminf(imgSize, startX + my * strideX + subsX);
            const float regionSizeY = regionEndY - regionStartY;
            for (int mx = startOutputX; mx < endOutputX; mx++) {
                const int outputIdx = my * outputsX + mx;
                const float regionStartX = fmaxf(0, startX + mx * strideX);
                const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX);
                const float regionSizeX = regionEndX - regionStartX;
                // It's important to do the division here, because pushing division into the below
                // loops makes the code 4x slower.
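                // Each input pixel accumulates avgGrads / (regionSizeX * regionSizeY) from every
                // pooling window that covers it, i.e. the average-pooling gradient is spread
                // uniformly over the (possibly clipped) subsX x subsX region.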
const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * maxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { __shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 
0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X]; } } } for (int my = startOutputY; my < endOutputY; my++) { for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i]; prod[f][i] += (img == ma) * mg; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalProbMaxUndo(float* maxout_h, float* maxout_p, float* hGrads, float* pGrads, float* target_z, float* target_t, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = DIVUP(numFilters, B_Y*filtersPerThread); const int outputIdxX = blockIdx.x / numImgBlocks; const int outputIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int myFilterIdx = (blockFilterIdx + threadIdx.y*filtersPerThread); if (myFilterIdx >= numFilters) { return; } const int outputIdx = outputIdxY * outputsX + outputIdxX; //const int outputIdx = outputIdxY * imgSize + outputIdxX; const int numOutputs = outputsX * outputsX; //const int numOutputs = imgSize * imgSize; const int imgPixels = imgSize * imgSize; const int startImgPxX = startX + outputIdxX * strideX; const int startImgPxY = startX + outputIdxY * strideX; 
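    // prod first accumulates maxout_h * hGrads over the subsX x subsX pooling window, then
    // (1 - maxout_p) * pGrads is subtracted from it; the result is used below to write both
    // target_z (per image pixel) and target_t (per pooling output).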
const int imgIdx = blockImgIdx + threadIdx.x; maxout_h += myFilterIdx * imgPixels * numImages + imgIdx; hGrads += myFilterIdx * imgPixels * numImages + imgIdx; target_z += myFilterIdx * imgPixels * numImages + imgIdx; maxout_p += (myFilterIdx * numOutputs + outputIdx) * numImages + imgIdx; pGrads += (myFilterIdx * numOutputs + outputIdx) * numImages + imgIdx; target_t += (myFilterIdx * numOutputs + outputIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; for (int f = 0; f < filtersPerThread; f++) { for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } const int loopStartY = MAX(0, startImgPxY); const int loopStartX = MAX(0, startImgPxX); const int loopEndY = MIN(imgSize, startImgPxY + subsX); const int loopEndX = MIN(imgSize, startImgPxX + subsX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { for (int f = 0; f < filtersPerThread; f++) { const float ma = maxout_h[(f * imgPixels + imgPx) * numImages + i * B_X]; const float mg = hGrads[(f * imgPixels + imgPx) * numImages + i * B_X]; prod[f][i] += ma * mg; } } } } } for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { for (int f = 0; f < filtersPerThread; f++) { prod[f][i] -= (1 - maxout_p[f*numOutputs*numImages + i * B_X]) * pGrads[f*numOutputs*numImages + i * B_X]; } } } for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { for (int f = 0; f < filtersPerThread; f++) { const float ma = maxout_h[(f * imgPixels + imgPx) * numImages + i * B_X]; const float mg = hGrads[(f * imgPixels + imgPx) * numImages + i * B_X]; target_z[(f*imgPixels + imgPx) * numImages + i * B_X] = ma * mg - (prod[f][i] * ma); } } } } } for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { for (int f = 0; f < filtersPerThread; f++) { const float ma = -maxout_p[f*numOutputs*numImages + i * B_X]; const float mg = pGrads[f*numOutputs*numImages + i * B_X]; target_t[f*numOutputs*numImages + i * B_X] = -(ma * mg - (prod[f][i] * ma)); } } } } /* * acts := -2 x scale x acts x outGrads / denoms */ template<int B_X, int eltsPerThread> __global__ void kRNormUndoPrelims(float* acts, float* denoms, float* outGrads, const uint numElements, const float scale) { const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x; const uint numThreads = B_X * gridDim.x; for (uint i = e; i < numElements; i += numThreads*eltsPerThread) { #pragma unroll for (uint k = 0; k < eltsPerThread; k++) { if (i + k * B_X < numElements) { acts[i + k * B_X] = __fdividef(scale*outGrads[i + k * B_X] * acts[i + k * B_X], denoms[i + k * B_X]); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
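 * (Spatial counterpart of kFRNormUndo: acts is summed over the sizeX x sizeX neighbourhood
 *  of each pixel and the result written as
 *      target = inputs * sum(acts) + outGrads * denoms^(-powScale),
 *  optionally blended via scaleTargets/scaleOutputs.)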
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int imgPixels = imgSize * imgSize; const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1); const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1); const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1); const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1); const int imgIdx = blockImgIdx + threadIdx.x; acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int sy = startY; sy < endY; sy++) { for (int sx = startX; sx < endX; sx++) { const int outPx = sy * imgSize + sx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X]; } } } } } // outGrads += blockPx * numImages; if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, 
-powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. * Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { __shared__ float shActs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1); const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1); const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4); const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1; const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1; const int myEndPxY = myPxY + sizeX/2 + 1; const int myEndPxX = myPxX + sizeX/2 + 1; const int imgIdx = blockImgIdx + threadIdx.x; acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from 
memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += shActs[f][threadIdx.x + i * B_X]; } } } } __syncthreads(); } } acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX; acts += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } } void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target, int subsX, int startX, int strideX, int outputsX) { convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX, 0, 1); } /* * imgs: (numFilters * imgPixels, numImages) * maxGrads: (numFilters * numOutputs, numImages) * maxActs: (numFilters * numOutputs, numImages) * target: (numFilters * imgPixels, numImages) */ void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) { int outputs = outputsX * outputsX; int numImages = images.getNumCols(); int numFilters = maxGrads.getNumRows() / outputs; int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(maxGrads.getNumRows() == numFilters * outputs); assert(maxGrads.getNumCols() == numImages); assert(!images.isTrans()); assert(!target.isTrans()); assert(!maxGrads.isTrans()); assert(!maxActs.isTrans()); assert(images.isContiguous()); assert(maxGrads.isContiguous()); assert(maxActs.isContiguous()); assert(maxGrads.isSameDims(maxActs)); assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); target.resize(images); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 4, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 4, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 2, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 2, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 2, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 2, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 1, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 1, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 1, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 1, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, 
outputsX, scaleTargets, scaleOutput); } } } cutilCheckMsg("convLocalMaxUndo: kernel execution failed"); } void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize) { convLocalAvgUndo(avgGrads, target, subsX, startX, strideX, outputsX, imgSize, 0, 1); } /* * avgGrads: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize, float scaleTargets, float scaleOutput) { int numImages = avgGrads.getNumCols(); int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numFilters = avgGrads.getNumRows() / outputs; assert(avgGrads.getNumRows() == numFilters * outputs); assert(!target.isTrans()); assert(!avgGrads.isTrans()); assert(avgGrads.isContiguous()); assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); target.resize(numFilters * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 4, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 4, 4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 4, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 4, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 2, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 2, 4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 2, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 2, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 1, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 1, 
4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 1, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 1, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } cutilCheckMsg("convLocalAvgUndo: kernel execution failed"); } /* prob max undo */ void localProbMaxUndo(NVMatrix& maxout_h, NVMatrix& maxout_p, NVMatrix& hGrads, NVMatrix& pGrads, NVMatrix& target_z, NVMatrix& target_t, int subsX, int startX, int strideX, int outputsX, int imgSize) { int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numImages = maxout_h.getNumCols(); int numFilters = maxout_h.getNumRows() / imgPixels; assert(maxout_h.getNumRows() / numFilters == imgPixels); assert(maxout_h.getNumRows() == numFilters * imgPixels); assert(imgSize * imgSize == imgPixels); assert(hGrads.getNumRows() == numFilters * imgPixels); assert(hGrads.getNumCols() == numImages); assert(target_z.getNumRows() == numFilters * imgPixels); assert(target_z.getNumCols() == numImages); assert(maxout_p.getNumRows() == numFilters * outputs); assert(maxout_p.getNumCols() == numImages); assert(pGrads.getNumRows() == numFilters * outputs); assert(pGrads.getNumCols() == numImages); assert(target_t.getNumRows() == numFilters * outputs); assert(target_t.getNumCols() == numImages); assert(!maxout_h.isTrans()); assert(!maxout_p.isTrans()); assert(!target_t.isTrans()); assert(!target_z.isTrans()); assert(!hGrads.isTrans()); assert(!pGrads.isTrans()); assert(maxout_h.isContiguous()); assert(maxout_p.isContiguous()); assert(hGrads.isContiguous()); assert(pGrads.isContiguous()); assert(target_z.isContiguous()); assert(target_t.isContiguous()); assert(numFilters % 16 == 0); assert(strideX <= subsX); target_z.resize(maxout_h); target_t.resize(maxout_p); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { kLocalProbMaxUndo<4, 32, 4, 2, false, true><<<blocks, threads>>>(maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } else { kLocalProbMaxUndo<4, 32, 4, 2, false, false><<<blocks, threads>>>(maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { kLocalProbMaxUndo<4, 32, 2, 2, false, true><<<blocks, threads>>>(maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } else { kLocalProbMaxUndo<4, 32, 2, 2, false, false><<<blocks, threads>>>(maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } } else { if (checkCaseBounds) { kLocalProbMaxUndo<4, 32, 1, 2, false, true><<<blocks, threads>>>(maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } else { kLocalProbMaxUndo<4, 32, 1, 2, false, false><<<blocks, threads>>>(maxout_h.getDevData(), maxout_p.getDevData(), hGrads.getDevData(), pGrads.getDevData(), target_z.getDevData(), target_t.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX); } } cutilCheckMsg("convLocalMaxUndo: kernel execution failed"); } void convResponseNorm(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) */ void convContrastNorm(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(meanDiffs.isSameDims(images)); assert(!meanDiffs.isTrans()); assert(!images.isTrans()); assert(images.isContiguous()); assert(meanDiffs.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); target.resize(images); denoms.resize(images); assert(target.isContiguous()); if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) 
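// Launch shape used in this branch (see the statements that follow): threads are (bx, 16) and each
// thread block covers a 4x4 tile of image pixels for filtersPerThread (4) filters and
// bx*imgsPerThread (64) images, so the grid is DIVUP(imgSize, 4) * DIVUP(numImages, 64) blocks wide
// and DIVUP(imgSize, 4) * numFilters / 4 blocks tall; checkCaseBounds guards the tail case where
// numImages is not a multiple of 64.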
int imgsPerThread = 8; int filtersPerThread = 4; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); assert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, cudaFuncCachePreferL1); // L1 faster here kCNorm2<8, 8, 4, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, cudaFuncCachePreferL1); // L1 faster here kCNorm2<8, 8, 4, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } else { bool checkCaseBounds = numImages % 128 != 0; if (numFilters <= 8) { dim3 threads(128); dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize); if (numFilters == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 1, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 1, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 2, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 2, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 3) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 3, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 3, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 4, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 4, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 5) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, 
cudaFuncCachePreferL1); kCNorm_fewfilter<1, 5, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 5, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 6) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 6, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 6, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 7) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 7, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 7, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 8) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 8, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 8, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } } else { dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kCNorm_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kCNorm_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } } cutilCheckMsg("convResponseNorm: kernel execution failed"); } void convContrastNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& meanDiffs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: 
(numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. */ void convResponseNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { int numImages = outGrads.getNumCols(); int imgPixels = outGrads.getNumRows() / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(outGrads.getNumRows() == numFilters * imgPixels); assert(denoms.isSameDims(outGrads)); assert(acts.isSameDims(denoms)); assert(!denoms.isTrans()); assert(!outGrads.isTrans()); assert(!acts.isTrans()); assert(!target.isTrans()); assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); target.resize(outGrads); assert(target.isContiguous()); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread)))); kRNormUndoPrelims<128, 4><<<blocks, threads>>>(acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale); // Now the main routine if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; int filtersPerThread = 4; int bx = 16; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); threads = dim3(bx, 16); blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (imgsPerThread == 8) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), 
acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } } else { int imgsPerThread = numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; threads = dim3(32, 4); blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 1, 2, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 1, 2, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 1, 2, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 1, 2, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } } cutilCheckMsg("kRNormUndo: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize */ void convResizeBilinear(NVMatrix& images, NVMatrix& target, int imgSize, int tgtSize, float scale) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int 
numChannels = images.getNumRows() / imgPixels; int numImages = images.getNumCols(); assert(images.getNumRows() == numChannels * imgPixels); target.resize(numChannels * tgtPixels, numImages); assert(target.isContiguous()); int numChunksX = DIVUP(tgtSize, 4); int numChunks = numChunksX * numChunksX; double imgCenter = imgSize * 0.5; double tgtCenter = tgtSize * 0.5; double centerScale = imgCenter - tgtCenter * scale; int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 16); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks); if (imgsPerThread == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kResizeBilinear<4, true>, cudaFuncCachePreferL1); kResizeBilinear<4, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { cudaFuncSetCacheConfig(kResizeBilinear<4, false>, cudaFuncCachePreferL1); kResizeBilinear<4, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kResizeBilinear<2, true>, cudaFuncCachePreferL1); kResizeBilinear<2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { cudaFuncSetCacheConfig(kResizeBilinear<2, false>, cudaFuncCachePreferL1); kResizeBilinear<2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kResizeBilinear<1, true>, cudaFuncCachePreferL1); kResizeBilinear<1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } else { cudaFuncSetCacheConfig(kResizeBilinear<1, false>, cudaFuncCachePreferL1); kResizeBilinear<1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale); } } cutilCheckMsg("convResizeBilinear: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToYUV(NVMatrix& images, NVMatrix& target) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = images.getNumRows() / 3; int numImages = images.getNumCols(); assert(images.getNumRows() == 3 * imgPixels); target.resize(3 * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToYUV<4, true>, cudaFuncCachePreferL1); kRGBToYUV<4, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToYUV<4, false>, cudaFuncCachePreferL1); kRGBToYUV<4, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToYUV<2, true>, cudaFuncCachePreferL1); kRGBToYUV<2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToYUV<2, false>, cudaFuncCachePreferL1); kRGBToYUV<2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToYUV<1, true>, cudaFuncCachePreferL1); kRGBToYUV<1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToYUV<1, false>, cudaFuncCachePreferL1); kRGBToYUV<1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } cutilCheckMsg("convRGBToYUV: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToLAB(NVMatrix& images, NVMatrix& target, bool center) { assert(!images.isTrans()); assert(!target.isTrans()); int imgPixels = images.getNumRows() / 3; int numImages = images.getNumCols(); assert(images.getNumRows() == 3 * imgPixels); target.resize(3 * imgPixels, numImages); assert(target.isContiguous()); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (center) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<4, true, true>, cudaFuncCachePreferL1); kRGBToLAB<4, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<4, false, true>, cudaFuncCachePreferL1); kRGBToLAB<4, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<4, true, false>, cudaFuncCachePreferL1); kRGBToLAB<4, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<4, false, false>, cudaFuncCachePreferL1); kRGBToLAB<4, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } else if (imgsPerThread == 2) { if (center) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<2, true, true>, cudaFuncCachePreferL1); kRGBToLAB<2, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<2, false, true>, cudaFuncCachePreferL1); kRGBToLAB<2, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<2, true, false>, cudaFuncCachePreferL1); kRGBToLAB<2, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<2, false, false>, cudaFuncCachePreferL1); kRGBToLAB<2, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } else { if (center) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<1, true, true>, cudaFuncCachePreferL1); kRGBToLAB<1, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<1, false, true>, cudaFuncCachePreferL1); kRGBToLAB<1, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<1, true, false>, cudaFuncCachePreferL1); kRGBToLAB<1, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } else { cudaFuncSetCacheConfig(kRGBToLAB<1, false, false>, cudaFuncCachePreferL1); kRGBToLAB<1, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride()); } } } cutilCheckMsg("convRGBToLAB: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) */ void convCrop(NVMatrix& imgs, NVMatrix& target, int imgSize, int tgtSize, int startY, int startX) { int numImages = imgs.getNumCols(); int imgPixels = imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int numChannels = imgs.getNumRows() / imgPixels; assert(imgs.getNumRows() == imgPixels * numChannels); assert(imgPixels == imgSize * imgSize); assert(imgSize - startY >= tgtSize); 
assert(imgSize - startX >= tgtSize); assert(startY >= 0); assert(startX >= 0); target.resize(numChannels * tgtPixels, numImages); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 blocks(DIVUP(numImages, 32 * imgsPerThread), numChannels * DIVUP(tgtPixels, 4)); dim3 threads(32, 4); if (imgsPerThread == 4) { if (checkCaseBounds) { kCrop<4, true><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { kCrop<4, false><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { kCrop<2, true><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { kCrop<2, false><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } else { if (checkCaseBounds) { kCrop<1, true><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } else { kCrop<1, false><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX); } } cutilCheckMsg("convCrop: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * ticas: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) (out) * * Computes TICA-style gradient for given feature maps * f(x) = exp(-(sum_i{x_i^2}^(1/2))) * dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps) * * eps added for numerical stability */ void convTICAGrad(NVMatrix& images, NVMatrix& ticas, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(!images.isTrans()); assert(images.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); assert(ticas.isSameDims(images)); assert(ticas.isContiguous()); if (scaleTarget == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } assert(target.isContiguous()); // TEMPORARY assert(numFilters > 8); assert(sizeX < 6); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (checkCaseBounds) { cudaFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kTICAGrad_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), ticas.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } else { cudaFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kTICAGrad_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), ticas.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } cutilCheckMsg("convTICAGrad: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) (out) * * Computes TICA-style gradient for given feature maps * f(x) = exp(-(sum_i{x_i^2}^(1/2))) * dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps) * * eps added for numerical 
stability */ void convTICA(NVMatrix& images, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(!images.isTrans()); assert(images.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); if (scaleTarget == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } assert(target.isContiguous()); // TEMPORARY assert(numFilters > 8); assert(sizeX < 6); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (checkCaseBounds) { cudaFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kTICA_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } else { cudaFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kTICA_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput); } cutilCheckMsg("convTICA: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * Note: at present, I have no code to compute the meanDiffs. So it should be set * to be equal to images. In other words, this isn't really doing contrast normalization, * just response normalization. */ void convContrastNormCrossMap(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(meanDiffs.isSameDims(images)); assert(sizeF > 0 && sizeF <= numFilters); assert(!meanDiffs.isTrans()); assert(!images.isTrans()); assert(images.isContiguous()); assert(meanDiffs.isContiguous()); assert(numFilters % 16 == 0); target.resize(images); denoms.resize(images); assert(target.isContiguous()); bool checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); if (blocked) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, true, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } else { cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, false, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, true, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } else { 
cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, false, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, addScale, powScale); } } cutilCheckMsg("convContrastNormCrossMap: kernel execution failed"); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. */ void convResponseNormCrossMapUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked, float scaleTargets, float scaleOutput) { int numImages = outGrads.getNumCols(); int imgPixels = outGrads.getNumRows() / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(sizeF > 0 && sizeF <= numFilters); assert(outGrads.getNumRows() == numFilters * imgPixels); assert(denoms.isSameDims(outGrads)); assert(acts.isSameDims(denoms)); assert(!denoms.isTrans()); assert(!outGrads.isTrans()); assert(!acts.isTrans()); assert(!target.isTrans()); assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); target.resize(outGrads); assert(target.isContiguous()); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread)))); kRNormUndoPrelims<128, 4><<<blocks, threads>>>(acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale); // Now the main routine dim3 threads2 = dim3(32, 4); dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; if (blocked) { if (scaleTargets == 0 && scaleOutput == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, true>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, false, true, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, true>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, false, false, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, true>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, true, true, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, true>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, true, false, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } } else { if (scaleTargets == 0 && 
scaleOutput == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, false>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, false, true, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, false>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, false, false, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, false>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, true, true, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, false>, cudaFuncCachePreferL1); kFRNormUndo<4, 32, 4, true, false, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale, scaleTargets, scaleOutput); } } } cutilCheckMsg("convResponseNormCrossMapUndo: kernel execution failed"); } void convResponseNormCrossMap(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) { convContrastNormCrossMap(images, images, denoms, target, numFilters, sizeF, addScale, powScale, blocked); }
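The host wrappers above all share one dispatch idiom: pick an images-per-thread factor from the divisibility of numImages (4 when it is a multiple of 128, 2 for a multiple of 64, otherwise 1; the large-region undo path uses 8/4/2 instead), set checkCaseBounds when numImages is not a multiple of 32*imgsPerThread, and then select the template instantiation that matches those values plus whether scaleTargets == 0 && scaleOutput == 1 describes a plain overwrite. The standalone C++ sketch below is not part of the original sources; it only reproduces that selection logic, with DIVUP re-declared for illustration, a printf standing in for the kernel launch, and the grid shape borrowed from convLocalAvgUndo.

#include <cstdio>

#define DIVUP(x, y) (((x) + (y) - 1) / (y))

template <int ImgsPerThread, bool CheckCaseBounds>
void launchSketch(int numImages, int imgSize, int numFilters) {
  // Stand-in for an instantiation such as kLocalAvgUndo<4, 32, ImgsPerThread, 4, ..., CheckCaseBounds>.
  int blocksX = DIVUP(numImages, 32 * ImgsPerThread) * imgSize;
  int blocksY = (numFilters / (4 * 4)) * imgSize;
  std::printf("imgsPerThread=%d checkCaseBounds=%d grid=(%d,%d) threads=(32,4)\n",
              ImgsPerThread, (int)CheckCaseBounds, blocksX, blocksY);
}

void dispatchSketch(int numImages, int imgSize, int numFilters) {
  // Same selection rules as the wrappers above.
  int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
  bool checkCaseBounds = numImages % (32 * imgsPerThread) != 0;
  if (imgsPerThread == 4) {
    if (checkCaseBounds) launchSketch<4, true>(numImages, imgSize, numFilters);
    else                 launchSketch<4, false>(numImages, imgSize, numFilters);
  } else if (imgsPerThread == 2) {
    if (checkCaseBounds) launchSketch<2, true>(numImages, imgSize, numFilters);
    else                 launchSketch<2, false>(numImages, imgSize, numFilters);
  } else {
    if (checkCaseBounds) launchSketch<1, true>(numImages, imgSize, numFilters);
    else                 launchSketch<1, false>(numImages, imgSize, numFilters);
  }
}

int main() {
  dispatchSketch(128, 32, 64);  // multiple of 128 -> <4, false>
  dispatchSketch(64, 32, 64);   // multiple of 64  -> <2, false>
  dispatchSketch(50, 32, 64);   // ragged batch    -> <1, true>
  return 0;
}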
648ee3dc101c867fb47faae2bbb3c6038422fdff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from magmablas/ztrtri_lower.cu, normal z -> c, Wed Jan 2 14:18:51 2019 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar This file implements lower case, and is called by ctrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_NONBATCHED #include "ctrtri.cuh" #include "ctrtri_lower_device.cuh" /******************************************************************************/ __global__ void ctrtri_diag_lower_kernel( magma_diag_t diag, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *d_dinvA) { ctrtri_diag_lower_device(diag, n, A, lda, d_dinvA); } /******************************************************************************/ __global__ void triple_cgemm16_part1_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm16_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm16_part2_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm16_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm32_part1_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm32_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm32_part2_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm32_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm64_part1_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm64_part2_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm_above64_part1_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm_above64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm_above64_part2_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm_above64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm_above64_part3_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int 
jb, int npages) { triple_cgemm_above64_part3_lower_device( n, Ain, lda, d_dinvA, jb, npages); }
648ee3dc101c867fb47faae2bbb3c6038422fdff.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from magmablas/ztrtri_lower.cu, normal z -> c, Wed Jan 2 14:18:51 2019 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar This file implements lower case, and is called by ctrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_NONBATCHED #include "ctrtri.cuh" #include "ctrtri_lower_device.cuh" /******************************************************************************/ __global__ void ctrtri_diag_lower_kernel( magma_diag_t diag, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *d_dinvA) { ctrtri_diag_lower_device(diag, n, A, lda, d_dinvA); } /******************************************************************************/ __global__ void triple_cgemm16_part1_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm16_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm16_part2_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm16_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm32_part1_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm32_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm32_part2_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm32_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm64_part1_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm64_part2_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm_above64_part1_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm_above64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm_above64_part2_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm_above64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_cgemm_above64_part3_lower_kernel( int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages) { triple_cgemm_above64_part3_lower_device( n, Ain, lda, d_dinvA, jb, 
npages); }
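The pair above shows what the hipify conversion amounted to for this translation unit: the .hip file is the .cu file with a generated banner comment and #include "hip/hip_runtime.h" prepended, and nothing else changed because the file only defines __global__ kernels and never launches one. For source files that do launch kernels, the hipify tools additionally rewrite the CUDA triple-chevron launch into a hipLaunchKernelGGL call. The fragment below is a minimal, hypothetical illustration of that rewrite; the kernel name, arguments, and launch configuration are invented for the example and are not taken from the MAGMA files above.

// Illustration only (hypothetical kernel, not part of the files above).
__global__ void scaleKernel(float* data, float alpha, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= alpha;
}

void launchScale(float* d_data, float alpha, int n, cudaStream_t stream)
{
    dim3 block(256);
    dim3 grid((n + 255) / 256);
    // CUDA triple-chevron launch as written in a .cu file:
    scaleKernel<<<grid, block, 0, stream>>>(d_data, alpha, n);
    // hipify would emit the HIP equivalent as:
    //   hipLaunchKernelGGL(scaleKernel, grid, block, 0, stream, d_data, alpha, n);
    // (HIP also accepts the triple-chevron form, so either spelling is valid in a .hip file.)
}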
6cb9b3bd8f88d79aa55df3c64fc1f6e7a17f7b2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "page_decode.cuh" #include <io/utilities/column_buffer.hpp> #include <cudf/hashing/detail/murmurhash3_x86_32.cuh> #include <rmm/exec_policy.hpp> #include <thrust/reduce.h> namespace cudf { namespace io { namespace parquet { namespace gpu { namespace { constexpr int decode_block_size = 128; constexpr int rolling_buf_size = decode_block_size * 2; /** * @brief Output a string descriptor * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dstv Pointer to row output data (string descriptor or 32-bit hash) */ template <typename state_buf> inline __device__ void gpuOutputString(volatile page_state_s* s, volatile state_buf* sb, int src_pos, void* dstv) { auto [ptr, len] = gpuGetStringData(s, sb, src_pos); // make sure to only hash `BYTE_ARRAY` when specified with the output type size if (s->dtype_len == 4 and (s->col.data_type & 7) == BYTE_ARRAY) { // Output hash. This hash value is used if the option to convert strings to // categoricals is enabled. The seed value is chosen arbitrarily. uint32_t constexpr hash_seed = 33; cudf::string_view const sv{ptr, static_cast<size_type>(len)}; *static_cast<uint32_t*>(dstv) = cudf::hashing::detail::MurmurHash3_x86_32<cudf::string_view>{hash_seed}(sv); } else { // Output string descriptor auto* dst = static_cast<string_index_pair*>(dstv); dst->first = ptr; dst->second = len; } } /** * @brief Output a boolean * * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename state_buf> inline __device__ void gpuOutputBoolean(volatile state_buf* sb, int src_pos, uint8_t* dst) { *dst = sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)]; } /** * @brief Store a 32-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint32_t* dst, uint8_t const* src8, uint32_t dict_pos, uint32_t dict_size) { uint32_t bytebuf; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<uint32_t const*>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } *dst = bytebuf; } /** * @brief Store a 64-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint2* dst, uint8_t const* src8, uint32_t dict_pos, uint32_t dict_size) { uint2 v; unsigned int ofs = 3 & 
reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos < dict_size) { v.x = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); if (ofs) { uint32_t next = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } } else { v.x = v.y = 0; } *dst = v; } /** * @brief Convert an INT96 Spark timestamp to 64-bit timestamp * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[out] dst Pointer to row output data */ template <typename state_buf> inline __device__ void gpuOutputInt96Timestamp(volatile page_state_s* s, volatile state_buf* sb, int src_pos, int64_t* dst) { using cuda::std::chrono::duration_cast; uint8_t const* src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 >= dict_size) { *dst = 0; return; } uint3 v; int64_t nanos, days; v.x = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); v.z = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 8); if (ofs) { uint32_t next = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 12); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, v.z, ofs); v.z = __funnelshift_r(v.z, next, ofs); } nanos = v.y; nanos <<= 32; nanos |= v.x; // Convert from Julian day at noon to UTC seconds days = static_cast<int32_t>(v.z); cudf::duration_D d_d{ days - 2440588}; // TBD: Should be noon instead of midnight, but this matches pyarrow *dst = [&]() { switch (s->col.ts_clock_rate) { case 1: // seconds return duration_cast<duration_s>(d_d).count() + duration_cast<duration_s>(duration_ns{nanos}).count(); case 1'000: // milliseconds return duration_cast<duration_ms>(d_d).count() + duration_cast<duration_ms>(duration_ns{nanos}).count(); case 1'000'000: // microseconds return duration_cast<duration_us>(d_d).count() + duration_cast<duration_us>(duration_ns{nanos}).count(); case 1'000'000'000: // nanoseconds default: return duration_cast<cudf::duration_ns>(d_d).count() + nanos; } }(); } /** * @brief Output a 64-bit timestamp * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename state_buf> inline __device__ void gpuOutputInt64Timestamp(volatile page_state_s* s, volatile state_buf* sb, int src_pos, int64_t* dst) { uint8_t const* src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? 
sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint2 v; int64_t val; int32_t ts_scale; v.x = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); if (ofs) { uint32_t next = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } val = v.y; val <<= 32; val |= v.x; // Output to desired clock rate ts_scale = s->ts_scale; if (ts_scale < 0) { // round towards negative infinity int sign = (val < 0); ts = ((val + sign) / -ts_scale) + sign; } else { ts = val * ts_scale; } } else { ts = 0; } *dst = ts; } /** * @brief Output a byte array as int. * * @param[in] ptr Pointer to the byte array * @param[in] len Byte array length * @param[out] dst Pointer to row output data */ template <typename T> __device__ void gpuOutputByteArrayAsInt(char const* ptr, int32_t len, T* dst) { T unscaled = 0; for (auto i = 0; i < len; i++) { uint8_t v = ptr[i]; unscaled = (unscaled << 8) | v; } // Shift the unscaled value up and back down when it isn't all 8 bytes, // which sign extend the value for correctly representing negative numbers. unscaled <<= (sizeof(T) - len) * 8; unscaled >>= (sizeof(T) - len) * 8; *dst = unscaled; } /** * @brief Output a fixed-length byte array as int. * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T, typename state_buf> __device__ void gpuOutputFixedLenByteArrayAsInt(volatile page_state_s* s, volatile state_buf* sb, int src_pos, T* dst) { uint32_t const dtype_len_in = s->dtype_len_in; uint8_t const* data = s->dict_base ? s->dict_base : s->data_start; uint32_t const pos = (s->dict_base ? ((s->dict_bits > 0) ? sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0) : src_pos) * dtype_len_in; uint32_t const dict_size = s->dict_size; T unscaled = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint32_t v = (pos + i < dict_size) ? data[pos + i] : 0; unscaled = (unscaled << 8) | v; } // Shift the unscaled value up and back down when it isn't all 8 bytes, // which sign extend the value for correctly representing negative numbers. if (dtype_len_in < sizeof(T)) { unscaled <<= (sizeof(T) - dtype_len_in) * 8; unscaled >>= (sizeof(T) - dtype_len_in) * 8; } *dst = unscaled; } /** * @brief Output a small fixed-length value * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T, typename state_buf> inline __device__ void gpuOutputFast(volatile page_state_s* s, volatile state_buf* sb, int src_pos, T* dst) { uint8_t const* dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? 
sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; gpuStoreOutput(dst, dict, dict_pos, dict_size); } /** * @brief Output a N-byte value * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst8 Pointer to row output data * @param[in] len Length of element */ template <typename state_buf> static __device__ void gpuOutputGeneric( volatile page_state_s* s, volatile state_buf* sb, int src_pos, uint8_t* dst8, int len) { uint8_t const* dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; if (len & 3) { // Generic slow path for (unsigned int i = 0; i < len; i++) { dst8[i] = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; } } else { // Copy 4 bytes at a time uint8_t const* src8 = dict; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits for (unsigned int i = 0; i < len; i += 4) { uint32_t bytebuf; if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<uint32_t const*>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } dict_pos += 4; *reinterpret_cast<uint32_t*>(dst8 + i) = bytebuf; } } } /** * @brief Kernel for computing the column data stored in the pages * * This function will write the page data and the page data's validity to the * output specified in the page's column chunk. If necessary, additional * conversion will be performed to translate from the Parquet datatype to * desired output datatype (ex. 32-bit to 16-bit, string to hash). * * @param pages List of pages * @param chunks List of column chunks * @param min_row Row index to start reading at * @param num_rows Maximum number of rows to read */ template <int lvl_buf_size, typename level_t> __global__ void __launch_bounds__(decode_block_size) gpuDecodePageData( PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows) { __shared__ __align__(16) page_state_s state_g; __shared__ __align__(16) page_state_buffers_s<rolling_buf_size, rolling_buf_size, rolling_buf_size> state_buffers; page_state_s* const s = &state_g; auto* const sb = &state_buffers; int page_idx = blockIdx.x; int t = threadIdx.x; int out_thread0; [[maybe_unused]] null_count_back_copier _{s, t}; if (!setupLocalPageInfo( s, &pages[page_idx], chunks, min_row, num_rows, mask_filter{KERNEL_MASK_GENERAL}, true)) { return; } bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0; if (s->dict_base) { out_thread0 = (s->dict_bits > 0) ? 64 : 32; } else { switch (s->col.data_type & 7) { case BOOLEAN: [[fallthrough]]; case BYTE_ARRAY: [[fallthrough]]; case FIXED_LEN_BYTE_ARRAY: out_thread0 = 64; break; default: out_thread0 = 32; } } PageNestingDecodeInfo* nesting_info_base = s->nesting_info; __shared__ level_t rep[rolling_buf_size]; // circular buffer of repetition level values __shared__ level_t def[rolling_buf_size]; // circular buffer of definition level values // skipped_leaf_values will always be 0 for flat hierarchies. 
uint32_t skipped_leaf_values = s->page.skipped_leaf_values; while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { int target_pos; int src_pos = s->src_pos; if (t < out_thread0) { target_pos = min(src_pos + 2 * (decode_block_size - out_thread0), s->nz_count + (decode_block_size - out_thread0)); } else { target_pos = min(s->nz_count, src_pos + decode_block_size - out_thread0); if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); } } __syncthreads(); if (t < 32) { // decode repetition and definition levels. // - update validity vectors // - updates offsets (for nested columns) // - produces non-NULL value indices in s->nz_idx for subsequent decoding gpuDecodeLevels<lvl_buf_size, level_t>(s, sb, target_pos, rep, def, t); } else if (t < out_thread0) { // skipped_leaf_values will always be 0 for flat hierarchies. uint32_t src_target_pos = target_pos + skipped_leaf_values; // WARP1: Decode dictionary indices, booleans or string positions if (s->dict_base) { src_target_pos = gpuDecodeDictionaryIndices<false>(s, sb, src_target_pos, t & 0x1f).first; } else if ((s->col.data_type & 7) == BOOLEAN) { src_target_pos = gpuDecodeRleBooleans(s, sb, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BYTE_ARRAY or (s->col.data_type & 7) == FIXED_LEN_BYTE_ARRAY) { gpuInitStringDescriptors<false>(s, sb, src_target_pos, t & 0x1f); } if (t == 32) { *(volatile int32_t*)&s->dict_pos = src_target_pos; } } else { // WARP1..WARP3: Decode values int const dtype = s->col.data_type & 7; src_pos += t - out_thread0; // the position in the output column/buffer int dst_pos = sb->nz_idx[rolling_index<rolling_buf_size>(src_pos)]; // for the flat hierarchy case we will be reading from the beginning of the value stream, // regardless of the value of first_row. so adjust our destination offset accordingly. // example: // - user has passed skip_rows = 2, so our first_row to output is 2 // - the row values we get from nz_idx will be // 0, 1, 2, 3, 4 .... // - by shifting these values by first_row, the sequence becomes // -1, -2, 0, 1, 2 ... // - so we will end up ignoring the first two input rows, and input rows 2..n will // get written to the output starting at position 0. // if (!has_repetition) { dst_pos -= s->first_row; } // target_pos will always be properly bounded by num_rows, but dst_pos may be negative (values // before first_row) in the flat hierarchy case. if (src_pos < target_pos && dst_pos >= 0) { // src_pos represents the logical row position we want to read from. But in the case of // nested hierarchies, there is no 1:1 mapping of rows to values. So our true read position // has to take into account the # of values we have to skip in the page to get to the // desired logical row. For flat hierarchies, skipped_leaf_values will always be 0. 
uint32_t val_src_pos = src_pos + skipped_leaf_values; // nesting level that is storing actual leaf values int leaf_level_index = s->col.max_nesting_depth - 1; uint32_t dtype_len = s->dtype_len; void* dst = nesting_info_base[leaf_level_index].data_out + static_cast<size_t>(dst_pos) * dtype_len; if (dtype == BYTE_ARRAY) { if (s->col.converted_type == DECIMAL) { auto const [ptr, len] = gpuGetStringData(s, sb, val_src_pos); auto const decimal_precision = s->col.decimal_precision; if (decimal_precision <= MAX_DECIMAL32_PRECISION) { gpuOutputByteArrayAsInt(ptr, len, static_cast<int32_t*>(dst)); } else if (decimal_precision <= MAX_DECIMAL64_PRECISION) { gpuOutputByteArrayAsInt(ptr, len, static_cast<int64_t*>(dst)); } else { gpuOutputByteArrayAsInt(ptr, len, static_cast<__int128_t*>(dst)); } } else { gpuOutputString(s, sb, val_src_pos, dst); } } else if (dtype == BOOLEAN) { gpuOutputBoolean(sb, val_src_pos, static_cast<uint8_t*>(dst)); } else if (s->col.converted_type == DECIMAL) { switch (dtype) { case INT32: gpuOutputFast(s, sb, val_src_pos, static_cast<uint32_t*>(dst)); break; case INT64: gpuOutputFast(s, sb, val_src_pos, static_cast<uint2*>(dst)); break; default: if (s->dtype_len_in <= sizeof(int32_t)) { gpuOutputFixedLenByteArrayAsInt(s, sb, val_src_pos, static_cast<int32_t*>(dst)); } else if (s->dtype_len_in <= sizeof(int64_t)) { gpuOutputFixedLenByteArrayAsInt(s, sb, val_src_pos, static_cast<int64_t*>(dst)); } else { gpuOutputFixedLenByteArrayAsInt(s, sb, val_src_pos, static_cast<__int128_t*>(dst)); } break; } } else if (dtype == FIXED_LEN_BYTE_ARRAY) { gpuOutputString(s, sb, val_src_pos, dst); } else if (dtype == INT96) { gpuOutputInt96Timestamp(s, sb, val_src_pos, static_cast<int64_t*>(dst)); } else if (dtype_len == 8) { if (s->dtype_len_in == 4) { // Reading INT32 TIME_MILLIS into 64-bit DURATION_MILLISECONDS // TIME_MILLIS is the only duration type stored as int32: // https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#deprecated-time-convertedtype gpuOutputFast(s, sb, val_src_pos, static_cast<uint32_t*>(dst)); } else if (s->ts_scale) { gpuOutputInt64Timestamp(s, sb, val_src_pos, static_cast<int64_t*>(dst)); } else { gpuOutputFast(s, sb, val_src_pos, static_cast<uint2*>(dst)); } } else if (dtype_len == 4) { gpuOutputFast(s, sb, val_src_pos, static_cast<uint32_t*>(dst)); } else { gpuOutputGeneric(s, sb, val_src_pos, static_cast<uint8_t*>(dst), dtype_len); } } if (t == out_thread0) { *(volatile int32_t*)&s->src_pos = target_pos; } } __syncthreads(); } } struct mask_tform { __device__ uint32_t operator()(PageInfo const& p) { return p.kernel_mask; } }; } // anonymous namespace uint32_t GetAggregatedDecodeKernelMask(cudf::detail::hostdevice_vector<PageInfo>& pages, rmm::cuda_stream_view stream) { // determine which kernels to invoke auto mask_iter = thrust::make_transform_iterator(pages.d_begin(), mask_tform{}); return thrust::reduce( rmm::exec_policy(stream), mask_iter, mask_iter + pages.size(), 0U, thrust::bit_or<uint32_t>{}); } /** * @copydoc cudf::io::parquet::gpu::DecodePageData */ void __host__ DecodePageData(cudf::detail::hostdevice_vector<PageInfo>& pages, cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks, size_t num_rows, size_t min_row, int level_type_size, rmm::cuda_stream_view stream) { CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); dim3 dim_block(decode_block_size, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page if (level_type_size == 1) { hipLaunchKernelGGL(( gpuDecodePageData<rolling_buf_size, uint8_t>) , 
dim3(dim_grid), dim3(dim_block), 0, stream.value(), pages.device_ptr(), chunks, min_row, num_rows); } else { hipLaunchKernelGGL(( gpuDecodePageData<rolling_buf_size, uint16_t>) , dim3(dim_grid), dim3(dim_block), 0, stream.value(), pages.device_ptr(), chunks, min_row, num_rows); } } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
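A note on the decimal byte-array path in the file above: gpuOutputByteArrayAsInt accumulates the big-endian bytes and then sign-extends with a shift-up/shift-down pair. The standalone host-side sketch below is illustrative only, is not part of either file, and assumes len <= sizeof(T) just as the kernel does.

// Host-side sketch of the byte-array decimal sign extension (illustrative).
#include <cstdint>
#include <cstdio>

template <typename T>
T byte_array_as_int(uint8_t const* ptr, int len) {
  T unscaled = 0;
  for (int i = 0; i < len; i++) {
    unscaled = (unscaled << 8) | ptr[i];  // big-endian accumulate
  }
  // Shift the value up and back down so the sign bit of the top input byte
  // propagates when the value occupies fewer than sizeof(T) bytes.
  unscaled <<= (sizeof(T) - len) * 8;
  unscaled >>= (sizeof(T) - len) * 8;
  return unscaled;
}

int main() {
  uint8_t minus_one[]    = {0xFF, 0xFF};  // two-byte big-endian -1
  uint8_t two_five_six[] = {0x01, 0x00};  // two-byte big-endian 256
  printf("%d %d\n",
         (int)byte_array_as_int<int32_t>(minus_one, 2),
         (int)byte_array_as_int<int32_t>(two_five_six, 2));  // prints: -1 256
  return 0;
}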
6cb9b3bd8f88d79aa55df3c64fc1f6e7a17f7b2a.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "page_decode.cuh" #include <io/utilities/column_buffer.hpp> #include <cudf/hashing/detail/murmurhash3_x86_32.cuh> #include <rmm/exec_policy.hpp> #include <thrust/reduce.h> namespace cudf { namespace io { namespace parquet { namespace gpu { namespace { constexpr int decode_block_size = 128; constexpr int rolling_buf_size = decode_block_size * 2; /** * @brief Output a string descriptor * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dstv Pointer to row output data (string descriptor or 32-bit hash) */ template <typename state_buf> inline __device__ void gpuOutputString(volatile page_state_s* s, volatile state_buf* sb, int src_pos, void* dstv) { auto [ptr, len] = gpuGetStringData(s, sb, src_pos); // make sure to only hash `BYTE_ARRAY` when specified with the output type size if (s->dtype_len == 4 and (s->col.data_type & 7) == BYTE_ARRAY) { // Output hash. This hash value is used if the option to convert strings to // categoricals is enabled. The seed value is chosen arbitrarily. uint32_t constexpr hash_seed = 33; cudf::string_view const sv{ptr, static_cast<size_type>(len)}; *static_cast<uint32_t*>(dstv) = cudf::hashing::detail::MurmurHash3_x86_32<cudf::string_view>{hash_seed}(sv); } else { // Output string descriptor auto* dst = static_cast<string_index_pair*>(dstv); dst->first = ptr; dst->second = len; } } /** * @brief Output a boolean * * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename state_buf> inline __device__ void gpuOutputBoolean(volatile state_buf* sb, int src_pos, uint8_t* dst) { *dst = sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)]; } /** * @brief Store a 32-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint32_t* dst, uint8_t const* src8, uint32_t dict_pos, uint32_t dict_size) { uint32_t bytebuf; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<uint32_t const*>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } *dst = bytebuf; } /** * @brief Store a 64-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint2* dst, uint8_t const* src8, uint32_t dict_pos, uint32_t dict_size) { uint2 v; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes 
-> bits if (dict_pos < dict_size) { v.x = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); if (ofs) { uint32_t next = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } } else { v.x = v.y = 0; } *dst = v; } /** * @brief Convert an INT96 Spark timestamp to 64-bit timestamp * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[out] dst Pointer to row output data */ template <typename state_buf> inline __device__ void gpuOutputInt96Timestamp(volatile page_state_s* s, volatile state_buf* sb, int src_pos, int64_t* dst) { using cuda::std::chrono::duration_cast; uint8_t const* src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 >= dict_size) { *dst = 0; return; } uint3 v; int64_t nanos, days; v.x = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); v.z = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 8); if (ofs) { uint32_t next = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 12); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, v.z, ofs); v.z = __funnelshift_r(v.z, next, ofs); } nanos = v.y; nanos <<= 32; nanos |= v.x; // Convert from Julian day at noon to UTC seconds days = static_cast<int32_t>(v.z); cudf::duration_D d_d{ days - 2440588}; // TBD: Should be noon instead of midnight, but this matches pyarrow *dst = [&]() { switch (s->col.ts_clock_rate) { case 1: // seconds return duration_cast<duration_s>(d_d).count() + duration_cast<duration_s>(duration_ns{nanos}).count(); case 1'000: // milliseconds return duration_cast<duration_ms>(d_d).count() + duration_cast<duration_ms>(duration_ns{nanos}).count(); case 1'000'000: // microseconds return duration_cast<duration_us>(d_d).count() + duration_cast<duration_us>(duration_ns{nanos}).count(); case 1'000'000'000: // nanoseconds default: return duration_cast<cudf::duration_ns>(d_d).count() + nanos; } }(); } /** * @brief Output a 64-bit timestamp * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename state_buf> inline __device__ void gpuOutputInt64Timestamp(volatile page_state_s* s, volatile state_buf* sb, int src_pos, int64_t* dst) { uint8_t const* src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? 
sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint2 v; int64_t val; int32_t ts_scale; v.x = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); if (ofs) { uint32_t next = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } val = v.y; val <<= 32; val |= v.x; // Output to desired clock rate ts_scale = s->ts_scale; if (ts_scale < 0) { // round towards negative infinity int sign = (val < 0); ts = ((val + sign) / -ts_scale) + sign; } else { ts = val * ts_scale; } } else { ts = 0; } *dst = ts; } /** * @brief Output a byte array as int. * * @param[in] ptr Pointer to the byte array * @param[in] len Byte array length * @param[out] dst Pointer to row output data */ template <typename T> __device__ void gpuOutputByteArrayAsInt(char const* ptr, int32_t len, T* dst) { T unscaled = 0; for (auto i = 0; i < len; i++) { uint8_t v = ptr[i]; unscaled = (unscaled << 8) | v; } // Shift the unscaled value up and back down when it isn't all 8 bytes, // which sign extend the value for correctly representing negative numbers. unscaled <<= (sizeof(T) - len) * 8; unscaled >>= (sizeof(T) - len) * 8; *dst = unscaled; } /** * @brief Output a fixed-length byte array as int. * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T, typename state_buf> __device__ void gpuOutputFixedLenByteArrayAsInt(volatile page_state_s* s, volatile state_buf* sb, int src_pos, T* dst) { uint32_t const dtype_len_in = s->dtype_len_in; uint8_t const* data = s->dict_base ? s->dict_base : s->data_start; uint32_t const pos = (s->dict_base ? ((s->dict_bits > 0) ? sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0) : src_pos) * dtype_len_in; uint32_t const dict_size = s->dict_size; T unscaled = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint32_t v = (pos + i < dict_size) ? data[pos + i] : 0; unscaled = (unscaled << 8) | v; } // Shift the unscaled value up and back down when it isn't all 8 bytes, // which sign extend the value for correctly representing negative numbers. if (dtype_len_in < sizeof(T)) { unscaled <<= (sizeof(T) - dtype_len_in) * 8; unscaled >>= (sizeof(T) - dtype_len_in) * 8; } *dst = unscaled; } /** * @brief Output a small fixed-length value * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T, typename state_buf> inline __device__ void gpuOutputFast(volatile page_state_s* s, volatile state_buf* sb, int src_pos, T* dst) { uint8_t const* dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? 
sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; gpuStoreOutput(dst, dict, dict_pos, dict_size); } /** * @brief Output a N-byte value * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst8 Pointer to row output data * @param[in] len Length of element */ template <typename state_buf> static __device__ void gpuOutputGeneric( volatile page_state_s* s, volatile state_buf* sb, int src_pos, uint8_t* dst8, int len) { uint8_t const* dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; if (len & 3) { // Generic slow path for (unsigned int i = 0; i < len; i++) { dst8[i] = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; } } else { // Copy 4 bytes at a time uint8_t const* src8 = dict; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits for (unsigned int i = 0; i < len; i += 4) { uint32_t bytebuf; if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<uint32_t const*>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } dict_pos += 4; *reinterpret_cast<uint32_t*>(dst8 + i) = bytebuf; } } } /** * @brief Kernel for computing the column data stored in the pages * * This function will write the page data and the page data's validity to the * output specified in the page's column chunk. If necessary, additional * conversion will be performed to translate from the Parquet datatype to * desired output datatype (ex. 32-bit to 16-bit, string to hash). * * @param pages List of pages * @param chunks List of column chunks * @param min_row Row index to start reading at * @param num_rows Maximum number of rows to read */ template <int lvl_buf_size, typename level_t> __global__ void __launch_bounds__(decode_block_size) gpuDecodePageData( PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows) { __shared__ __align__(16) page_state_s state_g; __shared__ __align__(16) page_state_buffers_s<rolling_buf_size, rolling_buf_size, rolling_buf_size> state_buffers; page_state_s* const s = &state_g; auto* const sb = &state_buffers; int page_idx = blockIdx.x; int t = threadIdx.x; int out_thread0; [[maybe_unused]] null_count_back_copier _{s, t}; if (!setupLocalPageInfo( s, &pages[page_idx], chunks, min_row, num_rows, mask_filter{KERNEL_MASK_GENERAL}, true)) { return; } bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0; if (s->dict_base) { out_thread0 = (s->dict_bits > 0) ? 64 : 32; } else { switch (s->col.data_type & 7) { case BOOLEAN: [[fallthrough]]; case BYTE_ARRAY: [[fallthrough]]; case FIXED_LEN_BYTE_ARRAY: out_thread0 = 64; break; default: out_thread0 = 32; } } PageNestingDecodeInfo* nesting_info_base = s->nesting_info; __shared__ level_t rep[rolling_buf_size]; // circular buffer of repetition level values __shared__ level_t def[rolling_buf_size]; // circular buffer of definition level values // skipped_leaf_values will always be 0 for flat hierarchies. 
uint32_t skipped_leaf_values = s->page.skipped_leaf_values; while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { int target_pos; int src_pos = s->src_pos; if (t < out_thread0) { target_pos = min(src_pos + 2 * (decode_block_size - out_thread0), s->nz_count + (decode_block_size - out_thread0)); } else { target_pos = min(s->nz_count, src_pos + decode_block_size - out_thread0); if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); } } __syncthreads(); if (t < 32) { // decode repetition and definition levels. // - update validity vectors // - updates offsets (for nested columns) // - produces non-NULL value indices in s->nz_idx for subsequent decoding gpuDecodeLevels<lvl_buf_size, level_t>(s, sb, target_pos, rep, def, t); } else if (t < out_thread0) { // skipped_leaf_values will always be 0 for flat hierarchies. uint32_t src_target_pos = target_pos + skipped_leaf_values; // WARP1: Decode dictionary indices, booleans or string positions if (s->dict_base) { src_target_pos = gpuDecodeDictionaryIndices<false>(s, sb, src_target_pos, t & 0x1f).first; } else if ((s->col.data_type & 7) == BOOLEAN) { src_target_pos = gpuDecodeRleBooleans(s, sb, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BYTE_ARRAY or (s->col.data_type & 7) == FIXED_LEN_BYTE_ARRAY) { gpuInitStringDescriptors<false>(s, sb, src_target_pos, t & 0x1f); } if (t == 32) { *(volatile int32_t*)&s->dict_pos = src_target_pos; } } else { // WARP1..WARP3: Decode values int const dtype = s->col.data_type & 7; src_pos += t - out_thread0; // the position in the output column/buffer int dst_pos = sb->nz_idx[rolling_index<rolling_buf_size>(src_pos)]; // for the flat hierarchy case we will be reading from the beginning of the value stream, // regardless of the value of first_row. so adjust our destination offset accordingly. // example: // - user has passed skip_rows = 2, so our first_row to output is 2 // - the row values we get from nz_idx will be // 0, 1, 2, 3, 4 .... // - by shifting these values by first_row, the sequence becomes // -1, -2, 0, 1, 2 ... // - so we will end up ignoring the first two input rows, and input rows 2..n will // get written to the output starting at position 0. // if (!has_repetition) { dst_pos -= s->first_row; } // target_pos will always be properly bounded by num_rows, but dst_pos may be negative (values // before first_row) in the flat hierarchy case. if (src_pos < target_pos && dst_pos >= 0) { // src_pos represents the logical row position we want to read from. But in the case of // nested hierarchies, there is no 1:1 mapping of rows to values. So our true read position // has to take into account the # of values we have to skip in the page to get to the // desired logical row. For flat hierarchies, skipped_leaf_values will always be 0. 
uint32_t val_src_pos = src_pos + skipped_leaf_values; // nesting level that is storing actual leaf values int leaf_level_index = s->col.max_nesting_depth - 1; uint32_t dtype_len = s->dtype_len; void* dst = nesting_info_base[leaf_level_index].data_out + static_cast<size_t>(dst_pos) * dtype_len; if (dtype == BYTE_ARRAY) { if (s->col.converted_type == DECIMAL) { auto const [ptr, len] = gpuGetStringData(s, sb, val_src_pos); auto const decimal_precision = s->col.decimal_precision; if (decimal_precision <= MAX_DECIMAL32_PRECISION) { gpuOutputByteArrayAsInt(ptr, len, static_cast<int32_t*>(dst)); } else if (decimal_precision <= MAX_DECIMAL64_PRECISION) { gpuOutputByteArrayAsInt(ptr, len, static_cast<int64_t*>(dst)); } else { gpuOutputByteArrayAsInt(ptr, len, static_cast<__int128_t*>(dst)); } } else { gpuOutputString(s, sb, val_src_pos, dst); } } else if (dtype == BOOLEAN) { gpuOutputBoolean(sb, val_src_pos, static_cast<uint8_t*>(dst)); } else if (s->col.converted_type == DECIMAL) { switch (dtype) { case INT32: gpuOutputFast(s, sb, val_src_pos, static_cast<uint32_t*>(dst)); break; case INT64: gpuOutputFast(s, sb, val_src_pos, static_cast<uint2*>(dst)); break; default: if (s->dtype_len_in <= sizeof(int32_t)) { gpuOutputFixedLenByteArrayAsInt(s, sb, val_src_pos, static_cast<int32_t*>(dst)); } else if (s->dtype_len_in <= sizeof(int64_t)) { gpuOutputFixedLenByteArrayAsInt(s, sb, val_src_pos, static_cast<int64_t*>(dst)); } else { gpuOutputFixedLenByteArrayAsInt(s, sb, val_src_pos, static_cast<__int128_t*>(dst)); } break; } } else if (dtype == FIXED_LEN_BYTE_ARRAY) { gpuOutputString(s, sb, val_src_pos, dst); } else if (dtype == INT96) { gpuOutputInt96Timestamp(s, sb, val_src_pos, static_cast<int64_t*>(dst)); } else if (dtype_len == 8) { if (s->dtype_len_in == 4) { // Reading INT32 TIME_MILLIS into 64-bit DURATION_MILLISECONDS // TIME_MILLIS is the only duration type stored as int32: // https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#deprecated-time-convertedtype gpuOutputFast(s, sb, val_src_pos, static_cast<uint32_t*>(dst)); } else if (s->ts_scale) { gpuOutputInt64Timestamp(s, sb, val_src_pos, static_cast<int64_t*>(dst)); } else { gpuOutputFast(s, sb, val_src_pos, static_cast<uint2*>(dst)); } } else if (dtype_len == 4) { gpuOutputFast(s, sb, val_src_pos, static_cast<uint32_t*>(dst)); } else { gpuOutputGeneric(s, sb, val_src_pos, static_cast<uint8_t*>(dst), dtype_len); } } if (t == out_thread0) { *(volatile int32_t*)&s->src_pos = target_pos; } } __syncthreads(); } } struct mask_tform { __device__ uint32_t operator()(PageInfo const& p) { return p.kernel_mask; } }; } // anonymous namespace uint32_t GetAggregatedDecodeKernelMask(cudf::detail::hostdevice_vector<PageInfo>& pages, rmm::cuda_stream_view stream) { // determine which kernels to invoke auto mask_iter = thrust::make_transform_iterator(pages.d_begin(), mask_tform{}); return thrust::reduce( rmm::exec_policy(stream), mask_iter, mask_iter + pages.size(), 0U, thrust::bit_or<uint32_t>{}); } /** * @copydoc cudf::io::parquet::gpu::DecodePageData */ void __host__ DecodePageData(cudf::detail::hostdevice_vector<PageInfo>& pages, cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks, size_t num_rows, size_t min_row, int level_type_size, rmm::cuda_stream_view stream) { CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); dim3 dim_block(decode_block_size, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page if (level_type_size == 1) { gpuDecodePageData<rolling_buf_size, uint8_t> <<<dim_grid, dim_block, 0, 
stream.value()>>>(pages.device_ptr(), chunks, min_row, num_rows); } else { gpuDecodePageData<rolling_buf_size, uint16_t> <<<dim_grid, dim_block, 0, stream.value()>>>(pages.device_ptr(), chunks, min_row, num_rows); } } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
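For reference, gpuOutputInt96Timestamp in the file above splits an INT96 value into intra-day nanoseconds plus a Julian day number and rebases it on the Unix epoch (Julian day 2440588). The sketch below is illustrative only, covers just the nanosecond clock rate, and is not taken from cuDF.

// Illustrative INT96 -> epoch-nanoseconds conversion (nanosecond clock rate only).
#include <cstdint>
#include <cstdio>

int64_t int96_to_epoch_ns(uint32_t nanos_lo, uint32_t nanos_hi, int32_t julian_day) {
  int64_t nanos = (static_cast<int64_t>(nanos_hi) << 32) | nanos_lo;    // time within the day
  int64_t days  = static_cast<int64_t>(julian_day) - 2440588;           // 2440588 = Julian day of 1970-01-01
  return days * 86400LL * 1000000000LL + nanos;                         // rebase on the Unix epoch
}

int main() {
  // Julian day 2440589 is 1970-01-02; with zero nanos this is exactly one day.
  printf("%lld\n", (long long)int96_to_epoch_ns(0u, 0u, 2440589));  // 86400000000000
  return 0;
}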
3ee02a78c92a5621586e30ce5ca285f3547bd41c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <vector> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/hostdevice.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/gpu/graph_send_recv_funcs.h" #include "paddle/phi/kernels/graph_send_recv_grad_kernel.h" namespace phi { template <typename Context, typename T, typename IndexT> void GraphSendRecvGradOpCUDAKernelLaunchHelper( const Context& ctx, const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& src_index, const DenseTensor& dst_index, const std::string& pool_type, DenseTensor* x_grad, const DenseTensor* dst_count = nullptr, const DenseTensor* out = nullptr) { const int& index_size = dst_index.dims()[0]; ctx.template Alloc<T>(x_grad); T* p_output = x_grad->data<T>(); const auto& src_dims = x.dims(); int64_t memset_size = 1; for (int i = 0; i < src_dims.size(); ++i) { memset_size *= src_dims[i]; } const size_t& memset_bytes = memset_size * sizeof(T); #ifdef PADDLE_WITH_HIP hipMemset(p_output, 0, memset_bytes); #else hipMemset(p_output, 0, memset_bytes); #endif if (index_size == 0) return; int64_t slice_size = 1; for (int i = 1; i < src_dims.size(); ++i) { slice_size *= src_dims[i]; } const T* p_src = out_grad.data<T>(); const IndexT* s_index = src_index.data<IndexT>(); const IndexT* d_index = dst_index.data<IndexT>(); #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int64_t n = slice_size * index_size; int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t grid_tmp = (n + block - 1) / block; int64_t grid = grid_tmp < max_grid_dimx ? 
grid_tmp : max_grid_dimx; int64_t input_size = src_dims[0]; if (pool_type == "SUM") { GraphSendRecvSumCUDAFunctor<T, IndexT> functor; hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>>) , dim3(grid), dim3(block), 0, ctx.stream(), p_src, d_index, s_index, p_output, index_size, slice_size, functor); } else if (pool_type == "MEAN") { const int32_t* s_count = dst_count->data<int32_t>(); hipLaunchKernelGGL(( ManipulateMeanGradCUDAKernel<T, IndexT>), dim3(grid), dim3(block), 0, ctx.stream(), p_src, d_index, s_index, p_output, index_size, slice_size, s_count); } else if (pool_type == "MAX" || pool_type == "MIN") { const T* ptr_input = x.data<T>(); const T* ptr_output = out->data<T>(); hipLaunchKernelGGL(( ManipulateMinMaxGradCUDAKernel<T, IndexT>) , dim3(grid), dim3(block), 0, ctx.stream(), p_src, d_index, s_index, p_output, index_size, slice_size, ptr_input, ptr_output); } } template <typename T, typename Context> void GraphSendRecvGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& src_index, const DenseTensor& dst_index, const paddle::optional<DenseTensor>& out, const paddle::optional<DenseTensor>& dst_count, const DenseTensor& out_grad, const std::string& pool_type, DenseTensor* x_grad) { auto index_type = src_index.dtype(); if (index_type == phi::DataType::INT32) { GraphSendRecvGradOpCUDAKernelLaunchHelper<Context, T, int32_t>( ctx, out_grad, x, src_index, dst_index, pool_type, x_grad, dst_count.get_ptr(), out.get_ptr()); } else if (index_type == phi::DataType::INT64) { GraphSendRecvGradOpCUDAKernelLaunchHelper<Context, T, int64_t>( ctx, out_grad, x, src_index, dst_index, pool_type, x_grad, dst_count.get_ptr(), out.get_ptr()); } } } // namespace phi PD_REGISTER_KERNEL(graph_send_recv_grad, GPU, ALL_LAYOUT, phi::GraphSendRecvGradKernel, float, double, int, int64_t) {}
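The launch helper above sizes a 1-D grid as ceil(n / block) clamped to the device's maximum gridDim.x. Below is a minimal standalone restatement of that computation; it is illustrative and not a Paddle API.

// Standalone restatement of the grid-size clamping (illustrative only).
#include <cstdint>
#include <cstdio>
#include <algorithm>

int64_t clamped_grid(int64_t n, int64_t block, int64_t max_grid_dimx) {
  int64_t grid_tmp = (n + block - 1) / block;  // ceil(n / block)
  return std::min(grid_tmp, max_grid_dimx);    // never exceed the gridDim.x limit
}

int main() {
  // e.g. 10,000,000 work items with 1024-thread blocks under CUDA's 2^31-1 limit.
  printf("%lld\n", (long long)clamped_grid(10000000, 1024, 2147483647LL));  // 9766
  return 0;
}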
3ee02a78c92a5621586e30ce5ca285f3547bd41c.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <vector> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/hostdevice.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/gpu/graph_send_recv_funcs.h" #include "paddle/phi/kernels/graph_send_recv_grad_kernel.h" namespace phi { template <typename Context, typename T, typename IndexT> void GraphSendRecvGradOpCUDAKernelLaunchHelper( const Context& ctx, const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& src_index, const DenseTensor& dst_index, const std::string& pool_type, DenseTensor* x_grad, const DenseTensor* dst_count = nullptr, const DenseTensor* out = nullptr) { const int& index_size = dst_index.dims()[0]; ctx.template Alloc<T>(x_grad); T* p_output = x_grad->data<T>(); const auto& src_dims = x.dims(); int64_t memset_size = 1; for (int i = 0; i < src_dims.size(); ++i) { memset_size *= src_dims[i]; } const size_t& memset_bytes = memset_size * sizeof(T); #ifdef PADDLE_WITH_HIP hipMemset(p_output, 0, memset_bytes); #else cudaMemset(p_output, 0, memset_bytes); #endif if (index_size == 0) return; int64_t slice_size = 1; for (int i = 1; i < src_dims.size(); ++i) { slice_size *= src_dims[i]; } const T* p_src = out_grad.data<T>(); const IndexT* s_index = src_index.data<IndexT>(); const IndexT* d_index = dst_index.data<IndexT>(); #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int64_t n = slice_size * index_size; int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t grid_tmp = (n + block - 1) / block; int64_t grid = grid_tmp < max_grid_dimx ? 
grid_tmp : max_grid_dimx; int64_t input_size = src_dims[0]; if (pool_type == "SUM") { GraphSendRecvSumCUDAFunctor<T, IndexT> functor; GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>> <<<grid, block, 0, ctx.stream()>>>( p_src, d_index, s_index, p_output, index_size, slice_size, functor); } else if (pool_type == "MEAN") { const int32_t* s_count = dst_count->data<int32_t>(); ManipulateMeanGradCUDAKernel<T, IndexT><<<grid, block, 0, ctx.stream()>>>( p_src, d_index, s_index, p_output, index_size, slice_size, s_count); } else if (pool_type == "MAX" || pool_type == "MIN") { const T* ptr_input = x.data<T>(); const T* ptr_output = out->data<T>(); ManipulateMinMaxGradCUDAKernel<T, IndexT> <<<grid, block, 0, ctx.stream()>>>(p_src, d_index, s_index, p_output, index_size, slice_size, ptr_input, ptr_output); } } template <typename T, typename Context> void GraphSendRecvGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& src_index, const DenseTensor& dst_index, const paddle::optional<DenseTensor>& out, const paddle::optional<DenseTensor>& dst_count, const DenseTensor& out_grad, const std::string& pool_type, DenseTensor* x_grad) { auto index_type = src_index.dtype(); if (index_type == phi::DataType::INT32) { GraphSendRecvGradOpCUDAKernelLaunchHelper<Context, T, int32_t>( ctx, out_grad, x, src_index, dst_index, pool_type, x_grad, dst_count.get_ptr(), out.get_ptr()); } else if (index_type == phi::DataType::INT64) { GraphSendRecvGradOpCUDAKernelLaunchHelper<Context, T, int64_t>( ctx, out_grad, x, src_index, dst_index, pool_type, x_grad, dst_count.get_ptr(), out.get_ptr()); } } } // namespace phi PD_REGISTER_KERNEL(graph_send_recv_grad, GPU, ALL_LAYOUT, phi::GraphSendRecvGradKernel, float, double, int, int64_t) {}
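The "SUM" pool backward in this pair is effectively a scatter-add: for every edge, the out_grad slice selected by dst_index is accumulated into the x_grad row selected by src_index. The kernel below is a minimal illustrative sketch of that pattern; it is not Paddle's GraphSendRecvCUDAKernel, and the names and tiny driver are made up for the example.

// Minimal illustrative CUDA sketch of the scatter-add behind the "SUM" backward.
#include <cstdio>
#include <cstring>
#include <cuda_runtime.h>

__global__ void scatter_add_rows(float const* grad_out, int const* src_index,
                                 int const* dst_index, float* grad_x,
                                 int index_size, int slice_size) {
  long long tid   = blockIdx.x * (long long)blockDim.x + threadIdx.x;
  long long total = (long long)index_size * slice_size;
  if (tid >= total) return;
  int edge = (int)(tid / slice_size);  // which (src, dst) pair
  int col  = (int)(tid % slice_size);  // element within the row slice
  // Gradient flows back along the edge: read the out_grad row picked by
  // dst_index, accumulate into the x_grad row picked by src_index.
  atomicAdd(&grad_x[src_index[edge] * slice_size + col],
            grad_out[dst_index[edge] * slice_size + col]);
}

int main() {
  // Tiny example: 2 edges, slice_size 3, two rows on each side.
  int   h_src[]  = {0, 1}, h_dst[] = {1, 1};
  float h_gout[] = {1, 2, 3, 4, 5, 6};
  int *src, *dst; float *gout, *gx;
  cudaMallocManaged(&src, sizeof(h_src));
  cudaMallocManaged(&dst, sizeof(h_dst));
  cudaMallocManaged(&gout, sizeof(h_gout));
  cudaMallocManaged(&gx, 6 * sizeof(float));
  memcpy(src, h_src, sizeof(h_src));
  memcpy(dst, h_dst, sizeof(h_dst));
  memcpy(gout, h_gout, sizeof(h_gout));
  cudaMemset(gx, 0, 6 * sizeof(float));
  scatter_add_rows<<<1, 64>>>(gout, src, dst, gx, /*index_size=*/2, /*slice_size=*/3);
  cudaDeviceSynchronize();
  for (int i = 0; i < 6; ++i) printf("%.0f ", gx[i]);  // 4 5 6 4 5 6
  printf("\n");
  return 0;
}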
ec9aacdf7d9c737574338c594da27a4f4465d009.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "j2d5pt-256-10-128_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 10 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 3 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice)); #ifdef STENCILBENCH hipDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 236; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { hipLaunchKernelGGL(( kernel0_10), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), 
dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = 
(__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 5) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && 
(__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 6) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 7) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / 
__side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 8) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 9) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const 
AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 5) { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = 
(__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 6) { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 7) { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 242; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 8) { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 240; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 9) { const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 238; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % 
__side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH hipDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = (5.1f * A[t%2][i-1][j] + 12.1f * A[t%2][i][j-1] + 15.0f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] + 5.2f * A[t%2][i+1][j]) / 118; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
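// --- Editor's sketch (illustration only; not part of the generated AN5D/hipify sources above or below) ---
// The host fallback loop at the end of the file above spells out the stencil that every
// kernel0_N variant computes: a 2D 5-point Jacobi-style update with constant coefficients,
// A[(t+1)%2][i][j] = (5.1*A[i-1][j] + 12.1*A[i][j-1] + 15.0*A[i][j] + 12.2*A[i][j+1] + 5.2*A[i+1][j]) / 118.
// The kernel below is a minimal, untuned single-timestep CUDA version of that same update,
// written only to make the math explicit; it is not the overlapped temporal-tiling code that
// AN5D emits, and the name j2d5pt_step is local to this sketch.
__global__ void j2d5pt_step(const float *in, float *out, int dimsize)
{
  // BENCH_RAD = 1 in the file above, so only interior points are updated.
  int i = blockIdx.y * blockDim.y + threadIdx.y;  // row
  int j = blockIdx.x * blockDim.x + threadIdx.x;  // column
  if (i >= 1 && i < dimsize - 1 && j >= 1 && j < dimsize - 1)
    out[i * dimsize + j] =
        (5.1f  * in[(i - 1) * dimsize + j] +
         12.1f * in[i * dimsize + (j - 1)] +
         15.0f * in[i * dimsize + j] +
         12.2f * in[i * dimsize + (j + 1)] +
         5.2f  * in[(i + 1) * dimsize + j]) / 118;
}
// Example launch (one time step; swap in/out between steps):
//   j2d5pt_step<<<dim3((dimsize + 31) / 32, (dimsize + 31) / 32), dim3(32, 32)>>>(in, out, dimsize);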
ec9aacdf7d9c737574338c594da27a4f4465d009.cu
#include <assert.h> #include <stdio.h> #include "j2d5pt-256-10-128_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 10 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 3 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice)); #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 236; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { kernel0_10<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE 
__OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D 
ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 5) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 6) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 
250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 7) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 8) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len 
* __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 9) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE 
__side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 5) { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 6) { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len 
>= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 7) { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 242; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 8) { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 240; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 9) { const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 238; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = (5.1f * A[t%2][i-1][j] + 12.1f * A[t%2][i][j-1] + 15.0f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] + 5.2f * A[t%2][i+1][j]) / 118; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
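// --- Editor's sketch (illustration only; not emitted by AN5D) ---
// Every branch of kernel_stencil above derives its launch configuration the same way:
// the temporal tile depth __side0Len shrinks the useful spatial tile __side2Len so that the
// overlapped tile __side2LenOl stays at 256 threads (the literal constants 236..254 above are
// all 256 - 2*__side0Len with __halo2 = 1), and the grid covers the c1 x c2 iteration space in
// tiles of __side1Len x __side2Len. The helper below just recomputes those numbers for a given
// temporal depth under the values hard-coded in this file (halo1 = halo2 = 1, side1Len = 128);
// the struct and function names are local to this sketch.
#include <assert.h>
#include <cuda_runtime.h>

struct An5dLaunch { dim3 grid, block; };

static An5dLaunch an5d_launch_dims(unsigned side0Len, unsigned c1Len, unsigned c2Len)
{
  const unsigned halo1 = 1, halo2 = 1;
  const unsigned side1Len   = 128;
  const unsigned side2Len   = 256 - 2 * side0Len * halo2;        // 236, 238, ..., 254 in the branches above
  const unsigned side2LenOl = side2Len + 2 * side0Len * halo2;   // always 256 here
  assert(side1Len >= 2 * side0Len * halo1);                      // same guard as the "[AN5D ERROR]" assert
  An5dLaunch l;
  l.block = dim3(side2LenOl, 1, 1);                              // __blockSize = 1 * __side2LenOl
  l.grid  = dim3(((c1Len + side1Len - 1) / side1Len) *
                 ((c2Len + side2Len - 1) / side2Len), 1, 1);
  return l;
}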
0bf97c2125d0af4380d8dbdc9a7e1e2069175a0c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017 NVIDIA Corporation * * The U.S. Department of Energy funded the development of this software * under subcontract B609478 with Lawrence Livermore National Security, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "snap.h" #include "snap_cuda_help.h" template<int GROUPS, int STRIP_SIZE> __global__ void gpu_flux0_outer_source(const Point<3> origin, const AccessorArray<GROUPS, AccessorRO<double,3>,3> fa_qi0, const AccessorArray<GROUPS, AccessorRO<double,3>,3> fa_flux0, const AccessorArray<GROUPS, AccessorRO<MomentQuad,2>,2> fa_slgg, const AccessorRO<int,3> fa_mat, const AccessorArray<GROUPS, AccessorWO<double,3>,3> fa_qo0) { __shared__ double flux_buffer[GROUPS][STRIP_SIZE]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int z = blockIdx.z; const Point<3> p = origin + Point<3>(x,y,z); const int group = threadIdx.z; const int strip_offset = threadIdx.y * blockDim.x + threadIdx.x; // First, update our pointers const double *qi0_ptr = fa_qi0[group].ptr(p); const double *flux0_ptr = fa_flux0[group].ptr(p); const int *mat_ptr = fa_mat.ptr(p); double *qo0_ptr = fa_qo0[group].ptr(p); // Do a little prefetching of other values we need too // Be intelligent about loads, we're trying to keep the slgg // matrix in L2 cache so make sure all other loads and stores // are cached with a streaming prefix double flux0; asm volatile("ld.global.cs.f64 %0, [%1];" : "=d"(flux0) : "l"(flux0_ptr) : "memory"); // Other threads will use the material so cache at all levels int mat; asm volatile("ld.global.ca.s32 %0, [%1];" : "=r"(mat) : "l"(mat_ptr) : "memory"); double qo0; asm volatile("ld.global.cs.f64 %0, [%1];" : "=d"(qo0) : "l"(qi0_ptr) : "memory"); // Write the value into shared flux_buffer[group][strip_offset] = flux0; // Synchronize when all the writes into shared memory are done __syncthreads(); // Do the math #pragma unroll for (int g = 0; g < GROUPS; g++) { if (g == group) continue; const MomentQuad *local_slgg = fa_slgg[group].ptr(Point<2>(mat,g)); double cs; asm volatile("ld.global.ca.f64 %0, [%1];" : "=d"(cs) : "l"(local_slgg) : "memory"); qo0 += cs * flux_buffer[g][strip_offset]; } // Write out our result asm volatile("st.global.cs.f64 [%0], %1;" : : "l"(qo0_ptr), "d"(qo0) : "memory"); } template<int GROUPS, int MAX_X, int MAX_Y> __host__ void flux0_launch_helper(Rect<3> subgrid_bounds, const std::vector<AccessorRO<double,3> > fa_qi0, const std::vector<AccessorRO<double,3> > fa_flux0, const std::vector<AccessorRO<MomentQuad,2> > fa_slgg, const AccessorRO<int,3> &fa_mat, const std::vector<AccessorWO<double,3> > fa_qo0) { const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1; const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1; const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1; dim3 block(gcd(x_range,MAX_X), gcd(y_range,MAX_Y), GROUPS); dim3 grid(x_range/block.x, y_range/block.y, 
z_range); hipLaunchKernelGGL(( gpu_flux0_outer_source<GROUPS,MAX_X*MAX_Y>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<GROUPS, AccessorRO<double,3>,3>(fa_qi0), AccessorArray<GROUPS, AccessorRO<double,3>,3>(fa_flux0), AccessorArray<GROUPS, AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<GROUPS, AccessorWO<double,3>,3>(fa_qo0)); } __host__ void run_flux0_outer_source(Rect<3> subgrid_bounds, const std::vector<AccessorRO<double,3> > &fa_qi0, const std::vector<AccessorRO<double,3> > &fa_flux0, const std::vector<AccessorRO<MomentQuad,2> > &fa_slgg, const std::vector<AccessorWO<double,3> > &fa_qo0, const AccessorRO<int,3> &fa_mat, const int num_groups) { // TODO: replace this template madness with Terra #define GROUP_CASE(g,x,y) \ case g: \ { \ flux0_launch_helper<g,x,y>(subgrid_bounds, fa_qi0, fa_flux0, fa_slgg, \ fa_mat, fa_qo0); \ break; \ } switch (num_groups) { GROUP_CASE(1,32,32) GROUP_CASE(2,32,16) GROUP_CASE(3,32,8) GROUP_CASE(4,32,8) GROUP_CASE(5,32,4) GROUP_CASE(6,32,4) GROUP_CASE(7,32,4) GROUP_CASE(8,32,4) GROUP_CASE(9,32,2) GROUP_CASE(10,32,2) GROUP_CASE(11,32,2) GROUP_CASE(12,32,2) GROUP_CASE(13,32,2) GROUP_CASE(14,32,2) GROUP_CASE(15,32,2) GROUP_CASE(16,32,2) GROUP_CASE(17,16,2) GROUP_CASE(18,16,2) GROUP_CASE(19,16,2) GROUP_CASE(20,16,2) GROUP_CASE(21,16,2) GROUP_CASE(22,16,2) GROUP_CASE(23,16,2) GROUP_CASE(24,16,2) GROUP_CASE(25,16,2) GROUP_CASE(26,16,2) GROUP_CASE(27,16,2) GROUP_CASE(28,16,2) GROUP_CASE(29,16,2) GROUP_CASE(30,16,2) GROUP_CASE(31,16,2) GROUP_CASE(32,16,2) GROUP_CASE(33,16,1) GROUP_CASE(34,16,1) GROUP_CASE(35,16,1) GROUP_CASE(36,16,1) GROUP_CASE(37,16,1) GROUP_CASE(38,16,1) GROUP_CASE(39,16,1) GROUP_CASE(40,16,1) GROUP_CASE(41,16,1) GROUP_CASE(42,16,1) GROUP_CASE(43,16,1) GROUP_CASE(44,16,1) GROUP_CASE(45,16,1) GROUP_CASE(46,16,1) GROUP_CASE(47,16,1) GROUP_CASE(48,16,1) GROUP_CASE(49,16,1) GROUP_CASE(50,16,1) GROUP_CASE(51,16,1) GROUP_CASE(52,16,1) GROUP_CASE(53,16,1) GROUP_CASE(54,16,1) GROUP_CASE(55,16,1) GROUP_CASE(56,16,1) GROUP_CASE(57,16,1) GROUP_CASE(58,16,1) GROUP_CASE(59,16,1) GROUP_CASE(60,16,1) GROUP_CASE(61,16,1) GROUP_CASE(62,16,1) GROUP_CASE(63,16,1) GROUP_CASE(64,16,1) GROUP_CASE(65,8,1) GROUP_CASE(66,8,1) GROUP_CASE(67,8,1) GROUP_CASE(68,8,1) GROUP_CASE(69,8,1) GROUP_CASE(70,8,1) GROUP_CASE(71,8,1) GROUP_CASE(72,8,1) GROUP_CASE(73,8,1) GROUP_CASE(74,8,1) GROUP_CASE(75,8,1) GROUP_CASE(76,8,1) GROUP_CASE(77,8,1) GROUP_CASE(78,8,1) GROUP_CASE(79,8,1) GROUP_CASE(80,8,1) GROUP_CASE(81,8,1) GROUP_CASE(82,8,1) GROUP_CASE(83,8,1) GROUP_CASE(84,8,1) GROUP_CASE(85,8,1) GROUP_CASE(86,8,1) GROUP_CASE(87,8,1) GROUP_CASE(88,8,1) GROUP_CASE(89,8,1) GROUP_CASE(90,8,1) GROUP_CASE(91,8,1) GROUP_CASE(92,8,1) GROUP_CASE(93,8,1) GROUP_CASE(94,8,1) GROUP_CASE(95,8,1) GROUP_CASE(96,8,1) // About to drop down to 1 CTA per SM due to shared memory default: printf("Adding group case to outer flux0 computation!\n"); assert(false); } #undef GROUP_CASE } template<int GROUPS, int STRIP_SIZE> __global__ void gpu_fluxm_outer_source(const Point<3> origin, const AccessorArray<GROUPS, AccessorRO<MomentTriple,3>,3> fa_fluxm, const AccessorArray<GROUPS, AccessorRO<MomentQuad,2>,2> fa_slgg, const AccessorRO<int,3> fa_mat, const AccessorArray<GROUPS, AccessorWO<MomentTriple,3>,3> fa_qom, const int num_moments, const ConstBuffer<4,int> lma) { __shared__ double fluxm_buffer_0[GROUPS][STRIP_SIZE]; __shared__ double fluxm_buffer_1[GROUPS][STRIP_SIZE]; __shared__ double fluxm_buffer_2[GROUPS][STRIP_SIZE]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const 
int y = blockIdx.y + blockDim.y + threadIdx.y; const int z = blockIdx.z; const Point<3> p = origin + Point<3>(x,y,z); const int group = threadIdx.z; const int strip_offset = threadIdx.y * blockDim.x + threadIdx.x; const MomentTriple *fluxm_ptr = fa_fluxm[group].ptr(p); const int *mat_ptr = fa_mat.ptr(p); MomentTriple *qom_ptr = fa_qom[group].ptr(p); MomentTriple fluxm; asm volatile("ld.global.cs.v2.f64 {%0,%1}, [%2];" : "=d"(fluxm[0]), "=d"(fluxm[1]) : "l"(fluxm_ptr) : "memory"); asm volatile("ld.global.cs.f64 %0, [%1];" : "=d"(fluxm[2]) : "l"(((char*)fluxm_ptr)+16) : "memory"); int mat; asm volatile("ld.global.ca.s32 %0, [%1];" : "=r"(mat) : "l"(mat_ptr) : "memory"); // Write the fluxm into shared memory fluxm_buffer_0[group][strip_offset] = fluxm[0]; fluxm_buffer_1[group][strip_offset] = fluxm[1]; fluxm_buffer_2[group][strip_offset] = fluxm[2]; // Synchronize to make sure all the writes to shared are done __syncthreads(); // Do the math MomentTriple qom; #pragma unroll for (int g = 0; g < GROUPS; g++) { if (g == group) continue; int moment = 0; const MomentQuad *local_slgg = fa_slgg[group].ptr(Point<2>(mat, g)); MomentQuad scat; asm volatile("ld.global.ca.v2.f64 {%0,%1}, [%2];" : "=d"(scat[0]), "=d"(scat[1]) : "l"(local_slgg) : "memory"); asm volatile("ld.global.ca.v2.f64 {%0,%1}, [%2];" : "=d"(scat[2]), "=d"(scat[3]) : "l"(((char*)local_slgg)+16) : "memory"); MomentTriple csm; for (int l = 1; l < num_moments; l++) { for (int j = 0; j < lma[l]; j++) csm[moment+j] = scat[l]; moment += lma[l]; } fluxm[0] = fluxm_buffer_0[g][strip_offset]; fluxm[1] = fluxm_buffer_1[g][strip_offset]; fluxm[2] = fluxm_buffer_2[g][strip_offset]; for (int l = 0; l < (num_moments-1); l++) qom[l] += csm[l] * fluxm[l]; } // Now we can write out the result asm volatile("st.global.cs.v2.f64 [%0], {%1,%2};" : : "l"(qom_ptr), "d"(qom[0]), "d"(qom[1]) : "memory"); asm volatile("st.global.cs.f64 [%0], %1;" : : "l"(((char*)qom_ptr)+16), "d"(qom[2]) : "memory"); } template<int GROUPS, int MAX_X, int MAX_Y> __host__ void fluxm_launch_helper(Rect<3> subgrid_bounds, const std::vector<AccessorRO<MomentTriple,3> > &fa_fluxm, const std::vector<AccessorRO<MomentQuad,2> > &fa_slgg, const std::vector<AccessorWO<MomentTriple,3> > &fa_qom, const AccessorRO<int,3> &fa_mat, const int num_groups, const int num_moments, const int lma[4]) { const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1; const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1; const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1; dim3 block(gcd(x_range,MAX_X), gcd(y_range,MAX_Y), GROUPS); dim3 grid(x_range/block.x, y_range/block.y, z_range); hipLaunchKernelGGL(( gpu_fluxm_outer_source<GROUPS,MAX_X*MAX_Y>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<GROUPS, AccessorRO<MomentTriple,3>,3>(fa_fluxm), AccessorArray<GROUPS, AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<GROUPS, AccessorWO<MomentTriple,3>,3>(fa_qom), num_moments, ConstBuffer<4,int>(lma)); } __host__ void run_fluxm_outer_source(Rect<3> subgrid_bounds, const std::vector<AccessorRO<MomentTriple,3> > &fa_fluxm, const std::vector<AccessorRO<MomentQuad,2> > &fa_slgg, const std::vector<AccessorWO<MomentTriple,3> > &fa_qom, const AccessorRO<int,3> &fa_mat, const int num_groups, const int num_moments, const int lma[4]) { // TODO: replace this template madness with Terra #define GROUP_CASE(g,x,y) \ case g: \ { \ fluxm_launch_helper<g,x,y>(subgrid_bounds, fa_fluxm, fa_slgg, fa_qom, \ fa_mat, num_groups, num_moments, lma); \ break; \ } 
switch (num_groups) { GROUP_CASE(1,32,32) GROUP_CASE(2,32,16) GROUP_CASE(3,32,8) GROUP_CASE(4,32,8) GROUP_CASE(5,32,4) GROUP_CASE(6,32,4) GROUP_CASE(7,32,4) GROUP_CASE(8,32,4) GROUP_CASE(9,32,2) GROUP_CASE(10,32,2) GROUP_CASE(11,32,2) GROUP_CASE(12,32,2) GROUP_CASE(13,32,2) GROUP_CASE(14,32,2) GROUP_CASE(15,32,2) GROUP_CASE(16,32,2) #if 0 GROUP_CASE(17,16,2) GROUP_CASE(18,16,2) GROUP_CASE(19,16,2) GROUP_CASE(20,16,2) GROUP_CASE(21,16,2) GROUP_CASE(22,16,2) GROUP_CASE(23,16,2) GROUP_CASE(24,16,2) GROUP_CASE(25,16,2) GROUP_CASE(26,16,2) GROUP_CASE(27,16,2) GROUP_CASE(28,16,2) GROUP_CASE(29,16,2) GROUP_CASE(30,16,2) GROUP_CASE(31,16,2) GROUP_CASE(32,16,2) GROUP_CASE(33,16,1) GROUP_CASE(34,16,1) GROUP_CASE(35,16,1) GROUP_CASE(36,16,1) GROUP_CASE(37,16,1) GROUP_CASE(38,16,1) GROUP_CASE(39,16,1) GROUP_CASE(40,16,1) GROUP_CASE(41,16,1) GROUP_CASE(42,16,1) GROUP_CASE(43,16,1) GROUP_CASE(44,16,1) GROUP_CASE(45,16,1) GROUP_CASE(46,16,1) GROUP_CASE(47,16,1) GROUP_CASE(48,16,1) GROUP_CASE(49,16,1) GROUP_CASE(50,16,1) GROUP_CASE(51,16,1) GROUP_CASE(52,16,1) GROUP_CASE(53,16,1) GROUP_CASE(54,16,1) GROUP_CASE(55,16,1) GROUP_CASE(56,16,1) GROUP_CASE(57,16,1) GROUP_CASE(58,16,1) GROUP_CASE(59,16,1) GROUP_CASE(60,16,1) GROUP_CASE(61,16,1) GROUP_CASE(62,16,1) GROUP_CASE(63,16,1) GROUP_CASE(64,16,1) GROUP_CASE(65,8,1) GROUP_CASE(66,8,1) GROUP_CASE(67,8,1) GROUP_CASE(68,8,1) GROUP_CASE(69,8,1) GROUP_CASE(70,8,1) GROUP_CASE(71,8,1) GROUP_CASE(72,8,1) GROUP_CASE(73,8,1) GROUP_CASE(74,8,1) GROUP_CASE(75,8,1) GROUP_CASE(76,8,1) GROUP_CASE(77,8,1) GROUP_CASE(78,8,1) GROUP_CASE(79,8,1) GROUP_CASE(80,8,1) GROUP_CASE(81,8,1) GROUP_CASE(82,8,1) GROUP_CASE(83,8,1) GROUP_CASE(84,8,1) GROUP_CASE(85,8,1) GROUP_CASE(86,8,1) GROUP_CASE(87,8,1) GROUP_CASE(88,8,1) GROUP_CASE(89,8,1) GROUP_CASE(90,8,1) GROUP_CASE(91,8,1) GROUP_CASE(92,8,1) GROUP_CASE(93,8,1) GROUP_CASE(94,8,1) GROUP_CASE(95,8,1) GROUP_CASE(96,8,1) #endif default: printf("Adding group case to outer fluxm computation!\n"); assert(false); } #undef GROUP_CASE } __global__ void gpu_outer_convergence(const Point<3> origin, const AccessorRO<double,3> fa_flux0, const AccessorRO<double,3> fa_flux0po, const double epsi, const DeferredBuffer<int,1> results, const int results_offset) { // We know there is never more than 32 warps in a CTA __shared__ int trampoline[32]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int z = blockIdx.z * blockDim.z + threadIdx.z; const Point<3> p = origin + Point<3>(x,y,z); const double *flux0_ptr = fa_flux0.ptr(p); const double *flux0po_ptr = fa_flux0po.ptr(p); const double tolr = 1.0e-12; double flux0po = *flux0po_ptr; double df = 1.0; if (fabs(flux0po) < tolr) { flux0po = 1.0; df = 0.0; } double flux0 = *flux0_ptr; df = fabs( (flux0 / flux0po) - df ); int local_converged = 1; if ((df >= -INFINITY) && (df > epsi)) local_converged = 0; // Perform a local reduction inside the CTA // Butterfly reduction across all threads in all warps for (int i = 16; i >= 1; i/=2) local_converged += __shfl_xor_sync(0xfffffff, local_converged, i, 32); unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = ((threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x) >> 5; // First thread in each warp writes out all values if (laneid == 0) trampoline[warpid] = local_converged; __syncthreads(); // Butterfly reduction across all thread in the first warp if (warpid == 0) { unsigned numwarps = (blockDim.x * blockDim.y * blockDim.z) >> 5; local_converged = 
(laneid < numwarps) ? trampoline[laneid] : 0; for (int i = 16; i >= 1; i/=2) local_converged += __shfl_xor_sync(0xfffffff, local_converged, i, 32); // First thread does the atomic if (laneid == 0) results.write(Point<1>(results_offset + (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x), local_converged); } } __global__ void gpu_sum_outer_convergence(const DeferredBuffer<int,1> buffer, const DeferredValue<bool> result, const size_t total_blocks, const int expected) { __shared__ int trampoline[32]; int offset = threadIdx.x; int total = 0; while (offset < total_blocks) { total += buffer.read(Point<1>(offset)); offset += blockDim.x; } for (int i = 16; i >= 1; i/=2) total += __shfl_xor_sync(0xfffffff, total, i, 32); unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = threadIdx.x >> 5; // Write results in the trampoline if (laneid == 0) trampoline[warpid] = total; __syncthreads(); if (warpid == 0) { unsigned numwarps = blockDim.x >> 5; total = (laneid < numwarps) ? trampoline[laneid] : 0; for (int i = 16; i >= 1; i/=2) total += __shfl_xor_sync(0xfffffff, total, i, 32); if (laneid == 0) result.write(total == expected); } } __host__ void run_outer_convergence(Rect<3> subgrid_bounds, const DeferredValue<bool> &result, const std::vector<AccessorRO<double,3> > &fa_flux0, const std::vector<AccessorRO<double,3> > &fa_flux0po, const double epsi) { // Launch the kernels const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1; const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1; const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1; dim3 block(gcd(x_range,32),gcd(y_range,4),gcd(z_range,4)); dim3 grid(x_range/block.x, y_range/block.y, z_range/block.z); const size_t total_blocks = grid.x*grid.y*grid.z; assert(fa_flux0.size() == fa_flux0po.size()); const Rect<1> bounds(Point<1>(0),Point<1>(total_blocks * fa_flux0.size() - 1)); DeferredBuffer<int,1> buffer(bounds, Memory::GPU_FB_MEM); for (unsigned idx = 0; idx < fa_flux0.size(); idx++) { hipLaunchKernelGGL(( gpu_outer_convergence), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, fa_flux0[idx], fa_flux0po[idx], epsi, buffer, idx * total_blocks); } dim3 block2((bounds.hi[0]+1) > 1024 ? 1024 : (bounds.hi[0]+1),1,1); // Round up to the nearest multiple of warps while ((block2.x % 32) != 0) block2.x++; dim3 grid2(1,1,1); const int expected = x_range * y_range * z_range * fa_flux0.size(); hipLaunchKernelGGL(( gpu_sum_outer_convergence), dim3(grid2),dim3(block2), 0, 0, buffer, result, bounds.hi[0]+1, expected); }
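// --- Editor's sketch (distilled pattern; not part of the SNAP sources above or below) ---
// gpu_outer_convergence and gpu_sum_outer_convergence above both collapse one value per thread
// into one value per CTA with the same two-level scheme: a butterfly reduction over each warp
// using __shfl_xor_sync, a 32-slot shared "trampoline" holding one partial per warp, then a
// second butterfly over the first warp. The CUDA kernel below shows that pattern in isolation,
// summing one int per thread into out[blockIdx.x]; it assumes blockDim.x is a multiple of 32
// and uses the full 32-bit warp mask. Names are local to this sketch.
__global__ void block_sum(const int *in, int *out, int n)
{
  __shared__ int trampoline[32];                 // one partial per warp (never more than 32 warps per CTA)
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int val = (idx < n) ? in[idx] : 0;

  // Warp-level butterfly: after the loop every lane in the warp holds the warp's sum.
  for (int i = 16; i >= 1; i /= 2)
    val += __shfl_xor_sync(0xffffffffu, val, i, 32);

  unsigned lane = threadIdx.x & 31;
  unsigned warp = threadIdx.x >> 5;
  if (lane == 0)
    trampoline[warp] = val;                      // lane 0 of each warp publishes its partial
  __syncthreads();

  // Second butterfly across the per-warp partials, done by the first warp only.
  if (warp == 0) {
    unsigned numwarps = blockDim.x >> 5;
    val = (lane < numwarps) ? trampoline[lane] : 0;
    for (int i = 16; i >= 1; i /= 2)
      val += __shfl_xor_sync(0xffffffffu, val, i, 32);
    if (lane == 0)
      out[blockIdx.x] = val;                     // one result per CTA
  }
}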
0bf97c2125d0af4380d8dbdc9a7e1e2069175a0c.cu
/* Copyright 2017 NVIDIA Corporation * * The U.S. Department of Energy funded the development of this software * under subcontract B609478 with Lawrence Livermore National Security, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "snap.h" #include "snap_cuda_help.h" template<int GROUPS, int STRIP_SIZE> __global__ void gpu_flux0_outer_source(const Point<3> origin, const AccessorArray<GROUPS, AccessorRO<double,3>,3> fa_qi0, const AccessorArray<GROUPS, AccessorRO<double,3>,3> fa_flux0, const AccessorArray<GROUPS, AccessorRO<MomentQuad,2>,2> fa_slgg, const AccessorRO<int,3> fa_mat, const AccessorArray<GROUPS, AccessorWO<double,3>,3> fa_qo0) { __shared__ double flux_buffer[GROUPS][STRIP_SIZE]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int z = blockIdx.z; const Point<3> p = origin + Point<3>(x,y,z); const int group = threadIdx.z; const int strip_offset = threadIdx.y * blockDim.x + threadIdx.x; // First, update our pointers const double *qi0_ptr = fa_qi0[group].ptr(p); const double *flux0_ptr = fa_flux0[group].ptr(p); const int *mat_ptr = fa_mat.ptr(p); double *qo0_ptr = fa_qo0[group].ptr(p); // Do a little prefetching of other values we need too // Be intelligent about loads, we're trying to keep the slgg // matrix in L2 cache so make sure all other loads and stores // are cached with a streaming prefix double flux0; asm volatile("ld.global.cs.f64 %0, [%1];" : "=d"(flux0) : "l"(flux0_ptr) : "memory"); // Other threads will use the material so cache at all levels int mat; asm volatile("ld.global.ca.s32 %0, [%1];" : "=r"(mat) : "l"(mat_ptr) : "memory"); double qo0; asm volatile("ld.global.cs.f64 %0, [%1];" : "=d"(qo0) : "l"(qi0_ptr) : "memory"); // Write the value into shared flux_buffer[group][strip_offset] = flux0; // Synchronize when all the writes into shared memory are done __syncthreads(); // Do the math #pragma unroll for (int g = 0; g < GROUPS; g++) { if (g == group) continue; const MomentQuad *local_slgg = fa_slgg[group].ptr(Point<2>(mat,g)); double cs; asm volatile("ld.global.ca.f64 %0, [%1];" : "=d"(cs) : "l"(local_slgg) : "memory"); qo0 += cs * flux_buffer[g][strip_offset]; } // Write out our result asm volatile("st.global.cs.f64 [%0], %1;" : : "l"(qo0_ptr), "d"(qo0) : "memory"); } template<int GROUPS, int MAX_X, int MAX_Y> __host__ void flux0_launch_helper(Rect<3> subgrid_bounds, const std::vector<AccessorRO<double,3> > fa_qi0, const std::vector<AccessorRO<double,3> > fa_flux0, const std::vector<AccessorRO<MomentQuad,2> > fa_slgg, const AccessorRO<int,3> &fa_mat, const std::vector<AccessorWO<double,3> > fa_qo0) { const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1; const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1; const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1; dim3 block(gcd(x_range,MAX_X), gcd(y_range,MAX_Y), GROUPS); dim3 grid(x_range/block.x, y_range/block.y, z_range); gpu_flux0_outer_source<GROUPS,MAX_X*MAX_Y><<<grid,block>>>( subgrid_bounds.lo, 
AccessorArray<GROUPS, AccessorRO<double,3>,3>(fa_qi0), AccessorArray<GROUPS, AccessorRO<double,3>,3>(fa_flux0), AccessorArray<GROUPS, AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<GROUPS, AccessorWO<double,3>,3>(fa_qo0)); } __host__ void run_flux0_outer_source(Rect<3> subgrid_bounds, const std::vector<AccessorRO<double,3> > &fa_qi0, const std::vector<AccessorRO<double,3> > &fa_flux0, const std::vector<AccessorRO<MomentQuad,2> > &fa_slgg, const std::vector<AccessorWO<double,3> > &fa_qo0, const AccessorRO<int,3> &fa_mat, const int num_groups) { // TODO: replace this template madness with Terra #define GROUP_CASE(g,x,y) \ case g: \ { \ flux0_launch_helper<g,x,y>(subgrid_bounds, fa_qi0, fa_flux0, fa_slgg, \ fa_mat, fa_qo0); \ break; \ } switch (num_groups) { GROUP_CASE(1,32,32) GROUP_CASE(2,32,16) GROUP_CASE(3,32,8) GROUP_CASE(4,32,8) GROUP_CASE(5,32,4) GROUP_CASE(6,32,4) GROUP_CASE(7,32,4) GROUP_CASE(8,32,4) GROUP_CASE(9,32,2) GROUP_CASE(10,32,2) GROUP_CASE(11,32,2) GROUP_CASE(12,32,2) GROUP_CASE(13,32,2) GROUP_CASE(14,32,2) GROUP_CASE(15,32,2) GROUP_CASE(16,32,2) GROUP_CASE(17,16,2) GROUP_CASE(18,16,2) GROUP_CASE(19,16,2) GROUP_CASE(20,16,2) GROUP_CASE(21,16,2) GROUP_CASE(22,16,2) GROUP_CASE(23,16,2) GROUP_CASE(24,16,2) GROUP_CASE(25,16,2) GROUP_CASE(26,16,2) GROUP_CASE(27,16,2) GROUP_CASE(28,16,2) GROUP_CASE(29,16,2) GROUP_CASE(30,16,2) GROUP_CASE(31,16,2) GROUP_CASE(32,16,2) GROUP_CASE(33,16,1) GROUP_CASE(34,16,1) GROUP_CASE(35,16,1) GROUP_CASE(36,16,1) GROUP_CASE(37,16,1) GROUP_CASE(38,16,1) GROUP_CASE(39,16,1) GROUP_CASE(40,16,1) GROUP_CASE(41,16,1) GROUP_CASE(42,16,1) GROUP_CASE(43,16,1) GROUP_CASE(44,16,1) GROUP_CASE(45,16,1) GROUP_CASE(46,16,1) GROUP_CASE(47,16,1) GROUP_CASE(48,16,1) GROUP_CASE(49,16,1) GROUP_CASE(50,16,1) GROUP_CASE(51,16,1) GROUP_CASE(52,16,1) GROUP_CASE(53,16,1) GROUP_CASE(54,16,1) GROUP_CASE(55,16,1) GROUP_CASE(56,16,1) GROUP_CASE(57,16,1) GROUP_CASE(58,16,1) GROUP_CASE(59,16,1) GROUP_CASE(60,16,1) GROUP_CASE(61,16,1) GROUP_CASE(62,16,1) GROUP_CASE(63,16,1) GROUP_CASE(64,16,1) GROUP_CASE(65,8,1) GROUP_CASE(66,8,1) GROUP_CASE(67,8,1) GROUP_CASE(68,8,1) GROUP_CASE(69,8,1) GROUP_CASE(70,8,1) GROUP_CASE(71,8,1) GROUP_CASE(72,8,1) GROUP_CASE(73,8,1) GROUP_CASE(74,8,1) GROUP_CASE(75,8,1) GROUP_CASE(76,8,1) GROUP_CASE(77,8,1) GROUP_CASE(78,8,1) GROUP_CASE(79,8,1) GROUP_CASE(80,8,1) GROUP_CASE(81,8,1) GROUP_CASE(82,8,1) GROUP_CASE(83,8,1) GROUP_CASE(84,8,1) GROUP_CASE(85,8,1) GROUP_CASE(86,8,1) GROUP_CASE(87,8,1) GROUP_CASE(88,8,1) GROUP_CASE(89,8,1) GROUP_CASE(90,8,1) GROUP_CASE(91,8,1) GROUP_CASE(92,8,1) GROUP_CASE(93,8,1) GROUP_CASE(94,8,1) GROUP_CASE(95,8,1) GROUP_CASE(96,8,1) // About to drop down to 1 CTA per SM due to shared memory default: printf("Adding group case to outer flux0 computation!\n"); assert(false); } #undef GROUP_CASE } template<int GROUPS, int STRIP_SIZE> __global__ void gpu_fluxm_outer_source(const Point<3> origin, const AccessorArray<GROUPS, AccessorRO<MomentTriple,3>,3> fa_fluxm, const AccessorArray<GROUPS, AccessorRO<MomentQuad,2>,2> fa_slgg, const AccessorRO<int,3> fa_mat, const AccessorArray<GROUPS, AccessorWO<MomentTriple,3>,3> fa_qom, const int num_moments, const ConstBuffer<4,int> lma) { __shared__ double fluxm_buffer_0[GROUPS][STRIP_SIZE]; __shared__ double fluxm_buffer_1[GROUPS][STRIP_SIZE]; __shared__ double fluxm_buffer_2[GROUPS][STRIP_SIZE]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y + blockDim.y + threadIdx.y; const int z = blockIdx.z; const Point<3> p = origin + Point<3>(x,y,z); const 
template<int GROUPS, int STRIP_SIZE>
__global__
void gpu_fluxm_outer_source(const Point<3> origin,
                            const AccessorArray<GROUPS,
                                    AccessorRO<MomentTriple,3>,3> fa_fluxm,
                            const AccessorArray<GROUPS,
                                    AccessorRO<MomentQuad,2>,2> fa_slgg,
                            const AccessorRO<int,3> fa_mat,
                            const AccessorArray<GROUPS,
                                    AccessorWO<MomentTriple,3>,3> fa_qom,
                            const int num_moments, const ConstBuffer<4,int> lma)
{
  __shared__ double fluxm_buffer_0[GROUPS][STRIP_SIZE];
  __shared__ double fluxm_buffer_1[GROUPS][STRIP_SIZE];
  __shared__ double fluxm_buffer_2[GROUPS][STRIP_SIZE];
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  const int z = blockIdx.z;
  const Point<3> p = origin + Point<3>(x,y,z);
  const int group = threadIdx.z;
  const int strip_offset = threadIdx.y * blockDim.x + threadIdx.x;
  const MomentTriple *fluxm_ptr = fa_fluxm[group].ptr(p);
  const int *mat_ptr = fa_mat.ptr(p);
  MomentTriple *qom_ptr = fa_qom[group].ptr(p);
  // Streaming (cs) loads of the three flux moments for this point
  MomentTriple fluxm;
  asm volatile("ld.global.cs.v2.f64 {%0,%1}, [%2];" : "=d"(fluxm[0]), "=d"(fluxm[1])
                : "l"(fluxm_ptr) : "memory");
  asm volatile("ld.global.cs.f64 %0, [%1];" : "=d"(fluxm[2])
                : "l"(((char*)fluxm_ptr)+16) : "memory");
  int mat;
  asm volatile("ld.global.ca.s32 %0, [%1];" : "=r"(mat) : "l"(mat_ptr) : "memory");
  // Write the fluxm into shared memory
  fluxm_buffer_0[group][strip_offset] = fluxm[0];
  fluxm_buffer_1[group][strip_offset] = fluxm[1];
  fluxm_buffer_2[group][strip_offset] = fluxm[2];
  // Synchronize to make sure all the writes to shared are done
  __syncthreads();
  // Do the math
  MomentTriple qom;
  #pragma unroll
  for (int g = 0; g < GROUPS; g++)
  {
    if (g == group)
      continue;
    int moment = 0;
    const MomentQuad *local_slgg = fa_slgg[group].ptr(Point<2>(mat, g));
    MomentQuad scat;
    asm volatile("ld.global.ca.v2.f64 {%0,%1}, [%2];" : "=d"(scat[0]), "=d"(scat[1])
                  : "l"(local_slgg) : "memory");
    asm volatile("ld.global.ca.v2.f64 {%0,%1}, [%2];" : "=d"(scat[2]), "=d"(scat[3])
                  : "l"(((char*)local_slgg)+16) : "memory");
    MomentTriple csm;
    for (int l = 1; l < num_moments; l++)
    {
      for (int j = 0; j < lma[l]; j++)
        csm[moment+j] = scat[l];
      moment += lma[l];
    }
    // Read the other group's fluxm back out of shared memory
    fluxm[0] = fluxm_buffer_0[g][strip_offset];
    fluxm[1] = fluxm_buffer_1[g][strip_offset];
    fluxm[2] = fluxm_buffer_2[g][strip_offset];
    for (int l = 0; l < (num_moments-1); l++)
      qom[l] += csm[l] * fluxm[l];
  }
  // Now we can write out the result
  asm volatile("st.global.cs.v2.f64 [%0], {%1,%2};" : : "l"(qom_ptr),
                "d"(qom[0]), "d"(qom[1]) : "memory");
  asm volatile("st.global.cs.f64 [%0], %1;" : : "l"(((char*)qom_ptr)+16),
                "d"(qom[2]) : "memory");
}

template<int GROUPS, int MAX_X, int MAX_Y>
__host__
void fluxm_launch_helper(Rect<3> subgrid_bounds,
                         const std::vector<AccessorRO<MomentTriple,3> > &fa_fluxm,
                         const std::vector<AccessorRO<MomentQuad,2> > &fa_slgg,
                         const std::vector<AccessorWO<MomentTriple,3> > &fa_qom,
                         const AccessorRO<int,3> &fa_mat, const int num_groups,
                         const int num_moments, const int lma[4])
{
  const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1;
  const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1;
  const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1;
  dim3 block(gcd(x_range,MAX_X), gcd(y_range,MAX_Y), GROUPS);
  dim3 grid(x_range/block.x, y_range/block.y, z_range);
  gpu_fluxm_outer_source<GROUPS,MAX_X*MAX_Y><<<grid,block>>>(subgrid_bounds.lo,
      AccessorArray<GROUPS, AccessorRO<MomentTriple,3>,3>(fa_fluxm),
      AccessorArray<GROUPS, AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat,
      AccessorArray<GROUPS, AccessorWO<MomentTriple,3>,3>(fa_qom),
      num_moments, ConstBuffer<4,int>(lma));
}
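#if 0
// Illustrative sketch only -- not part of the original source.  The launch
// helpers above size each CTA with gcd() so that the block dimensions always
// divide the subgrid extents evenly, which lets grid = range/block be exact
// with no remainder handling inside the kernels.  A hypothetical host-side
// check of that property (example_gcd and example_launch_shape are made-up
// names; <cstdio>/<cassert> are assumed to be included already, since the
// surrounding code uses printf and assert):
static int example_gcd(int a, int b)
{
  return (b == 0) ? a : example_gcd(b, a % b);
}

static void example_launch_shape(int x_range, int y_range, int max_x, int max_y)
{
  const int bx = example_gcd(x_range, max_x);
  const int by = example_gcd(y_range, max_y);
  // Both divisions are exact by construction of gcd
  assert((x_range % bx) == 0);
  assert((y_range % by) == 0);
  printf("block=(%d,%d) grid=(%d,%d)\n", bx, by, x_range / bx, y_range / by);
}
#endif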
__host__
void run_fluxm_outer_source(Rect<3> subgrid_bounds,
                            const std::vector<AccessorRO<MomentTriple,3> > &fa_fluxm,
                            const std::vector<AccessorRO<MomentQuad,2> > &fa_slgg,
                            const std::vector<AccessorWO<MomentTriple,3> > &fa_qom,
                            const AccessorRO<int,3> &fa_mat, const int num_groups,
                            const int num_moments, const int lma[4])
{
  // TODO: replace this template madness with Terra
#define GROUP_CASE(g,x,y)                                                   \
  case g:                                                                   \
    {                                                                       \
      fluxm_launch_helper<g,x,y>(subgrid_bounds, fa_fluxm, fa_slgg, fa_qom, \
                                 fa_mat, num_groups, num_moments, lma);     \
      break;                                                                \
    }
  switch (num_groups)
  {
    GROUP_CASE(1,32,32)  GROUP_CASE(2,32,16)  GROUP_CASE(3,32,8)   GROUP_CASE(4,32,8)
    GROUP_CASE(5,32,4)   GROUP_CASE(6,32,4)   GROUP_CASE(7,32,4)   GROUP_CASE(8,32,4)
    GROUP_CASE(9,32,2)   GROUP_CASE(10,32,2)  GROUP_CASE(11,32,2)  GROUP_CASE(12,32,2)
    GROUP_CASE(13,32,2)  GROUP_CASE(14,32,2)  GROUP_CASE(15,32,2)  GROUP_CASE(16,32,2)
    // Cases above 16 groups are disabled, presumably because this kernel
    // stages three per-group shared-memory buffers (versus one for flux0),
    // so larger group counts would not fit in a CTA's shared memory budget.
#if 0
    GROUP_CASE(17,16,2)  GROUP_CASE(18,16,2)  GROUP_CASE(19,16,2)  GROUP_CASE(20,16,2)
    GROUP_CASE(21,16,2)  GROUP_CASE(22,16,2)  GROUP_CASE(23,16,2)  GROUP_CASE(24,16,2)
    GROUP_CASE(25,16,2)  GROUP_CASE(26,16,2)  GROUP_CASE(27,16,2)  GROUP_CASE(28,16,2)
    GROUP_CASE(29,16,2)  GROUP_CASE(30,16,2)  GROUP_CASE(31,16,2)  GROUP_CASE(32,16,2)
    GROUP_CASE(33,16,1)  GROUP_CASE(34,16,1)  GROUP_CASE(35,16,1)  GROUP_CASE(36,16,1)
    GROUP_CASE(37,16,1)  GROUP_CASE(38,16,1)  GROUP_CASE(39,16,1)  GROUP_CASE(40,16,1)
    GROUP_CASE(41,16,1)  GROUP_CASE(42,16,1)  GROUP_CASE(43,16,1)  GROUP_CASE(44,16,1)
    GROUP_CASE(45,16,1)  GROUP_CASE(46,16,1)  GROUP_CASE(47,16,1)  GROUP_CASE(48,16,1)
    GROUP_CASE(49,16,1)  GROUP_CASE(50,16,1)  GROUP_CASE(51,16,1)  GROUP_CASE(52,16,1)
    GROUP_CASE(53,16,1)  GROUP_CASE(54,16,1)  GROUP_CASE(55,16,1)  GROUP_CASE(56,16,1)
    GROUP_CASE(57,16,1)  GROUP_CASE(58,16,1)  GROUP_CASE(59,16,1)  GROUP_CASE(60,16,1)
    GROUP_CASE(61,16,1)  GROUP_CASE(62,16,1)  GROUP_CASE(63,16,1)  GROUP_CASE(64,16,1)
    GROUP_CASE(65,8,1)   GROUP_CASE(66,8,1)   GROUP_CASE(67,8,1)   GROUP_CASE(68,8,1)
    GROUP_CASE(69,8,1)   GROUP_CASE(70,8,1)   GROUP_CASE(71,8,1)   GROUP_CASE(72,8,1)
    GROUP_CASE(73,8,1)   GROUP_CASE(74,8,1)   GROUP_CASE(75,8,1)   GROUP_CASE(76,8,1)
    GROUP_CASE(77,8,1)   GROUP_CASE(78,8,1)   GROUP_CASE(79,8,1)   GROUP_CASE(80,8,1)
    GROUP_CASE(81,8,1)   GROUP_CASE(82,8,1)   GROUP_CASE(83,8,1)   GROUP_CASE(84,8,1)
    GROUP_CASE(85,8,1)   GROUP_CASE(86,8,1)   GROUP_CASE(87,8,1)   GROUP_CASE(88,8,1)
    GROUP_CASE(89,8,1)   GROUP_CASE(90,8,1)   GROUP_CASE(91,8,1)   GROUP_CASE(92,8,1)
    GROUP_CASE(93,8,1)   GROUP_CASE(94,8,1)   GROUP_CASE(95,8,1)   GROUP_CASE(96,8,1)
#endif
    default:
      printf("Adding group case to outer fluxm computation!\n");
      assert(false);
  }
#undef GROUP_CASE
}
__global__
void gpu_outer_convergence(const Point<3> origin,
                           const AccessorRO<double,3> fa_flux0,
                           const AccessorRO<double,3> fa_flux0po,
                           const double epsi,
                           const DeferredBuffer<int,1> results,
                           const int results_offset)
{
  // We know there is never more than 32 warps in a CTA
  __shared__ int trampoline[32];
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  const int z = blockIdx.z * blockDim.z + threadIdx.z;
  const Point<3> p = origin + Point<3>(x,y,z);
  const double *flux0_ptr = fa_flux0.ptr(p);
  const double *flux0po_ptr = fa_flux0po.ptr(p);
  const double tolr = 1.0e-12;
  double flux0po = *flux0po_ptr;
  double df = 1.0;
  if (fabs(flux0po) < tolr)
  {
    flux0po = 1.0;
    df = 0.0;
  }
  double flux0 = *flux0_ptr;
  df = fabs( (flux0 / flux0po) - df );
  int local_converged = 1;
  // The (df >= -INFINITY) test filters out NaN values of df
  if ((df >= -INFINITY) && (df > epsi))
    local_converged = 0;
  // Perform a local reduction inside the CTA
  // Butterfly reduction across all threads in all warps
  for (int i = 16; i >= 1; i/=2)
    local_converged += __shfl_xor_sync(0xffffffff, local_converged, i, 32);
  unsigned laneid;
  asm volatile("mov.u32 %0, %%laneid;" : "=r"(laneid) : );
  unsigned warpid =
    ((threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x) >> 5;
  // First thread in each warp writes out all values
  if (laneid == 0)
    trampoline[warpid] = local_converged;
  __syncthreads();
  // Butterfly reduction across all threads in the first warp
  if (warpid == 0)
  {
    unsigned numwarps = (blockDim.x * blockDim.y * blockDim.z) >> 5;
    local_converged = (laneid < numwarps) ? trampoline[laneid] : 0;
    for (int i = 16; i >= 1; i/=2)
      local_converged += __shfl_xor_sync(0xffffffff, local_converged, i, 32);
    // First thread does the write for the whole CTA
    if (laneid == 0)
      results.write(Point<1>(results_offset +
            (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x),
          local_converged);
  }
}

__global__
void gpu_sum_outer_convergence(const DeferredBuffer<int,1> buffer,
                               const DeferredValue<bool> result,
                               const size_t total_blocks,
                               const int expected)
{
  __shared__ int trampoline[32];
  int offset = threadIdx.x;
  int total = 0;
  while (offset < total_blocks)
  {
    total += buffer.read(Point<1>(offset));
    offset += blockDim.x;
  }
  for (int i = 16; i >= 1; i/=2)
    total += __shfl_xor_sync(0xffffffff, total, i, 32);
  unsigned laneid;
  asm volatile("mov.u32 %0, %%laneid;" : "=r"(laneid) : );
  unsigned warpid = threadIdx.x >> 5;
  // Write results in the trampoline
  if (laneid == 0)
    trampoline[warpid] = total;
  __syncthreads();
  if (warpid == 0)
  {
    unsigned numwarps = blockDim.x >> 5;
    total = (laneid < numwarps) ? trampoline[laneid] : 0;
    for (int i = 16; i >= 1; i/=2)
      total += __shfl_xor_sync(0xffffffff, total, i, 32);
    if (laneid == 0)
      result.write(total == expected);
  }
}

__host__
void run_outer_convergence(Rect<3> subgrid_bounds,
                           const DeferredValue<bool> &result,
                           const std::vector<AccessorRO<double,3> > &fa_flux0,
                           const std::vector<AccessorRO<double,3> > &fa_flux0po,
                           const double epsi)
{
  // Launch the kernels
  const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1;
  const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1;
  const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1;
  dim3 block(gcd(x_range,32), gcd(y_range,4), gcd(z_range,4));
  dim3 grid(x_range/block.x, y_range/block.y, z_range/block.z);
  const size_t total_blocks = grid.x*grid.y*grid.z;
  assert(fa_flux0.size() == fa_flux0po.size());
  const Rect<1> bounds(Point<1>(0),Point<1>(total_blocks * fa_flux0.size() - 1));
  DeferredBuffer<int,1> buffer(bounds, Memory::GPU_FB_MEM);
  for (unsigned idx = 0; idx < fa_flux0.size(); idx++)
  {
    gpu_outer_convergence<<<grid,block>>>(subgrid_bounds.lo,
                                          fa_flux0[idx], fa_flux0po[idx],
                                          epsi, buffer, idx * total_blocks);
  }
  dim3 block2((bounds.hi[0]+1) > 1024 ? 1024 : (bounds.hi[0]+1),1,1);
  // Round up to the nearest multiple of warps
  while ((block2.x % 32) != 0)
    block2.x++;
  dim3 grid2(1,1,1);
  const int expected = x_range * y_range * z_range * fa_flux0.size();
  gpu_sum_outer_convergence<<<grid2,block2>>>(buffer, result,
                                              bounds.hi[0]+1, expected);
}
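#if 0
// Illustrative sketch only -- not part of the original source.  This is a
// minimal, self-contained kernel using the same two-level reduction pattern
// as gpu_outer_convergence above: a warp-level butterfly with
// __shfl_xor_sync, followed by a shared-memory trampoline that the first
// warp reduces.  The kernel name and parameters (example_block_sum, in, out,
// n) are made up for illustration; it assumes blockDim.x is a multiple of 32
// and at most 1024, and that *out is zeroed before launch, e.g.
//   example_block_sum<<<blocks, 256>>>(d_in, d_out, n);
__global__ void example_block_sum(const int *in, int *out, int n)
{
  __shared__ int trampoline[32];
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int value = (idx < n) ? in[idx] : 0;
  // Butterfly reduction across the 32 lanes of each warp
  for (int i = 16; i >= 1; i /= 2)
    value += __shfl_xor_sync(0xffffffff, value, i, 32);
  const unsigned laneid = threadIdx.x & 31;
  const unsigned warpid = threadIdx.x >> 5;
  // One partial sum per warp lands in the trampoline
  if (laneid == 0)
    trampoline[warpid] = value;
  __syncthreads();
  // First warp reduces the per-warp partials and accumulates globally
  if (warpid == 0)
  {
    const unsigned numwarps = blockDim.x >> 5;
    value = (laneid < numwarps) ? trampoline[laneid] : 0;
    for (int i = 16; i >= 1; i /= 2)
      value += __shfl_xor_sync(0xffffffff, value, i, 32);
    if (laneid == 0)
      atomicAdd(out, value);
  }
}
#endif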