Dataset columns (all string-valued; length ranges as reported):

  hip_filename     5 to 84
  hip_content      79 to 9.69M
  cuda_filename    4 to 83
  cuda_content     19 to 9.69M
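Each row pairs a CUDA source file (cuda_filename / cuda_content) with the HIP translation that hipify produced from it (hip_filename / hip_content). As orientation for the rows below, here is a minimal sketch of the kind of mechanical mapping the pairs exhibit; the kernel and variable names are hypothetical and do not come from the dataset.

    // Hypothetical HIP translation of a small CUDA program; each comment shows
    // the CUDA form that hipify would have rewritten.
    #include <hip/hip_runtime.h>                 // CUDA: #include <cuda_runtime.h>

    __global__ void scale(int n, float a, float* x) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;   // unchanged by hipify
        if (i < n) x[i] *= a;
    }

    int main() {
        const int n = 1 << 20;
        const int block = 256;
        float* d_x;
        hipMalloc(&d_x, n * sizeof(float));      // CUDA: cudaMalloc(&d_x, ...)
        // CUDA: scale<<<(n + block - 1) / block, block>>>(n, 2.0f, d_x);
        hipLaunchKernelGGL(scale, dim3((n + block - 1) / block), dim3(block), 0, 0,
                           n, 2.0f, d_x);
        hipDeviceSynchronize();                  // CUDA: cudaDeviceSynchronize()
        hipFree(d_x);                            // CUDA: cudaFree(d_x)
        return 0;
    }

The launch syntax is the largest visible difference: hipLaunchKernelGGL takes the kernel, grid and block dimensions, dynamic shared-memory bytes, and a stream before the kernel arguments, whereas CUDA keeps the configuration inside the triple chevrons.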
14e75643215a70806300cc4f0a42d4a06bab4873.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); __global__ void saxpy_kernel(int N, float alpha, float* x, float* y, float* result) { // compute overall index from position of thread in current block, // and given the block we are in int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) result[index] = alpha * x[index] + y[index]; } void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) { int totalBytes = sizeof(float) * 3 * N; // compute number of blocks and threads per block const int threadsPerBlock = 512; const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; float* device_x; float* device_y; float* device_result; // // TODO: allocate device memory buffers on the GPU using // hipMalloc. The started code issues warnings on build because // these buffers are used in the call to saxpy_kernel below // without being initialized. // hipMalloc(&device_x, N * sizeof(float)); hipMalloc(&device_y, N * sizeof(float)); hipMalloc(&device_result, N * sizeof(float)); // start timing after allocation of device memory. double startTime = CycleTimer::currentSeconds(); // // TODO: copy input arrays to the GPU using hipMemcpy // hipMemcpy(device_x, xarray, N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(device_y, yarray, N * sizeof(float), hipMemcpyHostToDevice); /*hipMemcpy(device_result, resultarray, N * sizeof(float), hipMemcpyHostToDevice);*/ // // TODO: insert time here to begin timing only the kernel // double kernel_start = CycleTimer::currentSeconds(); // run saxpy_kernel on the GPU hipLaunchKernelGGL(( saxpy_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, alpha, device_x, device_y, device_result); // // TODO: insert timer here to time only the kernel. Since the // kernel will run asynchronously with the calling CPU thread, you // need to call hipDeviceSynchronize() before your timer to // ensure the kernel running on the GPU has completed. (Otherwise // you will incorrectly observe that almost no time elapses!) // hipDeviceSynchronize(); double kernel_end = CycleTimer::currentSeconds(); // // TODO: copy result from GPU using hipMemcpy // hipMemcpy(resultarray, device_result, N * sizeof(float), hipMemcpyDeviceToHost); // end timing after result has been copied back into host memory. 
// The time elapsed between startTime and endTime is the total // time to copy data to the GPU, run the kernel, and copy the // result back to the CPU double endTime = CycleTimer::currentSeconds(); hipError_t errCode = hipPeekAtLastError(); if (errCode != hipSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode)); } double overallDuration = endTime - startTime; double kernelDuration = kernel_end - kernel_start; double copyDuration = overallDuration - kernelDuration; printf("Overall time: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); printf("Kernel time: %.3f ms\n", 1000.f * kernelDuration); printf("Copy time: %.3f ms\n", 1000.f * copyDuration); // // TODO free memory buffers on the GPU // hipFree(device_x); hipFree(device_y); hipFree(device_result); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
14e75643215a70806300cc4f0a42d4a06bab4873.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); __global__ void saxpy_kernel(int N, float alpha, float* x, float* y, float* result) { // compute overall index from position of thread in current block, // and given the block we are in int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) result[index] = alpha * x[index] + y[index]; } void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) { int totalBytes = sizeof(float) * 3 * N; // compute number of blocks and threads per block const int threadsPerBlock = 512; const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; float* device_x; float* device_y; float* device_result; // // TODO: allocate device memory buffers on the GPU using // cudaMalloc. The started code issues warnings on build because // these buffers are used in the call to saxpy_kernel below // without being initialized. // cudaMalloc(&device_x, N * sizeof(float)); cudaMalloc(&device_y, N * sizeof(float)); cudaMalloc(&device_result, N * sizeof(float)); // start timing after allocation of device memory. double startTime = CycleTimer::currentSeconds(); // // TODO: copy input arrays to the GPU using cudaMemcpy // cudaMemcpy(device_x, xarray, N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(device_y, yarray, N * sizeof(float), cudaMemcpyHostToDevice); /*cudaMemcpy(device_result, resultarray, N * sizeof(float), cudaMemcpyHostToDevice);*/ // // TODO: insert time here to begin timing only the kernel // double kernel_start = CycleTimer::currentSeconds(); // run saxpy_kernel on the GPU saxpy_kernel<<<blocks, threadsPerBlock>>>(N, alpha, device_x, device_y, device_result); // // TODO: insert timer here to time only the kernel. Since the // kernel will run asynchronously with the calling CPU thread, you // need to call cudaThreadSynchronize() before your timer to // ensure the kernel running on the GPU has completed. (Otherwise // you will incorrectly observe that almost no time elapses!) // cudaThreadSynchronize(); double kernel_end = CycleTimer::currentSeconds(); // // TODO: copy result from GPU using cudaMemcpy // cudaMemcpy(resultarray, device_result, N * sizeof(float), cudaMemcpyDeviceToHost); // end timing after result has been copied back into host memory. 
// The time elapsed between startTime and endTime is the total // time to copy data to the GPU, run the kernel, and copy the // result back to the CPU double endTime = CycleTimer::currentSeconds(); cudaError_t errCode = cudaPeekAtLastError(); if (errCode != cudaSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode)); } double overallDuration = endTime - startTime; double kernelDuration = kernel_end - kernel_start; double copyDuration = overallDuration - kernelDuration; printf("Overall time: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); printf("Kernel time: %.3f ms\n", 1000.f * kernelDuration); printf("Copy time: %.3f ms\n", 1000.f * copyDuration); // // TODO free memory buffers on the GPU // cudaFree(device_x); cudaFree(device_y); cudaFree(device_result); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
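In the saxpy pair above, the hipMalloc/hipMemcpy (cudaMalloc/cudaMemcpy) calls are unchecked and errors are surfaced only once, through hipPeekAtLastError / cudaPeekAtLastError after the kernel. Below is a minimal sketch of a return-code-checking wrapper one might add around such calls; HIP_CHECK is a hypothetical name, not something used anywhere in the dataset.

    #include <cstdio>
    #include <cstdlib>
    #include <hip/hip_runtime.h>

    // Hypothetical helper: abort with a readable message whenever a HIP runtime
    // call returns something other than hipSuccess.
    #define HIP_CHECK(call)                                                        \
        do {                                                                       \
            hipError_t err_ = (call);                                              \
            if (err_ != hipSuccess) {                                              \
                fprintf(stderr, "HIP error %d (%s) at %s:%d\n", (int)err_,         \
                        hipGetErrorString(err_), __FILE__, __LINE__);              \
                exit(EXIT_FAILURE);                                                \
            }                                                                      \
        } while (0)

    // Applied to the calls from the row above it would read, for example:
    //   HIP_CHECK(hipMalloc(&device_x, N * sizeof(float)));
    //   HIP_CHECK(hipMemcpy(device_x, xarray, N * sizeof(float), hipMemcpyHostToDevice));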
b337792a63bbb09d149f32ba45e2fc77d5fac438.hip
// !!! This is a file automatically generated by hipify!!! /// LSU EE 7722 GPU Microarchitecture // /// Homework 3 - Spring 2018 // // Assignment: http://www.ece.lsu.edu/koppel/gp/2018/hw03.pdf /// Documentation // // c++: http://en.cppreference.com // CUDA: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html // CUDA debugger: https://docs.nvidia.com/cuda/cuda-gdb/index.html #include <pthread.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <ctype.h> #include <time.h> #include <new> #include <hip/hip_runtime.h> #include <assert.h> #include <nperf.h> #include "util.h" #include <ptable.h> // Maximum size of matrix. #define N 64 // Make it easy to switch between float and double for vertex and matrix // elements. // typedef float Elt_Type; struct App { // Number of input and output vectors, that is, the size of the // input and output arrays. // int num_vecs; Elt_Type matrix[N][N]; // Host pointers to the input and output arrays, and to a CPU-computed // output array used for checking results. // Elt_Type *h_in, *h_out, *h_out_check; // // Note: h_in points to an array holding num_vecs N-element vectors, // and so the total size of h_in is num_vects * N elements. // GPU pointers to the input and output arrays. // Elt_Type *d_in, *d_out; // GPU pointers to the input and output arrays, cast to float4s. // float4 *d_in_f4, *d_out_f4; // // The compiler can emit more efficient load and store instructions // to float4 elements than to four consecutive floats. // // Note: These "_f4" pointers only work when Elt_Type is a float. // Size of matrix to use. int n; }; // In host address space. App app; // In device constant address space. __constant__ App d_app; typedef void (*KPtr)(Elt_Type *dout, const Elt_Type *din); template <int eN> __device__ void mxv_sh_ochunk() { /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. // Matrix size is eN by eN. // Since size in this case is a compile-time constant code will // be of higher quality. // First things, first. Make sure that the problem size // matches the specialization. const int tid = blockIdx.x * blockDim.x + threadIdx.x; assert( tid != 0 || eN == d_app.n ); const int CS = 8; // Chunk Size: Number of input vector components to read. const int num_threads = blockDim.x * gridDim.x; // First element used by this block. const int bl_start = blockIdx.x * blockDim.x / CS; const int stop = d_app.num_vecs; const int inc = num_threads / CS; const int thd_c_offset = threadIdx.x % CS; const int thd_r_offset = threadIdx.x % CS; const int thd_v_offset = threadIdx.x / CS; const int MAX_BLOCK_SIZE = 1024; __shared__ Elt_Type vxfer[MAX_BLOCK_SIZE]; // Maximum number of output vector components written per thread, // based on vector size, eN. const int NL = eN / CS; /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. for ( int hb = bl_start; hb<stop; hb += inc ) { // Initialize output vector components to zero. Elt_Type vout[NL]; for ( auto& v: vout ) v = 0; #pragma unroll for ( int c=0; c<eN; c += CS ) { // This thread along with CS-1 of its neighbors load CS // components of an input vector. // vxfer[threadIdx.x] = d_app.d_in[ ( hb + thd_v_offset ) * eN + c + thd_c_offset ]; // Copy input vector components from shared to local address space. 
// Elt_Type vin[CS]; for ( int cc=0; cc<CS; cc++ ) vin[cc] = vxfer[ thd_v_offset * CS + cc ]; // Using the CS input vector components, compute part // of our NL output vector components. for ( int rr=0; rr<NL; rr++ ) { const int r = rr * CS + thd_r_offset; for ( int cc=0; cc<CS; cc++ ) vout[rr] += d_app.matrix[r][c+cc] * vin[cc]; } } #pragma unroll // Write output vector components to global memory. for ( int rr=0; rr<NL; rr++ ) { const int r = rr * CS + thd_r_offset; d_app.d_out[ ( hb + thd_v_offset ) * eN + r ] = vout[rr]; } } } // Instantiate versions for 16 by 16 and 32 by 32 matrices. extern "C" __global__ void mxv_sh_ochunk_64(){ mxv_sh_ochunk<64>(); } extern "C" __global__ void mxv_sh_ochunk_32(){ mxv_sh_ochunk<32>(); } extern "C" __global__ void mxv_sh_ochunk_16(){ mxv_sh_ochunk<16>(); } extern "C" __global__ void mxv_sh_ochunk_mn() { /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. // Matrix size is in d_app structure. d_app.n by d_app.n. const int CS = 8; // Chunk Size: Number of input vector components to read. const int num_threads = blockDim.x * gridDim.x; // First element used by this block. const int bl_start = blockIdx.x * blockDim.x / CS; const int stop = d_app.num_vecs; const int inc = num_threads / CS; const int thd_c_offset = threadIdx.x % CS; const int thd_r_offset = thd_c_offset; const int thd_v_offset = threadIdx.x / CS; const int MAX_BLOCK_SIZE = 1024; __shared__ Elt_Type vxfer[MAX_BLOCK_SIZE]; const int n = d_app.n; // Maximum number of output vector components written per thread, // based on maximum vector size, N. const int NL = N / CS; // Number of output vector components written per thread based on // vector size, n. const int nl = n / CS; /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. for ( int hb = bl_start; hb<stop; hb += inc ) { // Initialize output vector components to zero. Note that the // code initializes for the maximum number of components // assigned to this thread, NL, not the actual number, nl or // nl-1. Elt_Type vout[NL]; for ( auto& v: vout ) v = 0; for ( int c=0; c<n; c += CS ) { // This thread along with CS-1 of its neighbors load CS // components of an input vector. // vxfer[threadIdx.x] = d_app.d_in[ ( hb + thd_v_offset ) * n + c + thd_c_offset ]; // Copy input vector components from shared to local address space. // Elt_Type vin[CS]; for ( int cc=0; cc<CS; cc++ ) vin[cc] = vxfer[ thd_v_offset * CS + cc ]; // Using the CS input vector components, compute part // of our nl output vector components. for ( int rr=0; rr<nl; rr++ ) { const int r = rr * CS + thd_r_offset; for ( int cc=0; cc<CS; cc++ ) vout[rr] += d_app.matrix[r][c+cc] * vin[cc]; } } /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. // Write output vector components to global memory. for ( int rr=0; rr<nl; rr++ ) { const int r = rr * CS + thd_r_offset; d_app.d_out[ ( hb + thd_v_offset ) * n + r ] = vout[rr]; } } /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. } extern "C" __global__ void mxv_sh_ochunk_sol_mn() { /// PUT SOLUTION IN THIS ROUTINE . // Matrix size is in d_app structure. d_app.n by d_app.n. const int CS = 8; // Chunk Size: Number of input vector components to read. const int num_threads = blockDim.x * gridDim.x; // First element used by this block. 
const int bl_start = blockIdx.x * blockDim.x / CS; const int stop = d_app.num_vecs; const int inc = num_threads / CS; const int thd_c_offset = threadIdx.x % CS; const int thd_r_offset = thd_c_offset; const int thd_v_offset = threadIdx.x / CS; const int MAX_BLOCK_SIZE = 1024; __shared__ Elt_Type vxfer[MAX_BLOCK_SIZE]; const int n = d_app.n; // Maximum number of output vector components written per thread, // based on maximum vector size, N. const int NL = N / CS; // Number of output vector components written per thread based on // vector size, n. const int nl = n / CS; /// PUT SOLUTION IN THIS ROUTINE . for ( int hb = bl_start; hb<stop; hb += inc ) { // Initialize output vector components to zero. Note that the // code initializes for the maximum number of components // assigned to this thread, NL, not the actual number, nl or // nl-1. Elt_Type vout[NL]; for ( auto& v: vout ) v = 0; for ( int c=0; c<n; c += CS ) { // This thread along with CS-1 of its neighbors load CS // components of an input vector. // vxfer[threadIdx.x] = d_app.d_in[ ( hb + thd_v_offset ) * n + c + thd_c_offset ]; // Copy input vector components from shared to local address space. // Elt_Type vin[CS]; for ( int cc=0; cc<CS; cc++ ) vin[cc] = vxfer[ thd_v_offset * CS + cc ]; // Using the CS input vector components, compute part // of our nl output vector components. for ( int rr=0; rr<nl; rr++ ) { const int r = rr * CS + thd_r_offset; for ( int cc=0; cc<CS; cc++ ) vout[rr] += d_app.matrix[r][c+cc] * vin[cc]; } } // Write output vector components to global memory. for ( int rr=0; rr<nl; rr++ ) { const int r = rr * CS + thd_r_offset; d_app.d_out[ ( hb + thd_v_offset ) * n + r ] = vout[rr]; } } /// PUT SOLUTION IN THIS ROUTINE . } GPU_Info print_gpu_and_kernel_info() { GPU_Info info; gpu_info_print(); // Determine which GPU to use. (For starters, if there's more than // one, choose the one connected to the display.) // int dev = gpu_choose_index(); CE(hipSetDevice(dev)); printf("Using GPU %d\n",dev); info.get_gpu_info(dev); #if 0 info.GET_INFO(mxv_sh_ochunk_sol_mn); #else info.GET_INFO(mxv_sh_ochunk_16); info.GET_INFO(mxv_sh_ochunk_32); info.GET_INFO(mxv_sh_ochunk_64); info.GET_INFO(mxv_sh_ochunk_sol_mn); info.GET_INFO(mxv_sh_ochunk_mn); #endif // Print information about kernel. // printf("\nCUDA Kernel Resource Usage:\n"); for ( int i=0; i<info.num_kernels; i++ ) { printf("For %s:\n", info.ki[i].name); printf(" %6zd shared, %zd const, %zd loc, %d regs; " "%d max threads per block.\n", info.ki[i].cfa.sharedSizeBytes, info.ki[i].cfa.constSizeBytes, info.ki[i].cfa.localSizeBytes, info.ki[i].cfa.numRegs, info.ki[i].cfa.maxThreadsPerBlock); } return info; } int main(int argc, char **argv) { // When debug true: matrix is identity and for each vector v_i=i // (component i is set to value i). const bool debug = false; // Must be called before any CUDA API calls. NPerf_init(); // Get info about GPU and each kernel. // GPU_Info info = print_gpu_and_kernel_info(); const int num_mp = info.cuda_prop.multiProcessorCount; // Examine argument 1, block count, default is number of MPs. // const int arg1_int = argc < 2 ? num_mp : atoi(argv[1]); const int num_blocks = arg1_int == 0 ? num_mp : arg1_int < 0 ? -arg1_int * num_mp : arg1_int; // Examine argument 2, number of threads per block. // const bool opt_p = argc >= 3 && string(argv[2]) == "p"; const int thd_per_block_arg = argc < 3 ? 1024 : opt_p ? 0 : atoi(argv[2]); const int thd_per_block_goal = thd_per_block_arg == 0 ? 
1024 : thd_per_block_arg; const int num_threads = num_blocks * thd_per_block_goal; const bool vary_warps = thd_per_block_arg == 0; // Examine argument 3, size of array in MiB. Fractional values okay. // app.num_vecs = argc < 4 ? 1 << 20 : int( atof(argv[3]) * (1<<20) ); if ( num_threads <= 0 || app.num_vecs <= 0 ) { printf("Usage: %s [ NUM_CUDA_BLOCKS ] [THD_PER_BLOCK|p] " "[DATA_SIZE_MiB]\n", argv[0]); exit(1); } // Collect performance data using a wrapper to NVIDIA CUPTI event // counter API. // NPerf_metric_collect("inst_executed"); if ( opt_p ) { NPerf_metric_collect("gld_efficiency"); NPerf_metric_collect("l2_read_throughput"); NPerf_metric_collect("l2_write_throughput"); NPerf_metric_collect("flop_sp_efficiency"); NPerf_metric_collect("shared_efficiency"); } // // Note: The more metrics that are collected, the more times a kernel // will need to be run. if ( false ) NPerf_metrics_off(); const size_t in_size_elts = size_t(app.num_vecs) * N; const size_t in_size_bytes = in_size_elts * sizeof( app.h_in[0] ); const size_t out_size_elts = size_t(app.num_vecs) * N; const size_t out_size_bytes = out_size_elts * sizeof( app.h_out[0] ); const int overrun_size_elts = thd_per_block_goal * N * 32; const int overrun_size_bytes = overrun_size_elts * sizeof( app.h_out[0] ); // Allocate storage for CPU copy of data. // app.h_in = new Elt_Type[ in_size_elts ]; app.h_out = new Elt_Type[ out_size_elts ]; app.h_out_check = NULL; // Allocate storage for GPU copy of data. // CE( hipMalloc( &app.d_in, in_size_bytes + overrun_size_bytes ) ); app.d_in_f4 = (float4*) app.d_in; CE( hipMalloc( &app.d_out, out_size_bytes + overrun_size_bytes ) ); app.d_out_f4 = (float4*) app.d_out; printf ("Max matrix: %d x %d. Num vectors (S): %d. Grid size: %d blocks.\n", N, N, app.num_vecs, num_blocks); printf("Elements per thread: %.1f (4 wp) - %.1f (32 wp)\n", double(app.num_vecs) / ( num_blocks * 4 * 32 ), double(app.num_vecs) / ( num_blocks * 32 * 32 )); // Initialize input array. // for ( int i=0; i<app.num_vecs; i++ ) for ( int c=0; c<N; c++ ) app.h_in[ i * N + c ] = debug ? Elt_Type(c) : drand48(); // Initialize matrix. // for ( int r=0; r<N; r++ ) for ( int c=0; c<N; c++ ) app.matrix[r][c] = debug ? r == c : drand48(); struct Shape { Shape(int np):n(np),h_out_check(app.num_vecs*np){}; Shape():n(0){}; int n; vector<Elt_Type> h_out_check; }; vector<int> sizes = { 64, 32, 16 }; map<int,Shape> shapes; for ( auto n: sizes ) shapes.emplace(n,n); // Compute correct answers. // for ( auto& sh: shapes ) { Shape& s = sh.second; assert( s.n <= N ); for ( int i=0; i<app.num_vecs; i++ ) for ( int r=0; r<s.n; r++ ) { s.h_out_check[ i * s.n + r ] = 0; for ( int c=0; c<s.n; c++ ) s.h_out_check[ i * s.n + r ] += app.h_in[ i * s.n + c ] * app.matrix[r][c]; } } app.h_out_check = shapes[N].h_out_check.data(); struct KShape { KShape(int knop, Shape *sp, Kernel_Info *kip):kno(knop),s(sp),ki(kip){}; const int kno; Shape* const s; Kernel_Info* const ki; }; vector<KShape> kshapes; for ( int kernel = 0; kernel < info.num_kernels; kernel++ ) { Kernel_Info* const ki = &info.ki[kernel]; string kn = ki->name; // Eagerly awaiting c++20 ends_with. RHEL should have it by 2035. 
string suffix = kn.substr(kn.size()-3,3); if ( suffix == "_mn" ) for ( auto& sh: shapes ) kshapes.emplace_back(kernel,&sh.second,ki); else if ( suffix == "_64" ) kshapes.emplace_back(kernel,&shapes[64],ki); else if ( suffix == "_32" ) kshapes.emplace_back(kernel,&shapes[32],ki); else if ( suffix == "_16" ) kshapes.emplace_back(kernel,&shapes[16],ki); } double elapsed_time_s = 86400; // Reassigned to minimum run time. const int output_width = stdout_width_get(); { // Prepare events used for timing. // hipEvent_t gpu_start_ce, gpu_stop_ce; CE(hipEventCreate(&gpu_start_ce)); CE(hipEventCreate(&gpu_stop_ce)); // Copy input array from CPU to GPU. // CE( hipMemcpy ( app.d_in, app.h_in, in_size_bytes, hipMemcpyHostToDevice ) ); // Launch kernel multiple times and keep track of the best time. printf("Launching with %d blocks of up to %d threads. \n", num_blocks, thd_per_block_goal); for ( KShape ks: kshapes ) { const int kernel = ks.kno; Kernel_Info* const ki = ks.ki; hipFuncAttributes& cfa = ki->cfa; const int wp_limit = cfa.maxThreadsPerBlock >> 5; const int thd_limit = wp_limit << 5; const int thd_per_block_no_vary = min(thd_per_block_goal,thd_limit); const int wp_start = 4; const int wp_stop = vary_warps ? wp_limit : wp_start; const int wp_inc = 4; app.n = ks.s ? ks.s->n : N; assert( app.n > 0 ); const int64_t num_ops = // Multiply-adds. int64_t(app.n) * app.n * app.num_vecs; // Amount of data in and out of GPU chip. const int64_t amt_data_bytes = sizeof(app.h_in[0]) * app.num_vecs * ( app.n + app.n ); // Copy App structure to GPU. // CE( hipMemcpyToSymbol ( d_app, &app, sizeof(app), 0, hipMemcpyHostToDevice ) ); pString msize = pStringF("(%d,%d)",app.n,app.n); pTable table; table.stream = stdout; for ( int wp_cnt = 0, wp_iter = wp_start; wp_cnt < wp_stop && ( wp_cnt = min(wp_iter,wp_stop) ); wp_iter += wp_inc ) { const int thd_per_block = vary_warps ? wp_cnt << 5 : thd_per_block_no_vary; // Zero the output array. // CE(hipMemset(app.d_out,0,out_size_bytes)); // Measure execution time starting "now", which is after data // set to GPU. // CE(hipEventRecord(gpu_start_ce,0)); // Launch Kernel // for ( NPerf_data_reset(); NPerf_need_run_get(); ) KPtr(info.ki[kernel]hipLaunchKernelGGL((.func_ptr)), dim3(num_blocks),dim3(thd_per_block), 0, 0, app.d_out,app.d_in); // Stop measuring execution time now, which is before is data // returned from GPU. // CE(hipEventRecord(gpu_stop_ce,0)); CE(hipEventSynchronize(gpu_stop_ce)); float cuda_time_ms = -1.1; CE(hipEventElapsedTime(&cuda_time_ms,gpu_start_ce,gpu_stop_ce)); const double this_elapsed_time_s = NPerf_metrics_collection_get() ? NPerf_kernel_et_get() : cuda_time_ms * 0.001; const double thpt_compute_gflops = num_ops / this_elapsed_time_s * 1e-9; const double thpt_data_gbps = amt_data_bytes / this_elapsed_time_s * 1e-9; if ( vary_warps ) { const double comp_frac = 1e9 * thpt_compute_gflops / ( sizeof(Elt_Type) == 4 ? info.chip_sp_flops : sizeof(Elt_Type) == 8 ? info.chip_dp_flops : 1 ); const double comm_frac = min(2.0,1e9 * thpt_data_gbps / info.chip_bw_Bps); // Number of warps, rounded up. // const int num_wps = ( thd_per_block + 31 ) >> 5; // The maximum number of active blocks per MP for this // kernel when launched with a block size of thd_per_block. // const int max_bl_per_mp = info.get_max_active_blocks_per_mp(kernel,thd_per_block); // Compute number of blocks available per MP based only on // the number of blocks. This may be larger than the // number of blocks that can run. 
// const int bl_per_mp_available = 0.999 + double(num_blocks) / num_mp; // The number of active blocks is the minimum of what // can fit and how many are available. // const int bl_per_mp = min( bl_per_mp_available, max_bl_per_mp ); // Based on the number of blocks, compute the num ber of warps. // const int act_wps = num_wps * bl_per_mp; if ( wp_cnt == wp_start ) printf("Kernel %s%s:\n", info.ki[kernel].name, msize.s); table.row_start(); table.entry("wp",num_wps); table.entry("ac",act_wps); table.entry("t/s","%6.0f", this_elapsed_time_s * 1e6); table.entry ("I/op","%4.1f", NPerf_metric_value_get("inst_executed") * 32.0 / num_ops ); if ( opt_p ) { table.entry ("Ld eff","%5.1f%%", NPerf_metric_value_get("gld_efficiency")); table.entry ("SM eff","%5.1f%%", NPerf_metric_value_get("shared_efficiency")); table.entry ("L2r","%5.1f", NPerf_metric_value_get("l2_read_throughput") * 1e-9 ); table.entry ("L2w","%5.1f", NPerf_metric_value_get("l2_write_throughput") * 1e-9 ); table.entry ("FP%","%5.1f%%", NPerf_metric_value_get("flop_sp_efficiency")); } const bool plot_bandwidth = false; table.entry("GB/s","%4.0f", thpt_data_gbps); table.entry("FP ","%4.0f", thpt_compute_gflops); const int max_st_len = max(5, output_width - 1 - table.row_len_get() ); pStringF fmt("%%-%ds",max_st_len); string util_hdr = plot_bandwidth ? "Bandwidth Util" : "FP Utilization"; const double frac = plot_bandwidth ? comm_frac : comp_frac; util_hdr += string(max_st_len - util_hdr.length(),'-'); table.entry (util_hdr,fmt, string( size_t(max(0.0,frac*max_st_len)), '*' ), pTable::pT_Left); } else { printf ("%-20s %2d wp %7.0f s %8.3f GF %8.3f GB/s " "%5.2f I/F\n", info.ki[kernel].name, (thd_per_block + 31 ) >> 5, this_elapsed_time_s * 1e6, thpt_compute_gflops, thpt_data_gbps, NPerf_metric_value_get("inst_executed") * 32 / num_ops ); } table.row_end(); elapsed_time_s = min(this_elapsed_time_s,elapsed_time_s); // Copy output array from GPU to CPU. // CE( hipMemcpy ( app.h_out, app.d_out, out_size_bytes, hipMemcpyDeviceToHost) ); int err_count = 0; Elt_Type* const h_out_check = ks.s ? ks.s->h_out_check.data() : app.h_out_check; for ( int i=0; i<app.num_vecs; i++ ) for ( int r=0; r<app.n; r++ ) { const int idx = i * app.n + r; if ( fabs( h_out_check[idx] - app.h_out[idx] ) > 1e-5 ) { err_count++; if ( err_count < 5 ) printf ("Error at vec %d elt %d: %.7f != %.7f (correct)\n", i, r, app.h_out[idx], h_out_check[idx] ); } } if ( err_count ) printf("Total errors %d\n", err_count); } } } }
b337792a63bbb09d149f32ba45e2fc77d5fac438.cu
/// LSU EE 7722 GPU Microarchitecture // /// Homework 3 - Spring 2018 // // Assignment: http://www.ece.lsu.edu/koppel/gp/2018/hw03.pdf /// Documentation // // c++: http://en.cppreference.com // CUDA: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html // CUDA debugger: https://docs.nvidia.com/cuda/cuda-gdb/index.html #include <pthread.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <ctype.h> #include <time.h> #include <new> #include <cuda_runtime.h> #include <assert.h> #include <nperf.h> #include "util.h" #include <ptable.h> // Maximum size of matrix. #define N 64 // Make it easy to switch between float and double for vertex and matrix // elements. // typedef float Elt_Type; struct App { // Number of input and output vectors, that is, the size of the // input and output arrays. // int num_vecs; Elt_Type matrix[N][N]; // Host pointers to the input and output arrays, and to a CPU-computed // output array used for checking results. // Elt_Type *h_in, *h_out, *h_out_check; // // Note: h_in points to an array holding num_vecs N-element vectors, // and so the total size of h_in is num_vects * N elements. // GPU pointers to the input and output arrays. // Elt_Type *d_in, *d_out; // GPU pointers to the input and output arrays, cast to float4s. // float4 *d_in_f4, *d_out_f4; // // The compiler can emit more efficient load and store instructions // to float4 elements than to four consecutive floats. // // Note: These "_f4" pointers only work when Elt_Type is a float. // Size of matrix to use. int n; }; // In host address space. App app; // In device constant address space. __constant__ App d_app; typedef void (*KPtr)(Elt_Type *dout, const Elt_Type *din); template <int eN> __device__ void mxv_sh_ochunk() { /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. // Matrix size is eN by eN. // Since size in this case is a compile-time constant code will // be of higher quality. // First things, first. Make sure that the problem size // matches the specialization. const int tid = blockIdx.x * blockDim.x + threadIdx.x; assert( tid != 0 || eN == d_app.n ); const int CS = 8; // Chunk Size: Number of input vector components to read. const int num_threads = blockDim.x * gridDim.x; // First element used by this block. const int bl_start = blockIdx.x * blockDim.x / CS; const int stop = d_app.num_vecs; const int inc = num_threads / CS; const int thd_c_offset = threadIdx.x % CS; const int thd_r_offset = threadIdx.x % CS; const int thd_v_offset = threadIdx.x / CS; const int MAX_BLOCK_SIZE = 1024; __shared__ Elt_Type vxfer[MAX_BLOCK_SIZE]; // Maximum number of output vector components written per thread, // based on vector size, eN. const int NL = eN / CS; /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. for ( int hb = bl_start; hb<stop; hb += inc ) { // Initialize output vector components to zero. Elt_Type vout[NL]; for ( auto& v: vout ) v = 0; #pragma unroll for ( int c=0; c<eN; c += CS ) { // This thread along with CS-1 of its neighbors load CS // components of an input vector. // vxfer[threadIdx.x] = d_app.d_in[ ( hb + thd_v_offset ) * eN + c + thd_c_offset ]; // Copy input vector components from shared to local address space. // Elt_Type vin[CS]; for ( int cc=0; cc<CS; cc++ ) vin[cc] = vxfer[ thd_v_offset * CS + cc ]; // Using the CS input vector components, compute part // of our NL output vector components. 
for ( int rr=0; rr<NL; rr++ ) { const int r = rr * CS + thd_r_offset; for ( int cc=0; cc<CS; cc++ ) vout[rr] += d_app.matrix[r][c+cc] * vin[cc]; } } #pragma unroll // Write output vector components to global memory. for ( int rr=0; rr<NL; rr++ ) { const int r = rr * CS + thd_r_offset; d_app.d_out[ ( hb + thd_v_offset ) * eN + r ] = vout[rr]; } } } // Instantiate versions for 16 by 16 and 32 by 32 matrices. extern "C" __global__ void mxv_sh_ochunk_64(){ mxv_sh_ochunk<64>(); } extern "C" __global__ void mxv_sh_ochunk_32(){ mxv_sh_ochunk<32>(); } extern "C" __global__ void mxv_sh_ochunk_16(){ mxv_sh_ochunk<16>(); } extern "C" __global__ void mxv_sh_ochunk_mn() { /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. // Matrix size is in d_app structure. d_app.n by d_app.n. const int CS = 8; // Chunk Size: Number of input vector components to read. const int num_threads = blockDim.x * gridDim.x; // First element used by this block. const int bl_start = blockIdx.x * blockDim.x / CS; const int stop = d_app.num_vecs; const int inc = num_threads / CS; const int thd_c_offset = threadIdx.x % CS; const int thd_r_offset = thd_c_offset; const int thd_v_offset = threadIdx.x / CS; const int MAX_BLOCK_SIZE = 1024; __shared__ Elt_Type vxfer[MAX_BLOCK_SIZE]; const int n = d_app.n; // Maximum number of output vector components written per thread, // based on maximum vector size, N. const int NL = N / CS; // Number of output vector components written per thread based on // vector size, n. const int nl = n / CS; /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. for ( int hb = bl_start; hb<stop; hb += inc ) { // Initialize output vector components to zero. Note that the // code initializes for the maximum number of components // assigned to this thread, NL, not the actual number, nl or // nl-1. Elt_Type vout[NL]; for ( auto& v: vout ) v = 0; for ( int c=0; c<n; c += CS ) { // This thread along with CS-1 of its neighbors load CS // components of an input vector. // vxfer[threadIdx.x] = d_app.d_in[ ( hb + thd_v_offset ) * n + c + thd_c_offset ]; // Copy input vector components from shared to local address space. // Elt_Type vin[CS]; for ( int cc=0; cc<CS; cc++ ) vin[cc] = vxfer[ thd_v_offset * CS + cc ]; // Using the CS input vector components, compute part // of our nl output vector components. for ( int rr=0; rr<nl; rr++ ) { const int r = rr * CS + thd_r_offset; for ( int cc=0; cc<CS; cc++ ) vout[rr] += d_app.matrix[r][c+cc] * vin[cc]; } } /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. // Write output vector components to global memory. for ( int rr=0; rr<nl; rr++ ) { const int r = rr * CS + thd_r_offset; d_app.d_out[ ( hb + thd_v_offset ) * n + r ] = vout[rr]; } } /// DO NOT MODIFY THIS ROUTINE. USE IT FOR COMPARISON. // Instead, modify mxv_sh_ochunk_sol_mn. } extern "C" __global__ void mxv_sh_ochunk_sol_mn() { /// PUT SOLUTION IN ↓↓ THIS ROUTINE ↓↓. // Matrix size is in d_app structure. d_app.n by d_app.n. const int CS = 8; // Chunk Size: Number of input vector components to read. const int num_threads = blockDim.x * gridDim.x; // First element used by this block. 
const int bl_start = blockIdx.x * blockDim.x / CS; const int stop = d_app.num_vecs; const int inc = num_threads / CS; const int thd_c_offset = threadIdx.x % CS; const int thd_r_offset = thd_c_offset; const int thd_v_offset = threadIdx.x / CS; const int MAX_BLOCK_SIZE = 1024; __shared__ Elt_Type vxfer[MAX_BLOCK_SIZE]; const int n = d_app.n; // Maximum number of output vector components written per thread, // based on maximum vector size, N. const int NL = N / CS; // Number of output vector components written per thread based on // vector size, n. const int nl = n / CS; /// PUT SOLUTION IN ↕↕ THIS ROUTINE ↕↕. for ( int hb = bl_start; hb<stop; hb += inc ) { // Initialize output vector components to zero. Note that the // code initializes for the maximum number of components // assigned to this thread, NL, not the actual number, nl or // nl-1. Elt_Type vout[NL]; for ( auto& v: vout ) v = 0; for ( int c=0; c<n; c += CS ) { // This thread along with CS-1 of its neighbors load CS // components of an input vector. // vxfer[threadIdx.x] = d_app.d_in[ ( hb + thd_v_offset ) * n + c + thd_c_offset ]; // Copy input vector components from shared to local address space. // Elt_Type vin[CS]; for ( int cc=0; cc<CS; cc++ ) vin[cc] = vxfer[ thd_v_offset * CS + cc ]; // Using the CS input vector components, compute part // of our nl output vector components. for ( int rr=0; rr<nl; rr++ ) { const int r = rr * CS + thd_r_offset; for ( int cc=0; cc<CS; cc++ ) vout[rr] += d_app.matrix[r][c+cc] * vin[cc]; } } // Write output vector components to global memory. for ( int rr=0; rr<nl; rr++ ) { const int r = rr * CS + thd_r_offset; d_app.d_out[ ( hb + thd_v_offset ) * n + r ] = vout[rr]; } } /// PUT SOLUTION IN ↑↑ THIS ROUTINE ↑↑. } GPU_Info print_gpu_and_kernel_info() { GPU_Info info; gpu_info_print(); // Determine which GPU to use. (For starters, if there's more than // one, choose the one connected to the display.) // int dev = gpu_choose_index(); CE(cudaSetDevice(dev)); printf("Using GPU %d\n",dev); info.get_gpu_info(dev); #if 0 info.GET_INFO(mxv_sh_ochunk_sol_mn); #else info.GET_INFO(mxv_sh_ochunk_16); info.GET_INFO(mxv_sh_ochunk_32); info.GET_INFO(mxv_sh_ochunk_64); info.GET_INFO(mxv_sh_ochunk_sol_mn); info.GET_INFO(mxv_sh_ochunk_mn); #endif // Print information about kernel. // printf("\nCUDA Kernel Resource Usage:\n"); for ( int i=0; i<info.num_kernels; i++ ) { printf("For %s:\n", info.ki[i].name); printf(" %6zd shared, %zd const, %zd loc, %d regs; " "%d max threads per block.\n", info.ki[i].cfa.sharedSizeBytes, info.ki[i].cfa.constSizeBytes, info.ki[i].cfa.localSizeBytes, info.ki[i].cfa.numRegs, info.ki[i].cfa.maxThreadsPerBlock); } return info; } int main(int argc, char **argv) { // When debug true: matrix is identity and for each vector v_i=i // (component i is set to value i). const bool debug = false; // Must be called before any CUDA API calls. NPerf_init(); // Get info about GPU and each kernel. // GPU_Info info = print_gpu_and_kernel_info(); const int num_mp = info.cuda_prop.multiProcessorCount; // Examine argument 1, block count, default is number of MPs. // const int arg1_int = argc < 2 ? num_mp : atoi(argv[1]); const int num_blocks = arg1_int == 0 ? num_mp : arg1_int < 0 ? -arg1_int * num_mp : arg1_int; // Examine argument 2, number of threads per block. // const bool opt_p = argc >= 3 && string(argv[2]) == "p"; const int thd_per_block_arg = argc < 3 ? 1024 : opt_p ? 0 : atoi(argv[2]); const int thd_per_block_goal = thd_per_block_arg == 0 ? 
1024 : thd_per_block_arg; const int num_threads = num_blocks * thd_per_block_goal; const bool vary_warps = thd_per_block_arg == 0; // Examine argument 3, size of array in MiB. Fractional values okay. // app.num_vecs = argc < 4 ? 1 << 20 : int( atof(argv[3]) * (1<<20) ); if ( num_threads <= 0 || app.num_vecs <= 0 ) { printf("Usage: %s [ NUM_CUDA_BLOCKS ] [THD_PER_BLOCK|p] " "[DATA_SIZE_MiB]\n", argv[0]); exit(1); } // Collect performance data using a wrapper to NVIDIA CUPTI event // counter API. // NPerf_metric_collect("inst_executed"); if ( opt_p ) { NPerf_metric_collect("gld_efficiency"); NPerf_metric_collect("l2_read_throughput"); NPerf_metric_collect("l2_write_throughput"); NPerf_metric_collect("flop_sp_efficiency"); NPerf_metric_collect("shared_efficiency"); } // // Note: The more metrics that are collected, the more times a kernel // will need to be run. if ( false ) NPerf_metrics_off(); const size_t in_size_elts = size_t(app.num_vecs) * N; const size_t in_size_bytes = in_size_elts * sizeof( app.h_in[0] ); const size_t out_size_elts = size_t(app.num_vecs) * N; const size_t out_size_bytes = out_size_elts * sizeof( app.h_out[0] ); const int overrun_size_elts = thd_per_block_goal * N * 32; const int overrun_size_bytes = overrun_size_elts * sizeof( app.h_out[0] ); // Allocate storage for CPU copy of data. // app.h_in = new Elt_Type[ in_size_elts ]; app.h_out = new Elt_Type[ out_size_elts ]; app.h_out_check = NULL; // Allocate storage for GPU copy of data. // CE( cudaMalloc( &app.d_in, in_size_bytes + overrun_size_bytes ) ); app.d_in_f4 = (float4*) app.d_in; CE( cudaMalloc( &app.d_out, out_size_bytes + overrun_size_bytes ) ); app.d_out_f4 = (float4*) app.d_out; printf ("Max matrix: %d x %d. Num vectors (S): %d. Grid size: %d blocks.\n", N, N, app.num_vecs, num_blocks); printf("Elements per thread: %.1f (4 wp) - %.1f (32 wp)\n", double(app.num_vecs) / ( num_blocks * 4 * 32 ), double(app.num_vecs) / ( num_blocks * 32 * 32 )); // Initialize input array. // for ( int i=0; i<app.num_vecs; i++ ) for ( int c=0; c<N; c++ ) app.h_in[ i * N + c ] = debug ? Elt_Type(c) : drand48(); // Initialize matrix. // for ( int r=0; r<N; r++ ) for ( int c=0; c<N; c++ ) app.matrix[r][c] = debug ? r == c : drand48(); struct Shape { Shape(int np):n(np),h_out_check(app.num_vecs*np){}; Shape():n(0){}; int n; vector<Elt_Type> h_out_check; }; vector<int> sizes = { 64, 32, 16 }; map<int,Shape> shapes; for ( auto n: sizes ) shapes.emplace(n,n); // Compute correct answers. // for ( auto& sh: shapes ) { Shape& s = sh.second; assert( s.n <= N ); for ( int i=0; i<app.num_vecs; i++ ) for ( int r=0; r<s.n; r++ ) { s.h_out_check[ i * s.n + r ] = 0; for ( int c=0; c<s.n; c++ ) s.h_out_check[ i * s.n + r ] += app.h_in[ i * s.n + c ] * app.matrix[r][c]; } } app.h_out_check = shapes[N].h_out_check.data(); struct KShape { KShape(int knop, Shape *sp, Kernel_Info *kip):kno(knop),s(sp),ki(kip){}; const int kno; Shape* const s; Kernel_Info* const ki; }; vector<KShape> kshapes; for ( int kernel = 0; kernel < info.num_kernels; kernel++ ) { Kernel_Info* const ki = &info.ki[kernel]; string kn = ki->name; // Eagerly awaiting c++20 ends_with. RHEL should have it by 2035. 
string suffix = kn.substr(kn.size()-3,3); if ( suffix == "_mn" ) for ( auto& sh: shapes ) kshapes.emplace_back(kernel,&sh.second,ki); else if ( suffix == "_64" ) kshapes.emplace_back(kernel,&shapes[64],ki); else if ( suffix == "_32" ) kshapes.emplace_back(kernel,&shapes[32],ki); else if ( suffix == "_16" ) kshapes.emplace_back(kernel,&shapes[16],ki); } double elapsed_time_s = 86400; // Reassigned to minimum run time. const int output_width = stdout_width_get(); { // Prepare events used for timing. // cudaEvent_t gpu_start_ce, gpu_stop_ce; CE(cudaEventCreate(&gpu_start_ce)); CE(cudaEventCreate(&gpu_stop_ce)); // Copy input array from CPU to GPU. // CE( cudaMemcpy ( app.d_in, app.h_in, in_size_bytes, cudaMemcpyHostToDevice ) ); // Launch kernel multiple times and keep track of the best time. printf("Launching with %d blocks of up to %d threads. \n", num_blocks, thd_per_block_goal); for ( KShape ks: kshapes ) { const int kernel = ks.kno; Kernel_Info* const ki = ks.ki; cudaFuncAttributes& cfa = ki->cfa; const int wp_limit = cfa.maxThreadsPerBlock >> 5; const int thd_limit = wp_limit << 5; const int thd_per_block_no_vary = min(thd_per_block_goal,thd_limit); const int wp_start = 4; const int wp_stop = vary_warps ? wp_limit : wp_start; const int wp_inc = 4; app.n = ks.s ? ks.s->n : N; assert( app.n > 0 ); const int64_t num_ops = // Multiply-adds. int64_t(app.n) * app.n * app.num_vecs; // Amount of data in and out of GPU chip. const int64_t amt_data_bytes = sizeof(app.h_in[0]) * app.num_vecs * ( app.n + app.n ); // Copy App structure to GPU. // CE( cudaMemcpyToSymbol ( d_app, &app, sizeof(app), 0, cudaMemcpyHostToDevice ) ); pString msize = pStringF("(%d,%d)",app.n,app.n); pTable table; table.stream = stdout; for ( int wp_cnt = 0, wp_iter = wp_start; wp_cnt < wp_stop && ( wp_cnt = min(wp_iter,wp_stop) ); wp_iter += wp_inc ) { const int thd_per_block = vary_warps ? wp_cnt << 5 : thd_per_block_no_vary; // Zero the output array. // CE(cudaMemset(app.d_out,0,out_size_bytes)); // Measure execution time starting "now", which is after data // set to GPU. // CE(cudaEventRecord(gpu_start_ce,0)); // Launch Kernel // for ( NPerf_data_reset(); NPerf_need_run_get(); ) KPtr(info.ki[kernel].func_ptr)<<<num_blocks,thd_per_block>>> (app.d_out,app.d_in); // Stop measuring execution time now, which is before is data // returned from GPU. // CE(cudaEventRecord(gpu_stop_ce,0)); CE(cudaEventSynchronize(gpu_stop_ce)); float cuda_time_ms = -1.1; CE(cudaEventElapsedTime(&cuda_time_ms,gpu_start_ce,gpu_stop_ce)); const double this_elapsed_time_s = NPerf_metrics_collection_get() ? NPerf_kernel_et_get() : cuda_time_ms * 0.001; const double thpt_compute_gflops = num_ops / this_elapsed_time_s * 1e-9; const double thpt_data_gbps = amt_data_bytes / this_elapsed_time_s * 1e-9; if ( vary_warps ) { const double comp_frac = 1e9 * thpt_compute_gflops / ( sizeof(Elt_Type) == 4 ? info.chip_sp_flops : sizeof(Elt_Type) == 8 ? info.chip_dp_flops : 1 ); const double comm_frac = min(2.0,1e9 * thpt_data_gbps / info.chip_bw_Bps); // Number of warps, rounded up. // const int num_wps = ( thd_per_block + 31 ) >> 5; // The maximum number of active blocks per MP for this // kernel when launched with a block size of thd_per_block. // const int max_bl_per_mp = info.get_max_active_blocks_per_mp(kernel,thd_per_block); // Compute number of blocks available per MP based only on // the number of blocks. This may be larger than the // number of blocks that can run. 
// const int bl_per_mp_available = 0.999 + double(num_blocks) / num_mp; // The number of active blocks is the minimum of what // can fit and how many are available. // const int bl_per_mp = min( bl_per_mp_available, max_bl_per_mp ); // Based on the number of blocks, compute the num ber of warps. // const int act_wps = num_wps * bl_per_mp; if ( wp_cnt == wp_start ) printf("Kernel %s%s:\n", info.ki[kernel].name, msize.s); table.row_start(); table.entry("wp",num_wps); table.entry("ac",act_wps); table.entry("t/µs","%6.0f", this_elapsed_time_s * 1e6); table.entry ("I/op","%4.1f", NPerf_metric_value_get("inst_executed") * 32.0 / num_ops ); if ( opt_p ) { table.entry ("Ld eff","%5.1f%%", NPerf_metric_value_get("gld_efficiency")); table.entry ("SM eff","%5.1f%%", NPerf_metric_value_get("shared_efficiency")); table.entry ("L2rθ","%5.1f", NPerf_metric_value_get("l2_read_throughput") * 1e-9 ); table.entry ("L2wθ","%5.1f", NPerf_metric_value_get("l2_write_throughput") * 1e-9 ); table.entry ("FP%","%5.1f%%", NPerf_metric_value_get("flop_sp_efficiency")); } const bool plot_bandwidth = false; table.entry("GB/s","%4.0f", thpt_data_gbps); table.entry("FP θ","%4.0f", thpt_compute_gflops); const int max_st_len = max(5, output_width - 1 - table.row_len_get() ); pStringF fmt("%%-%ds",max_st_len); string util_hdr = plot_bandwidth ? "Bandwidth Util" : "FP Utilization"; const double frac = plot_bandwidth ? comm_frac : comp_frac; util_hdr += string(max_st_len - util_hdr.length(),'-'); table.entry (util_hdr,fmt, string( size_t(max(0.0,frac*max_st_len)), '*' ), pTable::pT_Left); } else { printf ("%-20s %2d wp %7.0f µs %8.3f GF %8.3f GB/s " "%5.2f I/F\n", info.ki[kernel].name, (thd_per_block + 31 ) >> 5, this_elapsed_time_s * 1e6, thpt_compute_gflops, thpt_data_gbps, NPerf_metric_value_get("inst_executed") * 32 / num_ops ); } table.row_end(); elapsed_time_s = min(this_elapsed_time_s,elapsed_time_s); // Copy output array from GPU to CPU. // CE( cudaMemcpy ( app.h_out, app.d_out, out_size_bytes, cudaMemcpyDeviceToHost) ); int err_count = 0; Elt_Type* const h_out_check = ks.s ? ks.s->h_out_check.data() : app.h_out_check; for ( int i=0; i<app.num_vecs; i++ ) for ( int r=0; r<app.n; r++ ) { const int idx = i * app.n + r; if ( fabs( h_out_check[idx] - app.h_out[idx] ) > 1e-5 ) { err_count++; if ( err_count < 5 ) printf ("Error at vec %d elt %d: %.7f != %.7f (correct)\n", i, r, app.h_out[idx], h_out_check[idx] ); } } if ( err_count ) printf("Total errors %d\n", err_count); } } } }
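One detail worth noting in this pair: the CUDA side launches through a function pointer, KPtr(info.ki[kernel].func_ptr)<<<num_blocks,thd_per_block>>>(app.d_out,app.d_in), and the hipify output for that line, KPtr(info.ki[kernel]hipLaunchKernelGGL((.func_ptr)), ...), is not well-formed C++; the tool spliced the launch macro into the middle of the expression. A hedged sketch of how the same function-pointer launch can be written with hipLaunchKernelGGL, assuming the row's KPtr typedef and a kernel with the matching signature (the names here are illustrative, not from the dataset):

    #include <hip/hip_runtime.h>

    typedef float Elt_Type;                                   // as in the row above
    typedef void (*KPtr)(Elt_Type* dout, const Elt_Type* din);

    __global__ void copy_kernel(Elt_Type* dout, const Elt_Type* din) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        dout[i] = din[i];
    }

    // The function pointer goes where the kernel name would normally appear;
    // grid, block, dynamic shared-memory bytes, and stream follow, then the
    // kernel arguments.
    void launch(KPtr fn, int num_blocks, int thd_per_block,
                Elt_Type* d_out, const Elt_Type* d_in) {
        hipLaunchKernelGGL(fn, dim3(num_blocks), dim3(thd_per_block), 0, 0,
                           d_out, d_in);
    }

    // e.g. launch(copy_kernel, 64, 256, d_out, d_in);

As in CUDA, whether a host-side pointer to a __global__ function can be launched this way depends on the toolchain, so this is a sketch of the shape of the fix rather than a drop-in correction of the row.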
92ade62b206de268911bd880c0d677185b1b049d.hip
// !!! This is a file automatically generated by hipify!!! #ifndef opencl_translator_cu // pragma once #define opencl_translator_cu #ifdef __NVCC__ #ifndef STATIC_KEYWORD #define STATIC_KEYWORD __device__ #endif // See https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/functionQualifiers.html #define vec_type_hint(typen) #define work_group_size_hint(X, Y, Z) #define reqd_work_group_size(X, Y, Z) #define __kernel __global__ #define __global #define __local __shared__ #define __constant __constant__ typedef unsigned int uint; // https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/barrier.html enum cl_mem_fence_flags { CLK_LOCAL_MEM_FENCE, CLK_GLOBAL_MEM_FENCE }; STATIC_KEYWORD void barrier(cl_mem_fence_flags flags) { __syncthreads(); } // https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/workItemFunctions.html STATIC_KEYWORD size_t getXYZByIndex(dim3 xyz, uint dimindx) { if (dimindx == 2) { return xyz.z; } else if (dimindx == 1) { return xyz.y; } else { return xyz.x; } } STATIC_KEYWORD size_t get_global_size (uint dimindx) { return getXYZByIndex(gridDim, dimindx) * getXYZByIndex(blockDim, dimindx); } STATIC_KEYWORD size_t get_global_id (uint dimindx) { return getXYZByIndex(blockIdx, dimindx) * getXYZByIndex(blockDim, dimindx) + getXYZByIndex(threadIdx, dimindx); } STATIC_KEYWORD size_t get_local_size (uint dimindx) { return getXYZByIndex(blockDim, dimindx); } STATIC_KEYWORD size_t get_local_id (uint dimindx) { return getXYZByIndex(threadIdx, dimindx); } STATIC_KEYWORD size_t get_num_groups (uint dimindx) { return getXYZByIndex(gridDim, dimindx); } STATIC_KEYWORD size_t get_group_id (uint dimindx) { return getXYZByIndex(blockIdx, dimindx); } STATIC_KEYWORD uint get_work_dim() { if (get_global_size(2) > 1) { return 3; } else if (get_global_size(1) > 1) { return 2; } else { return 1; } } #define WARP_SIZE 32 #endif #ifdef __CUDA_ARCH__ #define DEVICE_CODE #else #define HOST_CODE #endif #include <libgpu/work_size.h> #include <libgpu/shared_device_buffer.h> #include <libgpu/cuda/utils.h> #include <hip/hip_runtime_api.h> #endif // pragma once
92ade62b206de268911bd880c0d677185b1b049d.cu
#ifndef opencl_translator_cu // pragma once #define opencl_translator_cu #ifdef __NVCC__ #ifndef STATIC_KEYWORD #define STATIC_KEYWORD __device__ #endif // See https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/functionQualifiers.html #define vec_type_hint(typen) #define work_group_size_hint(X, Y, Z) #define reqd_work_group_size(X, Y, Z) #define __kernel __global__ #define __global #define __local __shared__ #define __constant __constant__ typedef unsigned int uint; // https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/barrier.html enum cl_mem_fence_flags { CLK_LOCAL_MEM_FENCE, CLK_GLOBAL_MEM_FENCE }; STATIC_KEYWORD void barrier(cl_mem_fence_flags flags) { __syncthreads(); } // https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/workItemFunctions.html STATIC_KEYWORD size_t getXYZByIndex(dim3 xyz, uint dimindx) { if (dimindx == 2) { return xyz.z; } else if (dimindx == 1) { return xyz.y; } else { return xyz.x; } } STATIC_KEYWORD size_t get_global_size (uint dimindx) { return getXYZByIndex(gridDim, dimindx) * getXYZByIndex(blockDim, dimindx); } STATIC_KEYWORD size_t get_global_id (uint dimindx) { return getXYZByIndex(blockIdx, dimindx) * getXYZByIndex(blockDim, dimindx) + getXYZByIndex(threadIdx, dimindx); } STATIC_KEYWORD size_t get_local_size (uint dimindx) { return getXYZByIndex(blockDim, dimindx); } STATIC_KEYWORD size_t get_local_id (uint dimindx) { return getXYZByIndex(threadIdx, dimindx); } STATIC_KEYWORD size_t get_num_groups (uint dimindx) { return getXYZByIndex(gridDim, dimindx); } STATIC_KEYWORD size_t get_group_id (uint dimindx) { return getXYZByIndex(blockIdx, dimindx); } STATIC_KEYWORD uint get_work_dim() { if (get_global_size(2) > 1) { return 3; } else if (get_global_size(1) > 1) { return 2; } else { return 1; } } #define WARP_SIZE 32 #endif #ifdef __CUDA_ARCH__ #define DEVICE_CODE #else #define HOST_CODE #endif #include <libgpu/work_size.h> #include <libgpu/shared_device_buffer.h> #include <libgpu/cuda/utils.h> #include <cuda_runtime_api.h> #endif // pragma once
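Unlike the other rows, this pair is not a kernel but an OpenCL-to-CUDA compatibility header: it maps OpenCL qualifiers (__kernel, __global, __local, __constant) and work-item functions (get_global_id and friends) onto their CUDA equivalents. Below is a sketch of the kind of OpenCL-style kernel such a header lets NVCC compile; the kernel name and the include path are illustrative, since the dataset does not give the header's real location.

    // Illustrative only: assumes the translator header above is reachable at
    // this path, which is not specified by the dataset.
    #include "opencl_translator.cu"

    // Written in OpenCL style; the macros rewrite __kernel as __global__, erase
    // __global, and implement get_global_id(0) as
    // blockIdx.x * blockDim.x + threadIdx.x.
    __kernel void vector_add(__global const float* a,
                             __global const float* b,
                             __global float* out,
                             uint n)
    {
        size_t i = get_global_id(0);
        if (i < n)
            out[i] = a[i] + b[i];
    }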
c300584cf801c295f23ca06d20ff8f5c46312eda.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> __global__ void deviceburst(float *x, float *initsums, int n, int k, float *bigmaxs, int *startend) { int partition = (n - k + 1) / (blockDim.x * gridDim.x) + 1; int me = blockIdx.x * blockDim.x + threadIdx.x; int left = me * partition; int left_limit = left + partition; int length = k; float sum = initsums[me]; float mean = sum / length; startend[me * 2] = left; startend[me * 2 + 1] = left + length - 1; bigmaxs[me] = mean; while (left + length < n && left < left_limit) { float next = x[left + length]; if (next > mean) { if (next > x[left]) { sum = sum + next - x[left]; left += 1; } else { sum = sum + next; length += 1; } } else { for (int i = 0; i <= length - k + 1; i++) { sum = sum - x[left]; } left += length - k + 1; length = k; sum = sum + x[left + length]; } mean = sum / length; if (mean > bigmaxs[me]) { startend[me * 2] = left; startend[me * 2 + 1] = left + length - 1; bigmaxs[me] = mean; } } } float arraysum(float *x, int n, int start, int end) { float sum = 0; for (int i = start; i < n && i < end; i++) { sum = sum + x[i]; } return sum; } int arraymaxidx(float *x, int n) { float max = x[0]; int maxidx = 0; for (int i = 1; i < n; i++) { if (x[i] > max) { max = x[i]; maxidx = i; } } return maxidx; } void maxburst(float *x, int n, int k, int *startend, float *bigmax) { int gridDimX = 128; int blockDimX = 256; int threads_count = gridDimX * blockDimX; float *device_x; hipMalloc((void **)&device_x, sizeof(float) * n); hipMemcpy(device_x, x, sizeof(float) * n, hipMemcpyHostToDevice); float *device_bigmaxs; hipMalloc((void **)&device_bigmaxs, sizeof(float) * threads_count); int *device_startends; hipMalloc((void **)&device_startends, sizeof(int) * threads_count * 2); float *initsums = (float *)malloc(sizeof(float) * threads_count); int partition = (n - k + 1) / threads_count + 1; for (int i = 0; i < threads_count; i++) { initsums[i] = arraysum(x, n, i * partition, k); } float *device_initsums; hipMalloc((void **)&device_initsums, sizeof(float) * threads_count); hipMemcpy(device_initsums, initsums, sizeof(float) * threads_count, hipMemcpyHostToDevice); free(initsums); dim3 dimGrid(gridDimX, 1); dim3 dimBlock(blockDimX, 1, 1); hipLaunchKernelGGL(( deviceburst), dim3(dimGrid), dim3(dimBlock), 0, 0, device_x, device_initsums, n, k, device_bigmaxs, device_startends); hipDeviceSynchronize(); hipFree(device_x); float *bigmaxs = (float *)malloc(sizeof(float) * threads_count); hipMemcpy(bigmaxs, device_bigmaxs, sizeof(float) * threads_count, hipMemcpyDeviceToHost); hipFree(device_bigmaxs); int *startends = (int *)malloc(sizeof(int) * threads_count * 2); hipMemcpy(startends, device_startends, sizeof(int) * threads_count * 2, hipMemcpyDeviceToHost); hipFree(device_startends); int maxidx = arraymaxidx(bigmaxs, threads_count); bigmax[0] = bigmaxs[maxidx]; startend[0] = startends[maxidx * 2]; startend[1] = startends[maxidx * 2 + 1]; } // ------- // Testing // // CSIF // clear && /usr/local/cuda-8.0/bin/nvcc -Wno-deprecated-gpu-targets -g -G Skip.cu && a.out #include <stdio.h> #include <sys/time.h> int main() { int n = 50000; int k = 20000; float *x = (float *)malloc(sizeof(float) * n); srand(0); for (int i = 0; i < n; i++) { x[i] = (float)rand() / (float)(RAND_MAX / 100.0); } int startend[] = {0, 0}; float bigmax = 0; struct timeval start; gettimeofday(&start, NULL); maxburst(x, n, k, startend, &bigmax); struct timeval end; gettimeofday(&end, NULL); float duration = (end.tv_sec - start.tv_sec) * 1000.0 + (end.tv_usec 
- start.tv_usec) / 1000.0; printf("%f (from %d to %d) (%fms)\n", bigmax, startend[0], startend[1], duration); }
c300584cf801c295f23ca06d20ff8f5c46312eda.cu
#include <cuda.h>

__global__ void deviceburst(float *x, float *initsums, int n, int k, float *bigmaxs, int *startend) {
    int partition = (n - k + 1) / (blockDim.x * gridDim.x) + 1;
    int me = blockIdx.x * blockDim.x + threadIdx.x;
    int left = me * partition;
    int left_limit = left + partition;
    int length = k;
    float sum = initsums[me];
    float mean = sum / length;
    startend[me * 2] = left;
    startend[me * 2 + 1] = left + length - 1;
    bigmaxs[me] = mean;
    while (left + length < n && left < left_limit) {
        float next = x[left + length];
        if (next > mean) {
            if (next > x[left]) {
                sum = sum + next - x[left];
                left += 1;
            } else {
                sum = sum + next;
                length += 1;
            }
        } else {
            for (int i = 0; i <= length - k + 1; i++) {
                sum = sum - x[left];
            }
            left += length - k + 1;
            length = k;
            sum = sum + x[left + length];
        }
        mean = sum / length;
        if (mean > bigmaxs[me]) {
            startend[me * 2] = left;
            startend[me * 2 + 1] = left + length - 1;
            bigmaxs[me] = mean;
        }
    }
}

float arraysum(float *x, int n, int start, int end) {
    float sum = 0;
    for (int i = start; i < n && i < end; i++) {
        sum = sum + x[i];
    }
    return sum;
}

int arraymaxidx(float *x, int n) {
    float max = x[0];
    int maxidx = 0;
    for (int i = 1; i < n; i++) {
        if (x[i] > max) {
            max = x[i];
            maxidx = i;
        }
    }
    return maxidx;
}

void maxburst(float *x, int n, int k, int *startend, float *bigmax) {
    int gridDimX = 128;
    int blockDimX = 256;
    int threads_count = gridDimX * blockDimX;

    float *device_x;
    cudaMalloc((void **)&device_x, sizeof(float) * n);
    cudaMemcpy(device_x, x, sizeof(float) * n, cudaMemcpyHostToDevice);

    float *device_bigmaxs;
    cudaMalloc((void **)&device_bigmaxs, sizeof(float) * threads_count);

    int *device_startends;
    cudaMalloc((void **)&device_startends, sizeof(int) * threads_count * 2);

    float *initsums = (float *)malloc(sizeof(float) * threads_count);
    int partition = (n - k + 1) / threads_count + 1;
    for (int i = 0; i < threads_count; i++) {
        initsums[i] = arraysum(x, n, i * partition, k);
    }
    float *device_initsums;
    cudaMalloc((void **)&device_initsums, sizeof(float) * threads_count);
    cudaMemcpy(device_initsums, initsums, sizeof(float) * threads_count, cudaMemcpyHostToDevice);
    free(initsums);

    dim3 dimGrid(gridDimX, 1);
    dim3 dimBlock(blockDimX, 1, 1);
    deviceburst<<<dimGrid, dimBlock>>>(device_x, device_initsums, n, k, device_bigmaxs, device_startends);
    cudaThreadSynchronize();
    cudaFree(device_x);

    float *bigmaxs = (float *)malloc(sizeof(float) * threads_count);
    cudaMemcpy(bigmaxs, device_bigmaxs, sizeof(float) * threads_count, cudaMemcpyDeviceToHost);
    cudaFree(device_bigmaxs);

    int *startends = (int *)malloc(sizeof(int) * threads_count * 2);
    cudaMemcpy(startends, device_startends, sizeof(int) * threads_count * 2, cudaMemcpyDeviceToHost);
    cudaFree(device_startends);

    int maxidx = arraymaxidx(bigmaxs, threads_count);
    bigmax[0] = bigmaxs[maxidx];
    startend[0] = startends[maxidx * 2];
    startend[1] = startends[maxidx * 2 + 1];
}

// -------
// Testing
//
// CSIF
// clear && /usr/local/cuda-8.0/bin/nvcc -Wno-deprecated-gpu-targets -g -G Skip.cu && a.out

#include <stdio.h>
#include <sys/time.h>

int main() {
    int n = 50000;
    int k = 20000;
    float *x = (float *)malloc(sizeof(float) * n);
    srand(0);
    for (int i = 0; i < n; i++) {
        x[i] = (float)rand() / (float)(RAND_MAX / 100.0);
    }
    int startend[] = {0, 0};
    float bigmax = 0;

    struct timeval start;
    gettimeofday(&start, NULL);
    maxburst(x, n, k, startend, &bigmax);
    struct timeval end;
    gettimeofday(&end, NULL);

    float duration = (end.tv_sec - start.tv_sec) * 1000.0 + (end.tv_usec - start.tv_usec) / 1000.0;
    printf("%f (from %d to %d) (%fms)\n", bigmax, startend[0], startend[1], duration);
}
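The file above already carries its own compile command and a timing harness in main(). For sanity-checking the windowed-mean search that deviceburst() performs, a brute-force CPU reference is a useful companion; the sketch below is not part of the dumped file, the helper name maxburst_cpu is hypothetical, and it assumes the same contract as maxburst() (find the contiguous run of length at least k with the largest mean, reporting inclusive start and end indices).

// Hypothetical O(n^2) CPU reference for comparison against maxburst(); a
// prefix-sum array makes each window mean O(1) to evaluate.
#include <stdlib.h>
void maxburst_cpu(float *x, int n, int k, int *startend, float *bigmax) {
    double *prefix = (double *)malloc(sizeof(double) * (n + 1));
    prefix[0] = 0.0;
    for (int i = 0; i < n; i++) prefix[i + 1] = prefix[i] + x[i];
    *bigmax = -1.0e30f;
    for (int s = 0; s + k - 1 < n; s++) {
        for (int e = s + k - 1; e < n; e++) {
            float mean = (float)((prefix[e + 1] - prefix[s]) / (e - s + 1));
            if (mean > *bigmax) { *bigmax = mean; startend[0] = s; startend[1] = e; }
        }
    }
    free(prefix);
}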
c6ce66bd9e76f8824a5fdba1ae0f6e3258b15ab6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <hip/hip_runtime.h> __global__ void divideProcessKernel(float *d_A, int wA,int k) { int iIdx = blockDim.x*blockIdx.x + threadIdx.x+k+1; int kIdx = k;; if (iIdx>wA-1) { return; } d_A[iIdx*wA+kIdx] = d_A[iIdx*wA+kIdx]/ d_A[kIdx*wA+kIdx]; } // Kernel by using shared memory __global__ void updateProcessKernel_shared(float *d_A, int wA,int k) { int i = blockDim.x*blockIdx.x + threadIdx.x+k+1; int j = blockDim.y*blockIdx.y + threadIdx.y+k+1; if (i>wA-1 || j>wA-1) { return; } extern __shared__ float shared[]; if (threadIdx.x==0) { shared[threadIdx.y] = d_A[j*wA+k]; shared[blockDim.x+threadIdx.y] = d_A[k*wA+j]; } __syncthreads(); int idx = i*wA+j; d_A[idx] = d_A[idx] - shared[threadIdx.x]*shared[threadIdx.y+blockDim.x]; } // GPU version2 void LUDecomposition_GPU_shared(float *d_A,int wA) { dim3 ThreadDiv(512,1,1); dim3 BlockDiv((wA+ThreadDiv.x-1)/ThreadDiv.x,1,1); for (int k=0; k<wA; k++ ) { hipLaunchKernelGGL(( divideProcessKernel), dim3(BlockDiv),dim3(ThreadDiv), 0, 0, d_A,wA,k); dim3 ThreadUpdate(32,16,1); dim3 BlockUpdate((wA+ThreadUpdate.x-k-1)/ThreadUpdate.x,(wA+ThreadUpdate.x-k-1)/ThreadUpdate.y,1); hipLaunchKernelGGL(( updateProcessKernel_shared), dim3(BlockUpdate),dim3(ThreadUpdate),(ThreadUpdate.x + ThreadUpdate.y) * sizeof(float), 0, d_A,wA,k); } } //vertify result void VertifyResult(float *LURes, float *A,int wA) { float *MulRes = new float[wA*wA]; memset(MulRes,0,sizeof(float)*wA*wA); float temp; for (int i=0; i<wA; i++)// { for (int j=0; j<wA; j++)// { for (int ii=0; ii<=i; ii++) { if (i==ii) { temp = 1; } else temp = LURes[i*wA+ii]; if (ii>j) { continue; } MulRes[i*wA+j] += temp*LURes[ii*wA+j]; } } } float temp2; bool bError = false; for (int i=0; i<wA; i++)// { for (int j=0; j<wA; j++)// { temp2 = abs(MulRes[i*wA+j] - A[i*wA+j]); if (temp2 > 1.000000E-01) { printf("Error:%f,%d %d,\n",temp2,i,j); bError = true; } } } if (!bError) { printf("Pass!\n"); } } void GenSimData(int wA) { float *A = new float[wA*wA]; srand(time(NULL)); for (int i=0; i<wA; i++) { for (int j=0; j<wA; j++) { A[i*wA+j] = j;//rand()%99; if (A[i*wA+j] ==0) { A[i*wA+j] ++; } } } // Save Test Date FILE *fp; fp = fopen("Input.txt","w"); if (fp == NULL) { return; } for (int i=0; i<wA; i++) { for (int j=0; j<wA; j++) { fprintf(fp,"%f ",A[i*wA+j]); } fprintf(fp,"\n"); } fclose(fp); delete[] A; A = NULL; } bool ReadSimData(float *A, int wA) { // Read Test Date FILE *fp; fp = fopen("Input.txt","r"); if (fp == NULL) { return false; } for (int i=0; i<wA; i++) { for (int j=0; j<wA; j++) { fscanf(fp,"%f ",&A[i*wA+j]); } } fclose(fp); return true; } bool SaveLuResult(float *A, int wA) { // Read Test Date FILE *fp; fp = fopen("LURes.txt","w"); if (fp == NULL) { return false; } for (int i=0; i<wA; i++) { for (int j=0; j<wA; j++) { fprintf(fp,"%f ",A[i*wA+j]); } fprintf(fp,"\n"); } fclose(fp); return true; } int main() { // GPU int wA = 512; float *A = new float[wA*wA]; GenSimData(wA); // Generate simulation Data and save file "Input.txt" ReadSimData(A,wA); float *LURes = new float[wA*wA]; float *d_A; hipMalloc((void**)&d_A,sizeof(float)*wA*wA); hipMemcpy(d_A,A,sizeof(float)*wA*wA,hipMemcpyHostToDevice); hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); LUDecomposition_GPU_shared(d_A,wA); hipEventRecord(stop,0); hipEventSynchronize( stop ); float costtime; hipEventElapsedTime(&costtime,start,stop); printf("Elapsed 
Time:%f\n",costtime); hipMemcpy(LURes,d_A,sizeof(float)*wA*wA,hipMemcpyDeviceToHost); SaveLuResult(LURes, wA); ////vertify result VertifyResult(LURes,A,wA); return 0; }
c6ce66bd9e76f8824a5fdba1ae0f6e3258b15ab6.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>

__global__ void divideProcessKernel(float *d_A, int wA, int k)
{
    int iIdx = blockDim.x*blockIdx.x + threadIdx.x+k+1;
    int kIdx = k;
    if (iIdx>wA-1)
    {
        return;
    }
    d_A[iIdx*wA+kIdx] = d_A[iIdx*wA+kIdx]/ d_A[kIdx*wA+kIdx];
}

// Kernel by using shared memory
__global__ void updateProcessKernel_shared(float *d_A, int wA, int k)
{
    int i = blockDim.x*blockIdx.x + threadIdx.x+k+1;
    int j = blockDim.y*blockIdx.y + threadIdx.y+k+1;
    if (i>wA-1 || j>wA-1)
    {
        return;
    }
    extern __shared__ float shared[];
    if (threadIdx.x==0)
    {
        shared[threadIdx.y] = d_A[j*wA+k];
        shared[blockDim.x+threadIdx.y] = d_A[k*wA+j];
    }
    __syncthreads();
    int idx = i*wA+j;
    d_A[idx] = d_A[idx] - shared[threadIdx.x]*shared[threadIdx.y+blockDim.x];
}

// GPU version2
void LUDecomposition_GPU_shared(float *d_A, int wA)
{
    dim3 ThreadDiv(512,1,1);
    dim3 BlockDiv((wA+ThreadDiv.x-1)/ThreadDiv.x,1,1);
    for (int k=0; k<wA; k++ )
    {
        divideProcessKernel<<<BlockDiv,ThreadDiv>>>(d_A,wA,k);
        dim3 ThreadUpdate(32,16,1);
        dim3 BlockUpdate((wA+ThreadUpdate.x-k-1)/ThreadUpdate.x,(wA+ThreadUpdate.x-k-1)/ThreadUpdate.y,1);
        updateProcessKernel_shared<<<BlockUpdate,ThreadUpdate,(ThreadUpdate.x + ThreadUpdate.y) * sizeof(float)>>>(d_A,wA,k);
    }
}

// verify result
void VertifyResult(float *LURes, float *A, int wA)
{
    float *MulRes = new float[wA*wA];
    memset(MulRes,0,sizeof(float)*wA*wA);
    float temp;
    for (int i=0; i<wA; i++)   // rows
    {
        for (int j=0; j<wA; j++)   // columns
        {
            for (int ii=0; ii<=i; ii++)
            {
                if (i==ii)
                {
                    temp = 1;
                }
                else
                    temp = LURes[i*wA+ii];
                if (ii>j)
                {
                    continue;
                }
                MulRes[i*wA+j] += temp*LURes[ii*wA+j];
            }
        }
    }
    float temp2;
    bool bError = false;
    for (int i=0; i<wA; i++)   // rows
    {
        for (int j=0; j<wA; j++)   // columns
        {
            temp2 = abs(MulRes[i*wA+j] - A[i*wA+j]);
            if (temp2 > 1.000000E-01)
            {
                printf("Error:%f,%d %d,\n",temp2,i,j);
                bError = true;
            }
        }
    }
    if (!bError)
    {
        printf("Pass!\n");
    }
}

void GenSimData(int wA)
{
    float *A = new float[wA*wA];
    srand(time(NULL));
    for (int i=0; i<wA; i++)
    {
        for (int j=0; j<wA; j++)
        {
            A[i*wA+j] = j;//rand()%99;
            if (A[i*wA+j] ==0)
            {
                A[i*wA+j] ++;
            }
        }
    }
    // Save test data
    FILE *fp;
    fp = fopen("Input.txt","w");
    if (fp == NULL)
    {
        return;
    }
    for (int i=0; i<wA; i++)
    {
        for (int j=0; j<wA; j++)
        {
            fprintf(fp,"%f ",A[i*wA+j]);
        }
        fprintf(fp,"\n");
    }
    fclose(fp);
    delete[] A;
    A = NULL;
}

bool ReadSimData(float *A, int wA)
{
    // Read test data
    FILE *fp;
    fp = fopen("Input.txt","r");
    if (fp == NULL)
    {
        return false;
    }
    for (int i=0; i<wA; i++)
    {
        for (int j=0; j<wA; j++)
        {
            fscanf(fp,"%f ",&A[i*wA+j]);
        }
    }
    fclose(fp);
    return true;
}

bool SaveLuResult(float *A, int wA)
{
    // Save the LU result
    FILE *fp;
    fp = fopen("LURes.txt","w");
    if (fp == NULL)
    {
        return false;
    }
    for (int i=0; i<wA; i++)
    {
        for (int j=0; j<wA; j++)
        {
            fprintf(fp,"%f ",A[i*wA+j]);
        }
        fprintf(fp,"\n");
    }
    fclose(fp);
    return true;
}

int main()
{
    // GPU
    int wA = 512;
    float *A = new float[wA*wA];
    GenSimData(wA);   // Generate simulation data and save it to "Input.txt"
    ReadSimData(A,wA);
    float *LURes = new float[wA*wA];
    float *d_A;
    cudaMalloc((void**)&d_A,sizeof(float)*wA*wA);
    cudaMemcpy(d_A,A,sizeof(float)*wA*wA,cudaMemcpyHostToDevice);

    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    LUDecomposition_GPU_shared(d_A,wA);
    cudaEventRecord(stop,0);
    cudaEventSynchronize( stop );
    float costtime;
    cudaEventElapsedTime(&costtime,start,stop);
    printf("Elapsed Time:%f\n",costtime);

    cudaMemcpy(LURes,d_A,sizeof(float)*wA*wA,cudaMemcpyDeviceToHost);
    SaveLuResult(LURes, wA);
    // verify result
    VertifyResult(LURes,A,wA);
    return 0;
}
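VertifyResult() above checks the factorization by rebuilding L*U and comparing it with A. The usual next step, solving A x = b from the in-place factors, is not part of the file; the sketch below is a hypothetical host-side helper that assumes exactly the storage convention the kernels produce (unit-diagonal L below the diagonal, U on and above it, no pivoting).

// Hypothetical host-side triangular solve using the in-place LU factors
// written back by LUDecomposition_GPU_shared(); LU is wA*wA, b and x are wA.
void lu_solve(const float *LU, const float *b, float *x, int wA) {
    float *y = new float[wA];
    for (int i = 0; i < wA; i++) {            // forward substitution: L y = b
        y[i] = b[i];
        for (int j = 0; j < i; j++) y[i] -= LU[i * wA + j] * y[j];
    }
    for (int i = wA - 1; i >= 0; i--) {       // back substitution: U x = y
        x[i] = y[i];
        for (int j = i + 1; j < wA; j++) x[i] -= LU[i * wA + j] * x[j];
        x[i] /= LU[i * wA + i];
    }
    delete[] y;
}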
34f25de94b20ac2063881ed2b5bf275658f77803.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #define WIDTH 10 #define HEIGHT 10 #define channels 3 #define Mask_width 5 #define Mask_radius Mask_width/2 #define O_TILE_WIDTH 12 #define BLOCK_WIDTH (O_TILE_WIDTH+Mask_width-1) #define min(x,y) ((x)<(y)?(x):(y)) #define max(x,y) ((x)>(y)?(x):(y)) #define clamp(x) (min(max((x),0.0),1.0)) void imageConvolution(float *input,float *output,const float* __restrict__ M,int width, int height, int ch) { int i=0,j=0,k=0,x=0,y=0,xOffset=0,yOffset=0; float accum =0.0,maskValue =0.0,imagePixel =0.0; for( i=0 ;i<height;i++){ for( j=0;j< width;j++){ for(k=0;k<ch;k++){ accum = 0; for(y = 0 ;y< Mask_width;y++){ for(x= 0;x< Mask_width; x++){ xOffset = j + x - Mask_radius; yOffset = i + y - Mask_radius; if (xOffset>=0 && xOffset < width && yOffset>=0 && yOffset < height){ imagePixel = input[(yOffset * width + xOffset) * channels + k]; maskValue = M[y*Mask_width+x]; accum += imagePixel * maskValue; } else accum +=0; } } output[(i * width + j)*channels + k] = accum;// (float) clamp(accum); } } } } __global__ void imageTiledConvolution_kernel(float *input,float *output,const float * __restrict__ M,int width, int height, int ch) { int i=0,j=0; int tx = threadIdx.x; int ty = threadIdx.y; int row_o = blockIdx.y*O_TILE_WIDTH+ty; int col_o = blockIdx.x*O_TILE_WIDTH+tx; int row_i = row_o - Mask_radius; int col_i = col_o - Mask_radius; float cValue = 0.0f; __shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH][channels]; for(int chIdx=0;chIdx<ch;chIdx++){ if(row_i>=0 && row_i<height && col_i>=0 && col_i<width){ Ns[ty][tx][chIdx] = input[(row_i*width+col_i)*ch+chIdx]; }else{ Ns[ty][tx][chIdx] = 0.0f; } __syncthreads(); cValue = 0.0f; if(ty<O_TILE_WIDTH && tx<O_TILE_WIDTH){ for( i=0;i<Mask_width;i++){ for( j=0;j<Mask_width;j++){ cValue +=M[i*Mask_width+j]*Ns[ty+i][tx+j][chIdx]; } } } __syncthreads(); if(row_o<height && col_o<width && ty<O_TILE_WIDTH && tx<O_TILE_WIDTH) output[(row_o*width+col_o)*ch+chIdx] = cValue;//min(max(cValue,0),1); } } void loadData(float *input,float *output,float *maskData) { int i=0; for(i=0;i<WIDTH*HEIGHT*channels;i++) input[i] = 1.0; for(i=0;i<WIDTH*HEIGHT*channels;i++) output[i] = 0.0; for(i=0;i<Mask_width *Mask_width ;i++) maskData[i] = 1.0; } void dispRes(float *arr) { int i=0,j=0,k=0; printf("Results of the calculation\n"); for(k=0;k<channels;k++){ for(i=0;i<HEIGHT;i++){ for(j=0;j<WIDTH;j++){ printf("%2.1f ",arr[(i*WIDTH+j)*channels+k]); } printf("\n"); } printf("k = %d\n",k);system("pause"); } } int main(void) { int maskRows = Mask_width; int maskColumns = Mask_width; int imageChannels = channels; int imageWidth = WIDTH; int imageHeight = HEIGHT; float * hostInputImageData; float * hostOutputImageData; float * hostOutputImageDataCPU; float * hostMaskData; float * deviceInputImageData; float * deviceOutputImageData; float * deviceMaskData; //allocate Memory on the host hostInputImageData = (float*)malloc(imageWidth*imageHeight*channels*sizeof(float)); hostOutputImageData = (float*)malloc(imageWidth*imageHeight*channels*sizeof(float)); hostOutputImageDataCPU = (float*)malloc(imageWidth*imageHeight*channels*sizeof(float)); hostMaskData = (float*)malloc(Mask_width*Mask_width*sizeof(float)); //load data to host memory loadData(hostInputImageData,hostOutputImageData,hostMaskData); //cuda memory allocation on the device hipMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); hipMalloc((void 
**) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); hipMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float)); //cuda memory copy from host to device hipMemcpy(deviceInputImageData,hostInputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),hipMemcpyHostToDevice); hipMemcpy(deviceMaskData,hostMaskData,maskRows * maskColumns * sizeof(float),hipMemcpyHostToDevice); dim3 DimGrid((imageWidth-1)/O_TILE_WIDTH+1,(imageHeight-1)/O_TILE_WIDTH+1,1); dim3 DimBlock(BLOCK_WIDTH, BLOCK_WIDTH,1); hipLaunchKernelGGL(( imageTiledConvolution_kernel), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceInputImageData,deviceOutputImageData,deviceMaskData,imageWidth,imageHeight,imageChannels); imageConvolution(hostInputImageData,hostOutputImageDataCPU,hostMaskData,imageWidth,imageHeight,imageChannels); //cuda memory copy from device to host hipMemcpy(hostOutputImageData,deviceOutputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),hipMemcpyDeviceToHost); //dispRes(hostOutputImageDataCPU); dispRes(hostOutputImageData); free(hostInputImageData); free(hostOutputImageData); free(hostOutputImageDataCPU); free(hostMaskData); hipFree(deviceInputImageData); hipFree(deviceOutputImageData); hipFree(deviceMaskData); return 0; }
34f25de94b20ac2063881ed2b5bf275658f77803.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #define WIDTH 10 #define HEIGHT 10 #define channels 3 #define Mask_width 5 #define Mask_radius Mask_width/2 #define O_TILE_WIDTH 12 #define BLOCK_WIDTH (O_TILE_WIDTH+Mask_width-1) #define min(x,y) ((x)<(y)?(x):(y)) #define max(x,y) ((x)>(y)?(x):(y)) #define clamp(x) (min(max((x),0.0),1.0)) void imageConvolution(float *input,float *output,const float* __restrict__ M,int width, int height, int ch) { int i=0,j=0,k=0,x=0,y=0,xOffset=0,yOffset=0; float accum =0.0,maskValue =0.0,imagePixel =0.0; for( i=0 ;i<height;i++){ for( j=0;j< width;j++){ for(k=0;k<ch;k++){ accum = 0; for(y = 0 ;y< Mask_width;y++){ for(x= 0;x< Mask_width; x++){ xOffset = j + x - Mask_radius; yOffset = i + y - Mask_radius; if (xOffset>=0 && xOffset < width && yOffset>=0 && yOffset < height){ imagePixel = input[(yOffset * width + xOffset) * channels + k]; maskValue = M[y*Mask_width+x]; accum += imagePixel * maskValue; } else accum +=0; } } output[(i * width + j)*channels + k] = accum;// (float) clamp(accum); } } } } __global__ void imageTiledConvolution_kernel(float *input,float *output,const float * __restrict__ M,int width, int height, int ch) { int i=0,j=0; int tx = threadIdx.x; int ty = threadIdx.y; int row_o = blockIdx.y*O_TILE_WIDTH+ty; int col_o = blockIdx.x*O_TILE_WIDTH+tx; int row_i = row_o - Mask_radius; int col_i = col_o - Mask_radius; float cValue = 0.0f; __shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH][channels]; for(int chIdx=0;chIdx<ch;chIdx++){ if(row_i>=0 && row_i<height && col_i>=0 && col_i<width){ Ns[ty][tx][chIdx] = input[(row_i*width+col_i)*ch+chIdx]; }else{ Ns[ty][tx][chIdx] = 0.0f; } __syncthreads(); cValue = 0.0f; if(ty<O_TILE_WIDTH && tx<O_TILE_WIDTH){ for( i=0;i<Mask_width;i++){ for( j=0;j<Mask_width;j++){ cValue +=M[i*Mask_width+j]*Ns[ty+i][tx+j][chIdx]; } } } __syncthreads(); if(row_o<height && col_o<width && ty<O_TILE_WIDTH && tx<O_TILE_WIDTH) output[(row_o*width+col_o)*ch+chIdx] = cValue;//min(max(cValue,0),1); } } void loadData(float *input,float *output,float *maskData) { int i=0; for(i=0;i<WIDTH*HEIGHT*channels;i++) input[i] = 1.0; for(i=0;i<WIDTH*HEIGHT*channels;i++) output[i] = 0.0; for(i=0;i<Mask_width *Mask_width ;i++) maskData[i] = 1.0; } void dispRes(float *arr) { int i=0,j=0,k=0; printf("Results of the calculation\n"); for(k=0;k<channels;k++){ for(i=0;i<HEIGHT;i++){ for(j=0;j<WIDTH;j++){ printf("%2.1f ",arr[(i*WIDTH+j)*channels+k]); } printf("\n"); } printf("k = %d\n",k);system("pause"); } } int main(void) { int maskRows = Mask_width; int maskColumns = Mask_width; int imageChannels = channels; int imageWidth = WIDTH; int imageHeight = HEIGHT; float * hostInputImageData; float * hostOutputImageData; float * hostOutputImageDataCPU; float * hostMaskData; float * deviceInputImageData; float * deviceOutputImageData; float * deviceMaskData; //allocate Memory on the host hostInputImageData = (float*)malloc(imageWidth*imageHeight*channels*sizeof(float)); hostOutputImageData = (float*)malloc(imageWidth*imageHeight*channels*sizeof(float)); hostOutputImageDataCPU = (float*)malloc(imageWidth*imageHeight*channels*sizeof(float)); hostMaskData = (float*)malloc(Mask_width*Mask_width*sizeof(float)); //load data to host memory loadData(hostInputImageData,hostOutputImageData,hostMaskData); //cuda memory allocation on the device cudaMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); cudaMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * 
imageChannels * sizeof(float)); cudaMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float)); //cuda memory copy from host to device cudaMemcpy(deviceInputImageData,hostInputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(deviceMaskData,hostMaskData,maskRows * maskColumns * sizeof(float),cudaMemcpyHostToDevice); dim3 DimGrid((imageWidth-1)/O_TILE_WIDTH+1,(imageHeight-1)/O_TILE_WIDTH+1,1); dim3 DimBlock(BLOCK_WIDTH, BLOCK_WIDTH,1); imageTiledConvolution_kernel<<<DimGrid,DimBlock>>>(deviceInputImageData,deviceOutputImageData,deviceMaskData,imageWidth,imageHeight,imageChannels); imageConvolution(hostInputImageData,hostOutputImageDataCPU,hostMaskData,imageWidth,imageHeight,imageChannels); //cuda memory copy from device to host cudaMemcpy(hostOutputImageData,deviceOutputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),cudaMemcpyDeviceToHost); //dispRes(hostOutputImageDataCPU); dispRes(hostOutputImageData); free(hostInputImageData); free(hostOutputImageData); free(hostOutputImageDataCPU); free(hostMaskData); cudaFree(deviceInputImageData); cudaFree(deviceOutputImageData); cudaFree(deviceMaskData); return 0; }
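main() above computes both the tiled GPU convolution and the straightforward CPU version, but only prints the GPU result. A small element-wise comparison would turn the pair into an automatic check; compareResults below is a hypothetical helper, not part of the original file, and assumes both buffers hold WIDTH*HEIGHT*channels floats.

// Hypothetical helper: count elements where the GPU and CPU outputs differ by
// more than a tolerance, e.g.
//   compareResults(hostOutputImageData, hostOutputImageDataCPU, WIDTH*HEIGHT*channels, 1e-4f);
#include <math.h>
int compareResults(const float *gpu, const float *cpu, int n, float tol) {
    int mismatches = 0;
    for (int i = 0; i < n; i++) {
        if (fabsf(gpu[i] - cpu[i]) > tol) mismatches++;
    }
    return mismatches;
}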
b0a2878b0a7f010e56ab07335ac3779986f6aad8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include <hipcub/hipcub.hpp> #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" namespace paddle { namespace operators { template <typename T> static __device__ __forceinline__ T Relu(T x) { return (x > 0) ? x : 0; } static __device__ __forceinline__ float RealSqrt(float x) { return sqrtf(x); } static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); } template <typename T> struct PairForLayerNorm { __device__ __forceinline__ PairForLayerNorm() {} __device__ __forceinline__ PairForLayerNorm(const T& first, const T& second) : first_(first), second_(second) {} T first_; T second_; }; template <typename T> struct PairForLayerNormAddFunctor { __device__ __forceinline__ PairForLayerNorm<T> operator()( const PairForLayerNorm<T>& p1, const PairForLayerNorm<T>& p2) { return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_); } }; template <typename T, bool DoRelu, int BlockDim> __global__ void InplaceAddReluAddLayerNormKernel(const T* y, const T* bias_0, const T* bias_1, const T* scale, T* out, T* mean, T* variance, int M, int N, float epsilon) { using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<T>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ T shared_mem[BlockDim + 2]; for (int i = blockIdx.x; i < M; i += gridDim.x) { int index = i * N + threadIdx.x; // The fisrt BlockDim elements will be saved to shared memory. int save_index = threadIdx.x; T* save_ptr = shared_mem; T sum_i = 0; T square_sum_i = 0; for (int j = threadIdx.x; j < N; j += blockDim.x) { T tmp_0 = out[index]; // Add bias T tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0; // Relu T tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1; // elementwise_add T tmp_3 = tmp_2 + y[index]; // Save save_ptr[save_index] = tmp_3; save_ptr = out; index += blockDim.x; save_index = index; // For layer_norm, reduce to calculate mean and std sum_i += tmp_3; square_sum_i += (tmp_3 * tmp_3); } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<T>(sum_i, square_sum_i), PairForLayerNormAddFunctor<T>()); if (threadIdx.x == 0) { T mean_i = static_cast<T>(pair.first_ / N); T variance_i = static_cast<T>(pair.second_ / N - mean_i * mean_i); shared_mem[BlockDim] = mean_i; shared_mem[BlockDim + 1] = variance_i; if (mean) { mean[blockIdx.x] = mean_i; } if (variance) { variance[blockIdx.x] = variance_i; } } __syncthreads(); T mean_i = shared_mem[BlockDim]; T std_i = static_cast<T>(RealSqrt(shared_mem[BlockDim + 1] + epsilon)); index = i * N + threadIdx.x; // First BlockDim elements loading from shared memory. 
save_index = threadIdx.x; save_ptr = shared_mem; // For layer_norm, calculate out for (int j = threadIdx.x; j < N; j += blockDim.x) { T tmp_0 = (save_ptr[save_index] - mean_i) / std_i; T tmp_1 = scale ? scale[j] * tmp_0 : tmp_0; out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1; save_ptr = out; index += blockDim.x; save_index = index; } } } template <typename T> class FusedFCElementwiseLayerNormOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<framework::Tensor>("X"); auto* w = ctx.Input<framework::Tensor>("W"); auto* out = ctx.Output<framework::Tensor>("Out"); auto w_dims = w->dims(); int N = w_dims[1]; int K = w_dims[0]; int M = framework::product(x->dims()) / K; const T* x_data = x->data<T>(); const T* w_data = w->data<T>(); T* out_data = out->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx); blas.GEMM(false, false, M, N, K, static_cast<T>(1.0), x_data, K, w_data, N, static_cast<T>(0.0), out_data, N); auto* y = ctx.Input<framework::Tensor>("Y"); auto* bias_0 = ctx.Input<framework::Tensor>("Bias0"); auto* bias_1 = ctx.Input<framework::Tensor>("Bias1"); auto* scale = ctx.Input<framework::Tensor>("Scale"); const T* y_data = y->data<T>(); const T* bias_0_data = bias_0 ? bias_0->data<T>() : nullptr; const T* bias_1_data = bias_1 ? bias_1->data<T>() : nullptr; const T* scale_data = scale ? scale->data<T>() : nullptr; auto* mean = ctx.Output<framework::Tensor>("Mean"); auto* variance = ctx.Output<framework::Tensor>("Variance"); T* mean_data = mean ? mean->mutable_data<T>(ctx.GetPlace()) : nullptr; T* variance_data = variance ? variance->mutable_data<T>(ctx.GetPlace()) : nullptr; bool with_relu = (ctx.Attr<std::string>("activation_type") == "relu") ? true : false; float epsilon = ctx.Attr<float>("epsilon"); int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); if (with_relu) { switch (platform::RoundToPowerOfTwo(N)) { CUDA_LAUNCH_KERNEL_HELPER( hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel< T, true, kPowerOfTwoDim>), dim3(::max(max_threads / kPowerOfTwoDim, 1)), dim3(kPowerOfTwoDim), 0, dev_ctx.stream(), y_data, bias_0_data, bias_1_data, scale_data, out_data, mean_data, variance_data, M, N, epsilon)); } } else { switch (platform::RoundToPowerOfTwo(N)) { CUDA_LAUNCH_KERNEL_HELPER( hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel< T, false, kPowerOfTwoDim>), dim3(::max(max_threads / kPowerOfTwoDim, 1)), dim3(kPowerOfTwoDim), 0, dev_ctx.stream(), y_data, bias_0_data, bias_1_data, scale_data, out_data, mean_data, variance_data, M, N, epsilon)); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(fused_fc_elementwise_layernorm, ops::FusedFCElementwiseLayerNormOpKernel<float>);
b0a2878b0a7f010e56ab07335ac3779986f6aad8.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include <cub/cub.cuh> #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" namespace paddle { namespace operators { template <typename T> static __device__ __forceinline__ T Relu(T x) { return (x > 0) ? x : 0; } static __device__ __forceinline__ float RealSqrt(float x) { return sqrtf(x); } static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); } template <typename T> struct PairForLayerNorm { __device__ __forceinline__ PairForLayerNorm() {} __device__ __forceinline__ PairForLayerNorm(const T& first, const T& second) : first_(first), second_(second) {} T first_; T second_; }; template <typename T> struct PairForLayerNormAddFunctor { __device__ __forceinline__ PairForLayerNorm<T> operator()( const PairForLayerNorm<T>& p1, const PairForLayerNorm<T>& p2) { return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_); } }; template <typename T, bool DoRelu, int BlockDim> __global__ void InplaceAddReluAddLayerNormKernel(const T* y, const T* bias_0, const T* bias_1, const T* scale, T* out, T* mean, T* variance, int M, int N, float epsilon) { using BlockReduce = cub::BlockReduce<PairForLayerNorm<T>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ T shared_mem[BlockDim + 2]; for (int i = blockIdx.x; i < M; i += gridDim.x) { int index = i * N + threadIdx.x; // The fisrt BlockDim elements will be saved to shared memory. int save_index = threadIdx.x; T* save_ptr = shared_mem; T sum_i = 0; T square_sum_i = 0; for (int j = threadIdx.x; j < N; j += blockDim.x) { T tmp_0 = out[index]; // Add bias T tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0; // Relu T tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1; // elementwise_add T tmp_3 = tmp_2 + y[index]; // Save save_ptr[save_index] = tmp_3; save_ptr = out; index += blockDim.x; save_index = index; // For layer_norm, reduce to calculate mean and std sum_i += tmp_3; square_sum_i += (tmp_3 * tmp_3); } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<T>(sum_i, square_sum_i), PairForLayerNormAddFunctor<T>()); if (threadIdx.x == 0) { T mean_i = static_cast<T>(pair.first_ / N); T variance_i = static_cast<T>(pair.second_ / N - mean_i * mean_i); shared_mem[BlockDim] = mean_i; shared_mem[BlockDim + 1] = variance_i; if (mean) { mean[blockIdx.x] = mean_i; } if (variance) { variance[blockIdx.x] = variance_i; } } __syncthreads(); T mean_i = shared_mem[BlockDim]; T std_i = static_cast<T>(RealSqrt(shared_mem[BlockDim + 1] + epsilon)); index = i * N + threadIdx.x; // First BlockDim elements loading from shared memory. 
save_index = threadIdx.x; save_ptr = shared_mem; // For layer_norm, calculate out for (int j = threadIdx.x; j < N; j += blockDim.x) { T tmp_0 = (save_ptr[save_index] - mean_i) / std_i; T tmp_1 = scale ? scale[j] * tmp_0 : tmp_0; out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1; save_ptr = out; index += blockDim.x; save_index = index; } } } template <typename T> class FusedFCElementwiseLayerNormOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<framework::Tensor>("X"); auto* w = ctx.Input<framework::Tensor>("W"); auto* out = ctx.Output<framework::Tensor>("Out"); auto w_dims = w->dims(); int N = w_dims[1]; int K = w_dims[0]; int M = framework::product(x->dims()) / K; const T* x_data = x->data<T>(); const T* w_data = w->data<T>(); T* out_data = out->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx); blas.GEMM(false, false, M, N, K, static_cast<T>(1.0), x_data, K, w_data, N, static_cast<T>(0.0), out_data, N); auto* y = ctx.Input<framework::Tensor>("Y"); auto* bias_0 = ctx.Input<framework::Tensor>("Bias0"); auto* bias_1 = ctx.Input<framework::Tensor>("Bias1"); auto* scale = ctx.Input<framework::Tensor>("Scale"); const T* y_data = y->data<T>(); const T* bias_0_data = bias_0 ? bias_0->data<T>() : nullptr; const T* bias_1_data = bias_1 ? bias_1->data<T>() : nullptr; const T* scale_data = scale ? scale->data<T>() : nullptr; auto* mean = ctx.Output<framework::Tensor>("Mean"); auto* variance = ctx.Output<framework::Tensor>("Variance"); T* mean_data = mean ? mean->mutable_data<T>(ctx.GetPlace()) : nullptr; T* variance_data = variance ? variance->mutable_data<T>(ctx.GetPlace()) : nullptr; bool with_relu = (ctx.Attr<std::string>("activation_type") == "relu") ? true : false; float epsilon = ctx.Attr<float>("epsilon"); int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); if (with_relu) { switch (platform::RoundToPowerOfTwo(N)) { CUDA_LAUNCH_KERNEL_HELPER( InplaceAddReluAddLayerNormKernel< T, true, kPowerOfTwoDim><<<std::max(max_threads / kPowerOfTwoDim, 1), kPowerOfTwoDim, 0, dev_ctx.stream()>>>( y_data, bias_0_data, bias_1_data, scale_data, out_data, mean_data, variance_data, M, N, epsilon)); } } else { switch (platform::RoundToPowerOfTwo(N)) { CUDA_LAUNCH_KERNEL_HELPER( InplaceAddReluAddLayerNormKernel< T, false, kPowerOfTwoDim><<<std::max(max_threads / kPowerOfTwoDim, 1), kPowerOfTwoDim, 0, dev_ctx.stream()>>>( y_data, bias_0_data, bias_1_data, scale_data, out_data, mean_data, variance_data, M, N, epsilon)); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(fused_fc_elementwise_layernorm, ops::FusedFCElementwiseLayerNormOpKernel<float>);
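InplaceAddReluAddLayerNormKernel above fuses four steps per row: add Bias0, optional ReLU, elementwise add of Y, then layer normalization with Scale and Bias1. As a reading aid, the sketch below restates that arithmetic for a single row on the host; the function name and float-only types are assumptions, and it ignores the kernel's shared-memory staging and block reduction.

// Illustrative single-row restatement of the fused computation (hypothetical
// helper, float only): xw is one row of X*W, y the matching row of Y.
#include <cmath>
#include <vector>
void fused_row_reference(const float* xw, const float* y, const float* bias0,
                         const float* bias1, const float* scale, float* out,
                         int N, float epsilon, bool with_relu) {
    std::vector<float> t(N);
    float sum = 0.f, sq = 0.f;
    for (int j = 0; j < N; ++j) {
        float v = xw[j] + (bias0 ? bias0[j] : 0.f);   // add Bias0
        if (with_relu && v < 0.f) v = 0.f;            // optional ReLU
        v += y[j];                                    // elementwise add of Y
        t[j] = v; sum += v; sq += v * v;
    }
    float mean = sum / N;
    float inv_std = 1.f / std::sqrt(sq / N - mean * mean + epsilon);
    for (int j = 0; j < N; ++j) {
        float norm = (t[j] - mean) * inv_std;         // layer norm
        out[j] = (scale ? scale[j] * norm : norm) + (bias1 ? bias1[j] : 0.f);
    }
}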
1DStencil_pratica1_resposta.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define RADIUS 3
#define BLOCK_SIZE 5
#define NUM_ELEMENTS 10

// CUDA API error checking macro
#define cudaCheck(error) \
  if (error != hipSuccess) { \
    printf("Fatal error: %s at %s:%d\n", \
           hipGetErrorString(error), \
           __FILE__, __LINE__); \
    exit(1); \
  }

__global__ void stencil_1d(int *in, int *out)
{
    int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
    int local = 0;

    // Apply the stencil
    for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
        local += in[gindex + offset];

    __syncthreads();

    out[gindex-RADIUS] = local;
}

int main()
{
    unsigned int i;
    int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS];
    int *d_in, *d_out;

    // Initialize host data
    for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i )
        h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7

    // Allocate space on the device
    cudaCheck( hipMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) );
    cudaCheck( hipMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );

    // Copy input data to device
    cudaCheck( hipMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), hipMemcpyHostToDevice) );

    hipLaunchKernelGGL(( stencil_1d), dim3((NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, d_in, d_out);

    cudaCheck( hipMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), hipMemcpyDeviceToHost) );

    // Verify every out value is 7
    for( i = 0; i < NUM_ELEMENTS; ++i )
        if (h_out[i] != 7)
        {
            printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
            break;
        }

    if (i == NUM_ELEMENTS)
        printf("SUCCESS!\n");

    // Free out memory
    hipFree(d_in);
    hipFree(d_out);

    return 0;
}
1DStencil_pratica1_resposta.cu
#include <stdio.h>

#define RADIUS 3
#define BLOCK_SIZE 5
#define NUM_ELEMENTS 10

// CUDA API error checking macro
#define cudaCheck(error) \
  if (error != cudaSuccess) { \
    printf("Fatal error: %s at %s:%d\n", \
           cudaGetErrorString(error), \
           __FILE__, __LINE__); \
    exit(1); \
  }

__global__ void stencil_1d(int *in, int *out)
{
    int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
    int local = 0;

    // Apply the stencil
    for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
        local += in[gindex + offset];

    __syncthreads();

    out[gindex-RADIUS] = local;
}

int main()
{
    unsigned int i;
    int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS];
    int *d_in, *d_out;

    // Initialize host data
    for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i )
        h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7

    // Allocate space on the device
    cudaCheck( cudaMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) );
    cudaCheck( cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );

    // Copy input data to device
    cudaCheck( cudaMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), cudaMemcpyHostToDevice) );

    stencil_1d<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out);

    cudaCheck( cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost) );

    // Verify every out value is 7
    for( i = 0; i < NUM_ELEMENTS; ++i )
        if (h_out[i] != 7)
        {
            printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
            break;
        }

    if (i == NUM_ELEMENTS)
        printf("SUCCESS!\n");

    // Free out memory
    cudaFree(d_in);
    cudaFree(d_out);

    return 0;
}
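One detail worth noting in both stencil files: the kernel launch itself is never passed through the cudaCheck macro, so a bad launch configuration would only surface at the following memcpy. A minimal, hypothetical addition right after the launch in the CUDA version would be:

// Hypothetical post-launch checks (not in the original file), inserted after
// the stencil_1d<<<...>>> launch inside main():
//   cudaCheck( cudaGetLastError() );        // catches an invalid launch configuration
//   cudaCheck( cudaDeviceSynchronize() );   // catches faults raised while the kernel runs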
c80a855dfd8b326d5044dcd89f5b7a427aa64ae4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // *************************************************************** // cuDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision: 3505 $ // $Date: 2007-07-06 09:26:06 -0700 (Fri, 06 Jul 2007) $ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt in // the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * spmvmult_app.cu * * @brief CUDPP application-level scan routines */ /** \addtogroup cudpp_app * */ #include "cudpp.h" #include "cudpp_util.h" #include "cudpp_plan.h" #include "cudpp_globals.h" #include "kernel/spmvmult_kernel.cu" extern "C" void cudppSegmentedScanDispatch (void *d_out, const void *d_idata, const unsigned int *d_iflags, int numElements, const CUDPPSegmentedScanPlan *plan ); #include <cutil.h> #include <cstdlib> #include <cstdio> #include <assert.h> /** @name Sparse Matrix-Vector Multiply Functions * @{ */ /** @brief Perform matrix-vector multiply for sparse matrices and vectors of arbitrary size. * * This function performs the sparse matrix-vector multiply by executing four steps. * * 1. The sparseMatrixVectorFetchAndMultiply() kernel does an element-wise multiplication of a * each element e in CUDPPSparseMatrixVectorMultiplyPlan::m_d_A with the corresponding * (i.e. in the same row as the column index of e in CUDPPSparseMatrixVectorMultiplyPlan::m_d_A) * element in d_x and stores the product in CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod. It * also sets all elements of CUDPPSparseMatrixVectorMultiplyPlan::m_d_flags to 0. * * 2. The sparseMatrixVectorSetFlags() kernel iterates over each element in * CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowIndex and sets * the corresponding position (indicated by CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowIndex) in * CUDPPSparseMatrixVectorMultiplyPlan::m_d_flags to 1. * * 3. Perform a segmented scan on CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod with * CUDPPSparseMatrixVectorMultiplyPlan::m_d_flags as the flag vector. The output is * stored in CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod. * * 4. The yGather() kernel goes over each element in CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowFinalIndex * and picks the corresponding element (indicated by CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowFinalIndex) * element from CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod and stores it in d_y. 
* * @param[out] d_y The output array for the sparse matrix-vector multiply (y vector) * @param[in] d_x The input x vector * @param[in] plan Pointer to the CUDPPSparseMatrixVectorMultiplyPlan object which stores the * configuration and pointers to temporary buffers needed by this routine */ template <class T> void sparseMatrixVectorMultiply( T *d_y, const T *d_x, const CUDPPSparseMatrixVectorMultiplyPlan *plan ) { unsigned int numEltsBlocks = max(1, (int)ceil((double)plan->m_numNonZeroElements / ((double)SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); bool fullBlock = (plan->m_numNonZeroElements == (numEltsBlocks * SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE)); dim3 gridElts(max(1, numEltsBlocks), 1, 1); dim3 threads(SCAN_CTA_SIZE, 1, 1); if (fullBlock) hipLaunchKernelGGL(( sparseMatrixVectorFetchAndMultiply<T, true>), dim3(gridElts), dim3(threads), 0, 0, plan->m_d_flags, (T*)plan->m_d_prod, (T*)plan->m_d_A, d_x, plan->m_d_index, plan->m_numNonZeroElements); else hipLaunchKernelGGL(( sparseMatrixVectorFetchAndMultiply<T, false>), dim3(gridElts), dim3(threads), 0, 0, plan->m_d_flags, (T*)plan->m_d_prod, (T*)plan->m_d_A, d_x, plan->m_d_index, plan->m_numNonZeroElements); unsigned int numRowBlocks = max(1, (int)ceil((double)plan->m_numRows / ((double)SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); dim3 gridRows(max(1, numRowBlocks), 1, 1); hipLaunchKernelGGL(( sparseMatrixVectorSetFlags), dim3(gridRows), dim3(threads), 0, 0, plan->m_d_flags, plan->m_d_rowIndex, plan->m_numRows); cudppSegmentedScanDispatch ((T*)plan->m_d_prod, (const T*)plan->m_d_prod, plan->m_d_flags, plan->m_numNonZeroElements, plan->m_segmentedScanPlan); hipLaunchKernelGGL(( yGather), dim3(gridRows), dim3(threads), 0, 0, d_y, (T*)plan->m_d_prod, plan->m_d_rowFinalIndex, plan->m_numRows); } #ifdef __cplusplus extern "C" { #endif // file scope /** @brief Allocate intermediate product, flags and rowFindx (index of the last * element of each row) array . 
* * @param[in] plan Pointer to CUDPPSparseMatrixVectorMultiplyPlan class containing sparse * matrix-vector multiply options, number of non-zero elements and number * of rows which is used to compute storage requirements * @param[in] A The matrix A * @param[in] rowindx The indices of elements in A which are the first element of their row * @param[in] indx The column number for each element in A */ void allocSparseMatrixVectorMultiplyStorage(CUDPPSparseMatrixVectorMultiplyPlan *plan, const void *A, const unsigned int *rowindx, const unsigned int *indx) { switch(plan->m_config.datatype) { case CUDPP_INT: CUDA_SAFE_CALL(hipMalloc(&(plan->m_d_prod), plan->m_numNonZeroElements * sizeof(int))); CUDA_SAFE_CALL(hipMalloc(&(plan->m_d_A), plan->m_numNonZeroElements * sizeof(int))); CUDA_SAFE_CALL(hipMemcpy(plan->m_d_A, (int *)A, plan->m_numNonZeroElements * sizeof(int), hipMemcpyHostToDevice) ); break; case CUDPP_UINT: CUDA_SAFE_CALL(hipMalloc(&(plan->m_d_prod), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(hipMalloc(&(plan->m_d_A), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(hipMemcpy(plan->m_d_A, (unsigned int *)A, plan->m_numNonZeroElements * sizeof(unsigned int), hipMemcpyHostToDevice) ); break; case CUDPP_FLOAT: CUDA_SAFE_CALL(hipMalloc(&(plan->m_d_prod), plan->m_numNonZeroElements * sizeof(float))); CUDA_SAFE_CALL(hipMalloc(&(plan->m_d_A), plan->m_numNonZeroElements * sizeof(float))); CUDA_SAFE_CALL(hipMemcpy(plan->m_d_A, (float *)A, plan->m_numNonZeroElements * sizeof(float), hipMemcpyHostToDevice) ); break; default: break; } CUDA_SAFE_CALL(hipMalloc((void **)&(plan->m_d_flags), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(hipMalloc((void **)&(plan->m_d_index), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(hipMalloc((void **)&(plan->m_d_rowFinalIndex), plan->m_numRows * sizeof(unsigned int))); CUDA_SAFE_CALL(hipMalloc((void **)&(plan->m_d_rowIndex), plan->m_numRows * sizeof(unsigned int))); CUDA_SAFE_CALL(hipMemcpy(plan->m_d_rowFinalIndex, plan->m_rowFinalIndex, plan->m_numRows * sizeof(unsigned int), hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemcpy(plan->m_d_rowIndex, rowindx, plan->m_numRows * sizeof(unsigned int), hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemcpy(plan->m_d_index, indx, plan->m_numNonZeroElements * sizeof(unsigned int), hipMemcpyHostToDevice) ); CUT_CHECK_ERROR("allocSparseMatrixVectorMultiplyStorage"); } /** @brief Deallocate intermediate product, flags and rowFindx (index of the last * element of each row) array . * * These arrays must have been allocated by allocSparseMatrixVectorMultiplyStorage(), which is called * by the constructor of CUDPPSparseMatrixVectorMultiplyPlan. * * @param[in] plan Pointer to CUDPPSparseMatrixVectorMultiplyPlan plan initialized by its constructor. */ void freeSparseMatrixVectorMultiplyStorage(CUDPPSparseMatrixVectorMultiplyPlan *plan) { CUT_CHECK_ERROR("freeSparseMatrixVectorMultiply"); hipFree(plan->m_d_prod); hipFree(plan->m_d_A); hipFree((void*)plan->m_d_flags); hipFree((void*)plan->m_d_index); hipFree((void*)plan->m_d_rowFinalIndex); hipFree((void*)plan->m_d_rowIndex); plan->m_d_prod = 0; plan->m_d_A = 0; plan->m_d_flags = 0; plan->m_d_index = 0; plan->m_d_rowFinalIndex = 0; plan->m_d_rowIndex = 0; plan->m_numNonZeroElements = 0; plan->m_numRows = 0; } /** @brief Dispatch function to perform a sparse matrix-vector multiply * with the specified configuration. 
* * This is the dispatch routine which calls sparseMatrixVectorMultiply() with * appropriate template parameters and arguments * * @param[out] d_y The output vector for y = A*x * @param[in] d_x The x vector for y = A*x * @param[in] plan The sparse matrix plan and data */ void cudppSparseMatrixVectorMultiplyDispatch ( void *d_y, const void *d_x, const CUDPPSparseMatrixVectorMultiplyPlan *plan ) { switch(plan->m_config.datatype) { case CUDPP_INT: sparseMatrixVectorMultiply<int>((int *)d_y, (int *)d_x, plan); break; case CUDPP_UINT: sparseMatrixVectorMultiply<unsigned int>((unsigned int *)d_y, (unsigned int *)d_x, plan); break; case CUDPP_FLOAT: sparseMatrixVectorMultiply<float>((float *)d_y, (float *)d_x, plan); break; default: break; } } #ifdef __cplusplus } #endif /** @} */ // end sparse matrix-vector multiply functions /** @} */ // end cudpp_app
c80a855dfd8b326d5044dcd89f5b7a427aa64ae4.cu
// *************************************************************** // cuDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision: 3505 $ // $Date: 2007-07-06 09:26:06 -0700 (Fri, 06 Jul 2007) $ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt in // the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * spmvmult_app.cu * * @brief CUDPP application-level scan routines */ /** \addtogroup cudpp_app * */ #include "cudpp.h" #include "cudpp_util.h" #include "cudpp_plan.h" #include "cudpp_globals.h" #include "kernel/spmvmult_kernel.cu" extern "C" void cudppSegmentedScanDispatch (void *d_out, const void *d_idata, const unsigned int *d_iflags, int numElements, const CUDPPSegmentedScanPlan *plan ); #include <cutil.h> #include <cstdlib> #include <cstdio> #include <assert.h> /** @name Sparse Matrix-Vector Multiply Functions * @{ */ /** @brief Perform matrix-vector multiply for sparse matrices and vectors of arbitrary size. * * This function performs the sparse matrix-vector multiply by executing four steps. * * 1. The sparseMatrixVectorFetchAndMultiply() kernel does an element-wise multiplication of a * each element e in CUDPPSparseMatrixVectorMultiplyPlan::m_d_A with the corresponding * (i.e. in the same row as the column index of e in CUDPPSparseMatrixVectorMultiplyPlan::m_d_A) * element in d_x and stores the product in CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod. It * also sets all elements of CUDPPSparseMatrixVectorMultiplyPlan::m_d_flags to 0. * * 2. The sparseMatrixVectorSetFlags() kernel iterates over each element in * CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowIndex and sets * the corresponding position (indicated by CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowIndex) in * CUDPPSparseMatrixVectorMultiplyPlan::m_d_flags to 1. * * 3. Perform a segmented scan on CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod with * CUDPPSparseMatrixVectorMultiplyPlan::m_d_flags as the flag vector. The output is * stored in CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod. * * 4. The yGather() kernel goes over each element in CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowFinalIndex * and picks the corresponding element (indicated by CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowFinalIndex) * element from CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod and stores it in d_y. 
* * @param[out] d_y The output array for the sparse matrix-vector multiply (y vector) * @param[in] d_x The input x vector * @param[in] plan Pointer to the CUDPPSparseMatrixVectorMultiplyPlan object which stores the * configuration and pointers to temporary buffers needed by this routine */ template <class T> void sparseMatrixVectorMultiply( T *d_y, const T *d_x, const CUDPPSparseMatrixVectorMultiplyPlan *plan ) { unsigned int numEltsBlocks = max(1, (int)ceil((double)plan->m_numNonZeroElements / ((double)SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); bool fullBlock = (plan->m_numNonZeroElements == (numEltsBlocks * SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE)); dim3 gridElts(max(1, numEltsBlocks), 1, 1); dim3 threads(SCAN_CTA_SIZE, 1, 1); if (fullBlock) sparseMatrixVectorFetchAndMultiply<T, true><<<gridElts, threads>>> (plan->m_d_flags, (T*)plan->m_d_prod, (T*)plan->m_d_A, d_x, plan->m_d_index, plan->m_numNonZeroElements); else sparseMatrixVectorFetchAndMultiply<T, false><<<gridElts, threads>>> (plan->m_d_flags, (T*)plan->m_d_prod, (T*)plan->m_d_A, d_x, plan->m_d_index, plan->m_numNonZeroElements); unsigned int numRowBlocks = max(1, (int)ceil((double)plan->m_numRows / ((double)SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); dim3 gridRows(max(1, numRowBlocks), 1, 1); sparseMatrixVectorSetFlags<<<gridRows, threads>>> (plan->m_d_flags, plan->m_d_rowIndex, plan->m_numRows); cudppSegmentedScanDispatch ((T*)plan->m_d_prod, (const T*)plan->m_d_prod, plan->m_d_flags, plan->m_numNonZeroElements, plan->m_segmentedScanPlan); yGather<<<gridRows, threads>>> (d_y, (T*)plan->m_d_prod, plan->m_d_rowFinalIndex, plan->m_numRows); } #ifdef __cplusplus extern "C" { #endif // file scope /** @brief Allocate intermediate product, flags and rowFindx (index of the last * element of each row) array . 
* * @param[in] plan Pointer to CUDPPSparseMatrixVectorMultiplyPlan class containing sparse * matrix-vector multiply options, number of non-zero elements and number * of rows which is used to compute storage requirements * @param[in] A The matrix A * @param[in] rowindx The indices of elements in A which are the first element of their row * @param[in] indx The column number for each element in A */ void allocSparseMatrixVectorMultiplyStorage(CUDPPSparseMatrixVectorMultiplyPlan *plan, const void *A, const unsigned int *rowindx, const unsigned int *indx) { switch(plan->m_config.datatype) { case CUDPP_INT: CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_prod), plan->m_numNonZeroElements * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_A), plan->m_numNonZeroElements * sizeof(int))); CUDA_SAFE_CALL(cudaMemcpy(plan->m_d_A, (int *)A, plan->m_numNonZeroElements * sizeof(int), cudaMemcpyHostToDevice) ); break; case CUDPP_UINT: CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_prod), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_A), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMemcpy(plan->m_d_A, (unsigned int *)A, plan->m_numNonZeroElements * sizeof(unsigned int), cudaMemcpyHostToDevice) ); break; case CUDPP_FLOAT: CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_prod), plan->m_numNonZeroElements * sizeof(float))); CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_A), plan->m_numNonZeroElements * sizeof(float))); CUDA_SAFE_CALL(cudaMemcpy(plan->m_d_A, (float *)A, plan->m_numNonZeroElements * sizeof(float), cudaMemcpyHostToDevice) ); break; default: break; } CUDA_SAFE_CALL(cudaMalloc((void **)&(plan->m_d_flags), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void **)&(plan->m_d_index), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void **)&(plan->m_d_rowFinalIndex), plan->m_numRows * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void **)&(plan->m_d_rowIndex), plan->m_numRows * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMemcpy(plan->m_d_rowFinalIndex, plan->m_rowFinalIndex, plan->m_numRows * sizeof(unsigned int), cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL( cudaMemcpy(plan->m_d_rowIndex, rowindx, plan->m_numRows * sizeof(unsigned int), cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL( cudaMemcpy(plan->m_d_index, indx, plan->m_numNonZeroElements * sizeof(unsigned int), cudaMemcpyHostToDevice) ); CUT_CHECK_ERROR("allocSparseMatrixVectorMultiplyStorage"); } /** @brief Deallocate intermediate product, flags and rowFindx (index of the last * element of each row) array . * * These arrays must have been allocated by allocSparseMatrixVectorMultiplyStorage(), which is called * by the constructor of CUDPPSparseMatrixVectorMultiplyPlan. * * @param[in] plan Pointer to CUDPPSparseMatrixVectorMultiplyPlan plan initialized by its constructor. */ void freeSparseMatrixVectorMultiplyStorage(CUDPPSparseMatrixVectorMultiplyPlan *plan) { CUT_CHECK_ERROR("freeSparseMatrixVectorMultiply"); cudaFree(plan->m_d_prod); cudaFree(plan->m_d_A); cudaFree((void*)plan->m_d_flags); cudaFree((void*)plan->m_d_index); cudaFree((void*)plan->m_d_rowFinalIndex); cudaFree((void*)plan->m_d_rowIndex); plan->m_d_prod = 0; plan->m_d_A = 0; plan->m_d_flags = 0; plan->m_d_index = 0; plan->m_d_rowFinalIndex = 0; plan->m_d_rowIndex = 0; plan->m_numNonZeroElements = 0; plan->m_numRows = 0; } /** @brief Dispatch function to perform a sparse matrix-vector multiply * with the specified configuration. 
* * This is the dispatch routine which calls sparseMatrixVectorMultiply() with * appropriate template parameters and arguments * * @param[out] d_y The output vector for y = A*x * @param[in] d_x The x vector for y = A*x * @param[in] plan The sparse matrix plan and data */ void cudppSparseMatrixVectorMultiplyDispatch ( void *d_y, const void *d_x, const CUDPPSparseMatrixVectorMultiplyPlan *plan ) { switch(plan->m_config.datatype) { case CUDPP_INT: sparseMatrixVectorMultiply<int>((int *)d_y, (int *)d_x, plan); break; case CUDPP_UINT: sparseMatrixVectorMultiply<unsigned int>((unsigned int *)d_y, (unsigned int *)d_x, plan); break; case CUDPP_FLOAT: sparseMatrixVectorMultiply<float>((float *)d_y, (float *)d_x, plan); break; default: break; } } #ifdef __cplusplus } #endif /** @} */ // end sparse matrix-vector multiply functions /** @} */ // end cudpp_app
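The doxygen block above describes the four-step GPU pipeline (fetch-and-multiply, flag setting, segmented scan, gather). For checking results, a plain CPU y = A*x over the same storage is handy; the sketch below assumes the CSR-style layout implied by the plan fields (A holds the non-zero values, indx their column indices, rowindx the index of each row's first element) and is not part of CUDPP.

// Hypothetical CPU reference for the sparse matrix-vector product computed by
// sparseMatrixVectorMultiply(); assumes CSR-style inputs as described above.
void spmvCpuReference(const float *A, const unsigned int *indx,
                      const unsigned int *rowindx, unsigned int numRows,
                      unsigned int numNonZero, const float *x, float *y)
{
    for (unsigned int r = 0; r < numRows; ++r)
    {
        unsigned int begin = rowindx[r];
        unsigned int end = (r + 1 < numRows) ? rowindx[r + 1] : numNonZero;
        float acc = 0.0f;
        for (unsigned int i = begin; i < end; ++i)
            acc += A[i] * x[indx[i]];
        y[r] = acc;
    }
}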
d2324ea0bbd5505667550b24a5f9109c7523b745.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include "ATen/hip/HIPContext.h" #include <torch/serialize/tensor.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> namespace { #define CUDA_1D_KERNEL_LOOP(i, n) \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // The number of cuda threads to use. 512 is used for backward compatibility constexpr int ROI_CUDA_NUM_THREADS = 512; // The maximum number of blocks to use in the default kernel call. constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096; /** * @brief Compute the number of blocks needed to run N threads. */ inline int ROI_GET_BLOCKS(const int N) { return ::max( ::min( (N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS, ROI_MAXIMUM_NUM_BLOCKS), // Use at least 1 block, since CUDA does not allow empty block 1); } template <typename T> __device__ T bilinear_interpolate( const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForwardKernel( const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high, const int /*index*/ /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty *w1 = *w2 = *w3 = *w4 = 0.; *x_low = *x_high = *y_low = *y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = (T)*y_low; } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = (T)*x_low; } else { *x_high = *x_low + 1; } T ly = y - *y_low; T lx = x - *x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <> inline __device__ double gpu_atomic_add(const double val, double* address) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return val; } template <typename T> __global__ void RoIAlignBackwardKernel( const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / 
static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { /* atomicAdd( offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd( offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd( offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd( offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); */ gpu_atomic_add( static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low); gpu_atomic_add( static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high); gpu_atomic_add( static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low); gpu_atomic_add( static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward } // namespace at::Tensor ROIAlignForwardCUDA( const at::Tensor input, const at::Tensor rois, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(input.is_contiguous()); AT_ASSERT(rois.is_contiguous()); AT_ASSERT(input.ndimension() == 4); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto proposals = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); // Output Tensor is (num_rois, C, pooled_height, pooled_width) auto output = torch::zeros({proposals, channels, pooled_height, pooled_width}, input.options()); //input.type().tensor({proposals, channels, pooled_height, pooled_width}); auto count = output.numel(); AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignForwardCUDA", ([&] { hipLaunchKernelGGL(( RoIAlignForwardKernel<scalar_t>) , dim3(ROI_GET_BLOCKS(count)), dim3(ROI_CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input.data<scalar_t>(), static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.data<scalar_t>(), output.data<scalar_t>()); })); AT_ASSERT(hipGetLastError() == hipSuccess); return output; } at::Tensor ROIAlignBackwardCUDA( const at::Tensor rois, const at::Tensor grad_output, int64_t b_size, int64_t channels, int64_t height, int64_t width, int64_t pooled_height, int64_t 
pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(rois.is_contiguous()); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto roi_cols = rois.size(1); AT_ASSERT(roi_cols == 4 || roi_cols == 5); // Output Tensor is (num_rois, C, pooled_height, pooled_width) // gradient wrt input features auto grad_in = torch::zeros({b_size, channels, height, width}, rois.options()); //rois.type().tensor({b_size, channels, height, width}).zero_(); auto num_rois = rois.size(0); auto count = grad_output.numel(); AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlignBackwardCUDA", ([&] { hipLaunchKernelGGL(( RoIAlignBackwardKernel<scalar_t>) , dim3(ROI_GET_BLOCKS(count)), dim3(ROI_CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, grad_output.data<scalar_t>(), num_rois, static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_in.data<scalar_t>(), rois.data<scalar_t>()); })); AT_ASSERT(hipGetLastError() == hipSuccess); return grad_in; }
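// A minimal host-side sketch of the bilinear sampling that both RoIAlign kernels above
// implement on the device; it can serve as a CPU reference when unit-testing the extension.
// The function name bilinear_interpolate_ref is illustrative and not part of the file above.
#include <algorithm>

template <typename T>
T bilinear_interpolate_ref(const T* data, int height, int width, T y, T x) {
  // Same -1 tolerance as the kernel: samples outside the feature map contribute zero.
  if (y < -1.0 || y > height || x < -1.0 || x > width) return 0;
  y = std::max(y, (T)0);
  x = std::max(x, (T)0);
  int y_low = (int)y, x_low = (int)x, y_high, x_high;
  if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; }
  else                     { y_high = y_low + 1; }
  if (x_low >= width - 1)  { x_high = x_low = width - 1; x = (T)x_low; }
  else                     { x_high = x_low + 1; }
  T ly = y - y_low, lx = x - x_low, hy = 1 - ly, hx = 1 - lx;
  // Weighted sum of the four neighbouring pixels.
  return hy * hx * data[y_low  * width + x_low ] + hy * lx * data[y_low  * width + x_high] +
         ly * hx * data[y_high * width + x_low ] + ly * lx * data[y_high * width + x_high];
}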
d2324ea0bbd5505667550b24a5f9109c7523b745.cu
#include <ATen/ATen.h> #include "ATen/cuda/CUDAContext.h" #include <torch/serialize/tensor.h> #include <cuda.h> #include <cuda_runtime.h> namespace { #define CUDA_1D_KERNEL_LOOP(i, n) \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // The number of cuda threads to use. 512 is used for backward compatibility constexpr int ROI_CUDA_NUM_THREADS = 512; // The maximum number of blocks to use in the default kernel call. constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096; /** * @brief Compute the number of blocks needed to run N threads. */ inline int ROI_GET_BLOCKS(const int N) { return std::max( std::min( (N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS, ROI_MAXIMUM_NUM_BLOCKS), // Use at least 1 block, since CUDA does not allow empty block 1); } template <typename T> __device__ T bilinear_interpolate( const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForwardKernel( const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high, const int /*index*/ /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty *w1 = *w2 = *w3 = *w4 = 0.; *x_low = *x_high = *y_low = *y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = (T)*y_low; } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = (T)*x_low; } else { *x_high = *x_low + 1; } T ly = y - *y_low; T lx = x - *x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <> inline __device__ double gpu_atomic_add(const double val, double* address) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return val; } template <typename T> __global__ void RoIAlignBackwardKernel( const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * 
pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { /* atomicAdd( offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd( offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd( offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd( offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); */ gpu_atomic_add( static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low); gpu_atomic_add( static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high); gpu_atomic_add( static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low); gpu_atomic_add( static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward } // namespace at::Tensor ROIAlignForwardCUDA( const at::Tensor input, const at::Tensor rois, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(input.is_contiguous()); AT_ASSERT(rois.is_contiguous()); AT_ASSERT(input.ndimension() == 4); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto proposals = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); // Output Tensor is (num_rois, C, pooled_height, pooled_width) auto output = torch::zeros({proposals, channels, pooled_height, pooled_width}, input.options()); //input.type().tensor({proposals, channels, pooled_height, pooled_width}); auto count = output.numel(); AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignForwardCUDA", ([&] { RoIAlignForwardKernel<scalar_t> <<<ROI_GET_BLOCKS(count), ROI_CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, input.data<scalar_t>(), static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.data<scalar_t>(), output.data<scalar_t>()); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return output; } at::Tensor ROIAlignBackwardCUDA( const at::Tensor rois, const at::Tensor grad_output, int64_t b_size, int64_t channels, int64_t height, int64_t width, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(rois.is_contiguous()); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto roi_cols = rois.size(1); AT_ASSERT(roi_cols 
== 4 || roi_cols == 5); // Output Tensor is (num_rois, C, pooled_height, pooled_width) // gradient wrt input features auto grad_in = torch::zeros({b_size, channels, height, width}, rois.options()); //rois.type().tensor({b_size, channels, height, width}).zero_(); auto num_rois = rois.size(0); auto count = grad_output.numel(); AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlignBackwardCUDA", ([&] { RoIAlignBackwardKernel<scalar_t> <<<ROI_GET_BLOCKS(count), ROI_CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, grad_output.data<scalar_t>(), num_rois, static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_in.data<scalar_t>(), rois.data<scalar_t>()); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return grad_in; }
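// A self-contained CUDA check of the atomicCAS-based double-precision atomicAdd emulation
// that gpu_atomic_add uses in the backward kernel above. Kernel and function names here
// are illustrative only; they are not taken from the file.
#include <cstdio>
#include <cuda_runtime.h>

__device__ double atomic_add_f64(double* address, double val) {
  unsigned long long* addr = (unsigned long long*)address;
  unsigned long long old = *addr, assumed;
  do {
    assumed = old;
    old = atomicCAS(addr, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);  // bitwise compare, so a stored NaN cannot hang the loop
  return __longlong_as_double(old);
}

__global__ void accumulate(double* sum) { atomic_add_f64(sum, 1.0); }

int main() {
  double h_sum = 0.0, *d_sum = nullptr;
  cudaMalloc(&d_sum, sizeof(double));
  cudaMemcpy(d_sum, &h_sum, sizeof(double), cudaMemcpyHostToDevice);
  accumulate<<<64, 256>>>(d_sum);  // 16384 concurrent increments of 1.0
  cudaMemcpy(&h_sum, d_sum, sizeof(double), cudaMemcpyDeviceToHost);  // implicit sync
  printf("sum = %.0f (expected 16384)\n", h_sum);
  cudaFree(d_sum);
  return 0;
}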
f8af7cb3439d8337082bf3da3700bf4e93f40bfe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define BLOCK_PER_SM (8 * 2) #define SM_NUM 56 #define BLOCK_NUM (SM_NUM * BLOCK_PER_SM) #define THREAD_PER_BLOCK 256 #define TOTAL_NUM (BLOCK_NUM * THREAD_PER_BLOCK) //#define WARP_AWARE //#define DEVICE_ALLOC #define UVM_ALLOC //#define HOST_ALLOC //#define SIZE (1024 * 1024 * 2L * TOTAL_NUM) //#define SIZE (1024 * 1024 * 9 * 7 * 5) #define SIZE (1024 * 1024 * 7 * 512L) //#define STEP (512) #define STEP (1024 * 16) #define LAT_ARRAY_SIZE 12 #define LAT_LOWER_BOUND 10000 #define LAT_HIGHER_BOUND 20000 __global__ void kernel(int *input, double *total_lat) { //unsigned t0, t1, lat; int tmp; __shared__ int s_tmp; s_tmp = 0; #ifdef WARP_AWARE unsigned idx = (blockIdx.x * blockDim.x + threadIdx.x) / 32; #else unsigned idx = blockIdx.x * blockDim.x + threadIdx.x; #endif unsigned long long begin = SIZE / TOTAL_NUM * idx; unsigned long long end = SIZE / TOTAL_NUM * (idx + 1); for (unsigned long long i = begin; i < end; i += STEP) { tmp = input[i]; s_tmp += tmp; } } int main() { int *d_input; double *total_lat, *h_total_lat; h_total_lat = (double*)malloc(LAT_ARRAY_SIZE * sizeof(double)); hipMalloc(&total_lat, LAT_ARRAY_SIZE*sizeof(double)); for (int i = 0; i < LAT_ARRAY_SIZE; i++) h_total_lat[i] = 0.0; hipMemcpy(total_lat, h_total_lat, LAT_ARRAY_SIZE*sizeof(double), hipMemcpyHostToDevice); #if defined(DEVICE_ALLOC) hipMalloc(&d_input, SIZE*sizeof(int)); #elif defined(UVM_ALLOC) hipMallocManaged(&d_input, SIZE*sizeof(int)); hipMemAdvise(d_input, SIZE*sizeof(int), hipMemAdviseSetReadMostly, 0); #elif defined(HOST_ALLOC) hipHostMalloc(&d_input, SIZE*sizeof(int)); #else return 0; #endif // init #if defined(DEVICE_ALLOC) int *h_input; h_input = (int*)malloc(SIZE*sizeof(int)); for (unsigned long long i = 0; i < SIZE; i += STEP) { h_input[i] = rand(); } hipMemcpy(d_input, h_input, SIZE*sizeof(int), hipMemcpyHostToDevice); #elif defined(UVM_ALLOC) || defined(HOST_ALLOC) for (unsigned long long i = 0; i < SIZE; i += STEP) { d_input[i] = rand(); } #endif #ifdef WARP_AWARE hipLaunchKernelGGL(( kernel), dim3(BLOCK_NUM), dim3(THREAD_PER_BLOCK * 32), 0, 0, d_input, total_lat); #else hipLaunchKernelGGL(( kernel), dim3(BLOCK_NUM), dim3(THREAD_PER_BLOCK), 0, 0, d_input, total_lat); #endif hipMemcpy(h_total_lat, total_lat, LAT_ARRAY_SIZE*sizeof(double), hipMemcpyDeviceToHost); hipFree(d_input); hipFree(total_lat); printf("Access #: %llu\n", SIZE / STEP); #ifdef WARP_AWARE printf("Accesses per warp: %llu\n", SIZE / STEP / TOTAL_NUM); #else printf("Accesses per thread: %llu\n", SIZE / STEP / TOTAL_NUM); #endif return 0; }
f8af7cb3439d8337082bf3da3700bf4e93f40bfe.cu
#include <stdio.h> #include <stdlib.h> #define BLOCK_PER_SM (8 * 2) #define SM_NUM 56 #define BLOCK_NUM (SM_NUM * BLOCK_PER_SM) #define THREAD_PER_BLOCK 256 #define TOTAL_NUM (BLOCK_NUM * THREAD_PER_BLOCK) //#define WARP_AWARE //#define DEVICE_ALLOC #define UVM_ALLOC //#define HOST_ALLOC //#define SIZE (1024 * 1024 * 2L * TOTAL_NUM) //#define SIZE (1024 * 1024 * 9 * 7 * 5) #define SIZE (1024 * 1024 * 7 * 512L) //#define STEP (512) #define STEP (1024 * 16) #define LAT_ARRAY_SIZE 12 #define LAT_LOWER_BOUND 10000 #define LAT_HIGHER_BOUND 20000 __global__ void kernel(int *input, double *total_lat) { //unsigned t0, t1, lat; int tmp; __shared__ int s_tmp; s_tmp = 0; #ifdef WARP_AWARE unsigned idx = (blockIdx.x * blockDim.x + threadIdx.x) / 32; #else unsigned idx = blockIdx.x * blockDim.x + threadIdx.x; #endif unsigned long long begin = SIZE / TOTAL_NUM * idx; unsigned long long end = SIZE / TOTAL_NUM * (idx + 1); for (unsigned long long i = begin; i < end; i += STEP) { tmp = input[i]; s_tmp += tmp; } } int main() { int *d_input; double *total_lat, *h_total_lat; h_total_lat = (double*)malloc(LAT_ARRAY_SIZE * sizeof(double)); cudaMalloc(&total_lat, LAT_ARRAY_SIZE*sizeof(double)); for (int i = 0; i < LAT_ARRAY_SIZE; i++) h_total_lat[i] = 0.0; cudaMemcpy(total_lat, h_total_lat, LAT_ARRAY_SIZE*sizeof(double), cudaMemcpyHostToDevice); #if defined(DEVICE_ALLOC) cudaMalloc(&d_input, SIZE*sizeof(int)); #elif defined(UVM_ALLOC) cudaMallocManaged(&d_input, SIZE*sizeof(int)); cudaMemAdvise(d_input, SIZE*sizeof(int), cudaMemAdviseSetReadMostly, 0); #elif defined(HOST_ALLOC) cudaMallocHost(&d_input, SIZE*sizeof(int)); #else return 0; #endif // init #if defined(DEVICE_ALLOC) int *h_input; h_input = (int*)malloc(SIZE*sizeof(int)); for (unsigned long long i = 0; i < SIZE; i += STEP) { h_input[i] = rand(); } cudaMemcpy(d_input, h_input, SIZE*sizeof(int), cudaMemcpyHostToDevice); #elif defined(UVM_ALLOC) || defined(HOST_ALLOC) for (unsigned long long i = 0; i < SIZE; i += STEP) { d_input[i] = rand(); } #endif #ifdef WARP_AWARE kernel<<<BLOCK_NUM, THREAD_PER_BLOCK * 32>>>(d_input, total_lat); #else kernel<<<BLOCK_NUM, THREAD_PER_BLOCK>>>(d_input, total_lat); #endif cudaMemcpy(h_total_lat, total_lat, LAT_ARRAY_SIZE*sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_input); cudaFree(total_lat); printf("Access #: %llu\n", SIZE / STEP); #ifdef WARP_AWARE printf("Accesses per warp: %llu\n", SIZE / STEP / TOTAL_NUM); #else printf("Accesses per thread: %llu\n", SIZE / STEP / TOTAL_NUM); #endif return 0; }
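// A minimal sketch of the managed-memory setup the microbenchmark above relies on
// (cudaMallocManaged followed by cudaMemAdviseSetReadMostly on device 0), with the error
// checking the original omits and a sink value so the strided loads stay observable.
// Sizes, names, and the trivial <<<1,1>>> launch are illustrative, not from the benchmark.
#include <cstdio>
#include <cuda_runtime.h>

#define CHECK(call)                                                     \
  do {                                                                  \
    cudaError_t e_ = (call);                                            \
    if (e_ != cudaSuccess) {                                            \
      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,                \
              cudaGetErrorString(e_));                                  \
      return 1;                                                         \
    }                                                                   \
  } while (0)

__global__ void touch(const int* p, long long n, long long step, long long* out) {
  long long acc = 0;
  for (long long i = 0; i < n; i += step) acc += p[i];  // strided, demand-paged reads
  *out = acc;
}

int main() {
  const long long N = 1LL << 24, STEP = 4096;
  int* data = nullptr;
  long long* sink = nullptr;
  CHECK(cudaMallocManaged(&data, N * sizeof(int)));
  CHECK(cudaMallocManaged(&sink, sizeof(long long)));
  // Read-mostly pages may be replicated on the GPU instead of migrated back and forth.
  CHECK(cudaMemAdvise(data, N * sizeof(int), cudaMemAdviseSetReadMostly, 0));
  for (long long i = 0; i < N; i += STEP) data[i] = (int)(i & 0x7fffffff);  // populate on host
  touch<<<1, 1>>>(data, N, STEP, sink);
  CHECK(cudaDeviceSynchronize());
  printf("sink = %lld\n", *sink);
  CHECK(cudaFree(data));
  CHECK(cudaFree(sink));
  return 0;
}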
a16fe732dfb5440b3a9d24c58a81b9a70a30337b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "compute_d_w_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *v = NULL; hipMalloc(&v, XSIZE*YSIZE); float *h = NULL; hipMalloc(&h, XSIZE*YSIZE); float *dw = NULL; hipMalloc(&dw, XSIZE*YSIZE); bool is_init = 1; int input_size = XSIZE*YSIZE; int lu_padding = 1; int channel_num = 1; int filter_num = 2; int filter_size = XSIZE*YSIZE; int feature_map_size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( compute_d_w_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, v,h,dw,is_init,input_size,lu_padding,channel_num,filter_num,filter_size,feature_map_size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( compute_d_w_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, v,h,dw,is_init,input_size,lu_padding,channel_num,filter_num,filter_size,feature_map_size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( compute_d_w_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, v,h,dw,is_init,input_size,lu_padding,channel_num,filter_num,filter_size,feature_map_size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
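// The generated harness above sizes its buffers as hipMalloc(&v, XSIZE*YSIZE), i.e. in raw
// bytes; if compute_d_w_kernel indexes XSIZE*YSIZE float elements, each buffer is four times
// too small. A hedged sketch of just the allocation step, written with the CUDA names (the
// hipified file uses hipMalloc with the same signature); whether the fix is actually needed
// depends on how compute_d_w_kernel indexes v, h, and dw. alloc_buffers is a hypothetical helper.
#include <cuda_runtime.h>

static void alloc_buffers(int XSIZE, int YSIZE, float** v, float** h, float** dw) {
  size_t bytes = (size_t)XSIZE * YSIZE * sizeof(float);  // element count times element size
  cudaMalloc(v,  bytes);
  cudaMalloc(h,  bytes);
  cudaMalloc(dw, bytes);
}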
a16fe732dfb5440b3a9d24c58a81b9a70a30337b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "compute_d_w_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *v = NULL; cudaMalloc(&v, XSIZE*YSIZE); float *h = NULL; cudaMalloc(&h, XSIZE*YSIZE); float *dw = NULL; cudaMalloc(&dw, XSIZE*YSIZE); bool is_init = 1; int input_size = XSIZE*YSIZE; int lu_padding = 1; int channel_num = 1; int filter_num = 2; int filter_size = XSIZE*YSIZE; int feature_map_size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); compute_d_w_kernel<<<gridBlock,threadBlock>>>(v,h,dw,is_init,input_size,lu_padding,channel_num,filter_num,filter_size,feature_map_size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { compute_d_w_kernel<<<gridBlock,threadBlock>>>(v,h,dw,is_init,input_size,lu_padding,channel_num,filter_num,filter_size,feature_map_size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { compute_d_w_kernel<<<gridBlock,threadBlock>>>(v,h,dw,is_init,input_size,lu_padding,channel_num,filter_num,filter_size,feature_map_size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
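// As written, the timed loop above enqueues compute_d_w_kernel 1000 times and reads
// steady_clock without synchronizing first, so for short kernels the measurement can reflect
// asynchronous launch overhead more than execution time. A hedged sketch of the same loop
// timed with CUDA events instead (dummy_kernel and the buffer size stand in for the real
// kernel and its arguments); calling cudaDeviceSynchronize() before steady_clock::now()
// would be an equivalent fix.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] *= 2.0f;
}

int main() {
  const int n = 1 << 20;
  float* buf = nullptr;
  cudaMalloc(&buf, n * sizeof(float));
  cudaMemset(buf, 0, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  for (int i = 0; i < 10; ++i) dummy_kernel<<<n / 256, 256>>>(buf, n);  // warm-up

  cudaEventRecord(start);
  for (int i = 0; i < 1000; ++i) dummy_kernel<<<n / 256, 256>>>(buf, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);  // block until the last launch has actually finished

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);  // GPU-side elapsed time in milliseconds
  printf("total %.3f ms over 1000 launches (avg %.3f us)\n", ms, ms * 1000.0f / 1000.0f);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(buf);
  return 0;
}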
65433816c0cf64c0138674fafc260b628806de67.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <gamma.h> #include <mugiq_util_kernels.cuh> #include <mugiq_contract_kernels.cuh> #include <mugiq_displace_kernels.cuh> template <typename Float> void copyGammaCoeffStructToSymbol(){ GammaCoeff<Float> gamma_h; for(int m=0;m<N_GAMMA_;m++){ for(int n=0;n<N_SPIN_;n++){ gamma_h.column_index[m][n] = GammaColumnIndex(m,n); gamma_h.row_value[m][n] = {static_cast<Float>(GammaRowValue(m,n,0)), static_cast<Float>(GammaRowValue(m,n,1))}; } } copyGammaCoefftoSymbol<Float>(gamma_h); } template void copyGammaCoeffStructToSymbol<float>(); template void copyGammaCoeffStructToSymbol<double>(); //---------------------------------------------------------------------------- template <typename Float> void copyGammaMapStructToSymbol(){ GammaMap<Float> map_h; std::vector<int> minusG = minusGamma(); std::vector<int> idxG = indexMapGamma(); std::vector<Float> signGamma(N_GAMMA_,static_cast<Float>(1.0)); for(auto g: minusG) signGamma.at(g) = static_cast<Float>(-1.0); for(int m=0;m<N_GAMMA_;m++){ map_h.sign[m] = signGamma.at(m); map_h.index[m] = idxG.at(m); } copyGammaMaptoSymbol<Float>(map_h); } template void copyGammaMapStructToSymbol<float>(); template void copyGammaMapStructToSymbol<double>(); //---------------------------------------------------------------------------- template <typename Float> void createPhaseMatrixGPU(complex<Float> *phaseMatrix_d, const int* momMatrix_h, long long locV3, int Nmom, int FTSign, const int localL[], const int totalL[]){ int *momMatrix_d; hipMalloc((void**)&momMatrix_d, sizeof(int)*Nmom*MOM_DIM_); hipMemcpy(momMatrix_d, momMatrix_h, sizeof(int)*Nmom*MOM_DIM_, hipMemcpyHostToDevice); checkCudaError(); MomProjArg arg(locV3, Nmom, FTSign, localL, totalL); MomProjArg *arg_d; hipMalloc((void**)&(arg_d), sizeof(MomProjArg) ); checkCudaError(); hipMemcpy(arg_d, &arg, sizeof(MomProjArg), hipMemcpyHostToDevice); //-Call the kernel dim3 blockDim(THREADS_PER_BLOCK, 1, 1); dim3 gridDim((locV3 + blockDim.x -1)/blockDim.x, 1, 1); // spawn threads only for the spatial volume hipLaunchKernelGGL(( phaseMatrix_kernel<Float>), dim3(gridDim),dim3(blockDim), 0, 0, phaseMatrix_d, momMatrix_d, arg_d); hipDeviceSynchronize(); checkCudaError(); hipFree(momMatrix_d); hipFree(arg_d); arg_d = nullptr; } template void createPhaseMatrixGPU<float>(complex<float> *phaseMatrix_d, const int* momMatrix_h, long long locV3, int Nmom, int FTSign, const int localL[], const int totalL[]); template void createPhaseMatrixGPU<double>(complex<double> *phaseMatrix_d, const int* momMatrix_h, long long locV3, int Nmom, int FTSign, const int localL[], const int totalL[]); //---------------------------------------------------------------------------- template <typename Float, QudaFieldOrder fieldOrder> void performLoopContraction(complex<Float> *loopData_d, ColorSpinorField *eVecL, ColorSpinorField *eVecR, Float sigma){ typedef LoopContractArg<Float,fieldOrder> Arg; Arg arg(*eVecL, *eVecR, sigma); Arg *arg_d; hipMalloc((void**)&(arg_d), sizeof(arg) ); checkCudaError(); hipMemcpy(arg_d, &arg, sizeof(arg), hipMemcpyHostToDevice); checkCudaError(); if(arg.nParity != 2) errorQuda("%s: Loop contraction kernels support only Full Site Subset spinors!\n", __func__); dim3 blockDim(THREADS_PER_BLOCK, arg.nParity, SHMEM_BLOCK_Z_SIZE); dim3 gridDim((arg.volumeCB + blockDim.x -1)/blockDim.x, 1, 1); //- Size of the required shared memory in bytes size_t shmemByteSize = sizeof(complex<Float>) * NELEM_SHMEM_CPLX_BUF * blockDim.x * blockDim.y; 
//-Call the kernel hipLaunchKernelGGL(( loopContract_kernel<Float, Arg>), dim3(gridDim),dim3(blockDim),shmemByteSize, 0, loopData_d, arg_d); hipDeviceSynchronize(); checkCudaError(); hipFree(arg_d); arg_d = nullptr; } //- This start to become overwhelming, hopefully no other template parameters will be needed template void performLoopContraction<float,QUDA_FLOAT2_FIELD_ORDER> (complex<float> *loopData_d, ColorSpinorField *eVecL, ColorSpinorField *eVecR, float sigma); template void performLoopContraction<float,QUDA_FLOAT4_FIELD_ORDER> (complex<float> *loopData_d, ColorSpinorField *eVecL, ColorSpinorField *eVecR, float sigma); template void performLoopContraction<double,QUDA_FLOAT2_FIELD_ORDER>(complex<double> *loopData_d, ColorSpinorField *eVecL, ColorSpinorField *eVecR, double sigma); template void performLoopContraction<double,QUDA_FLOAT4_FIELD_ORDER>(complex<double> *loopData_d, ColorSpinorField *eVecL, ColorSpinorField *eVecR, double sigma); //---------------------------------------------------------------------------- template <typename Float> void convertIdxOrder_mapGamma(complex<Float> *dataPosMP_d, const complex<Float> *dataPos_d, int nData, int nLoop, int nParity, int volumeCB, const int localL[]){ //-Some checks if(nData != nLoop*N_GAMMA_) errorQuda("%s: This function assumes that nData = nLoop * NGamma\n", __func__); ConvertIdxArg arg(nData, nLoop, nParity, volumeCB, localL); ConvertIdxArg *arg_d; hipMalloc((void**)&(arg_d), sizeof(arg) ); checkCudaError(); hipMemcpy(arg_d, &arg, sizeof(arg), hipMemcpyHostToDevice); checkCudaError(); dim3 blockDim(THREADS_PER_BLOCK, arg.nParity, N_GAMMA_); dim3 gridDim((arg.volumeCB + blockDim.x -1)/blockDim.x, 1, 1); hipLaunchKernelGGL(( convertIdxOrder_mapGamma_kernel<Float>), dim3(gridDim),dim3(blockDim), 0, 0, dataPosMP_d, dataPos_d, arg_d); hipDeviceSynchronize(); checkCudaError(); hipFree(arg_d); arg_d = nullptr; } template void convertIdxOrder_mapGamma<float> (complex<float> *dataPosMP_d, const complex<float> *dataPos_d, int nData, int nLoop, int nParity, int volumeCB, const int localL[]); template void convertIdxOrder_mapGamma<double>(complex<double> *dataPosMP_d, const complex<double> *dataPos_d, int nData, int nLoop, int nParity, int volumeCB, const int localL[]); //---------------------------------------------------------------------------- //-Helper function for exchanging ghosts (boundaries) void exchangeGhostVec(ColorSpinorField *x){ const int nFace = 1; x->exchangeGhost((QudaParity)(1), nFace, 0); //- first argument is redundant when nParity = 2. nFace MUST be 1 for now. 
} template <typename Float, QudaFieldOrder order> void performCovariantDisplacementVector(ColorSpinorField *dst, ColorSpinorField *src, cudaGaugeField *gauge, DisplaceDir dispDir, DisplaceSign dispSign){ exchangeGhostVec(src); typedef CovDispVecArg<Float,order> DispArg; DispArg arg(*dst, *src, *gauge); DispArg *arg_d; hipMalloc((void**)&(arg_d), sizeof(arg)); checkCudaError(); hipMemcpy(arg_d, &arg, sizeof(arg), hipMemcpyHostToDevice); checkCudaError(); if(arg.nParity != 2) errorQuda("%s: This function supports only Full Site Subset fields!\n", __func__); dim3 blockDim(THREADS_PER_BLOCK, arg.nParity, 1); dim3 gridDim((arg.volumeCB + blockDim.x -1)/blockDim.x, 1, 1); hipLaunchKernelGGL(( covariantDisplacementVector_kernel<Float, DispArg, order>), dim3(gridDim),dim3(blockDim), 0, 0, arg_d, dispDir, dispSign); hipDeviceSynchronize(); checkCudaError(); hipFree(arg_d); arg_d = nullptr; } template void performCovariantDisplacementVector<float,QUDA_FLOAT2_FIELD_ORDER> (ColorSpinorField *dst, ColorSpinorField *src, cudaGaugeField *gauge, DisplaceDir dispDir, DisplaceSign dispSign); template void performCovariantDisplacementVector<float,QUDA_FLOAT4_FIELD_ORDER> (ColorSpinorField *dst, ColorSpinorField *src, cudaGaugeField *gauge, DisplaceDir dispDir, DisplaceSign dispSign); template void performCovariantDisplacementVector<double,QUDA_FLOAT2_FIELD_ORDER>(ColorSpinorField *dst, ColorSpinorField *src, cudaGaugeField *gauge, DisplaceDir dispDir, DisplaceSign dispSign); template void performCovariantDisplacementVector<double,QUDA_FLOAT4_FIELD_ORDER>(ColorSpinorField *dst, ColorSpinorField *src, cudaGaugeField *gauge, DisplaceDir dispDir, DisplaceSign dispSign); //----------------------------------------------------------------------------
65433816c0cf64c0138674fafc260b628806de67.cu
#include <gamma.h> #include <mugiq_util_kernels.cuh> #include <mugiq_contract_kernels.cuh> #include <mugiq_displace_kernels.cuh> template <typename Float> void copyGammaCoeffStructToSymbol(){ GammaCoeff<Float> gamma_h; for(int m=0;m<N_GAMMA_;m++){ for(int n=0;n<N_SPIN_;n++){ gamma_h.column_index[m][n] = GammaColumnIndex(m,n); gamma_h.row_value[m][n] = {static_cast<Float>(GammaRowValue(m,n,0)), static_cast<Float>(GammaRowValue(m,n,1))}; } } copyGammaCoefftoSymbol<Float>(gamma_h); } template void copyGammaCoeffStructToSymbol<float>(); template void copyGammaCoeffStructToSymbol<double>(); //---------------------------------------------------------------------------- template <typename Float> void copyGammaMapStructToSymbol(){ GammaMap<Float> map_h; std::vector<int> minusG = minusGamma(); std::vector<int> idxG = indexMapGamma(); std::vector<Float> signGamma(N_GAMMA_,static_cast<Float>(1.0)); for(auto g: minusG) signGamma.at(g) = static_cast<Float>(-1.0); for(int m=0;m<N_GAMMA_;m++){ map_h.sign[m] = signGamma.at(m); map_h.index[m] = idxG.at(m); } copyGammaMaptoSymbol<Float>(map_h); } template void copyGammaMapStructToSymbol<float>(); template void copyGammaMapStructToSymbol<double>(); //---------------------------------------------------------------------------- template <typename Float> void createPhaseMatrixGPU(complex<Float> *phaseMatrix_d, const int* momMatrix_h, long long locV3, int Nmom, int FTSign, const int localL[], const int totalL[]){ int *momMatrix_d; cudaMalloc((void**)&momMatrix_d, sizeof(int)*Nmom*MOM_DIM_); cudaMemcpy(momMatrix_d, momMatrix_h, sizeof(int)*Nmom*MOM_DIM_, cudaMemcpyHostToDevice); checkCudaError(); MomProjArg arg(locV3, Nmom, FTSign, localL, totalL); MomProjArg *arg_d; cudaMalloc((void**)&(arg_d), sizeof(MomProjArg) ); checkCudaError(); cudaMemcpy(arg_d, &arg, sizeof(MomProjArg), cudaMemcpyHostToDevice); //-Call the kernel dim3 blockDim(THREADS_PER_BLOCK, 1, 1); dim3 gridDim((locV3 + blockDim.x -1)/blockDim.x, 1, 1); // spawn threads only for the spatial volume phaseMatrix_kernel<Float><<<gridDim,blockDim>>>(phaseMatrix_d, momMatrix_d, arg_d); cudaDeviceSynchronize(); checkCudaError(); cudaFree(momMatrix_d); cudaFree(arg_d); arg_d = nullptr; } template void createPhaseMatrixGPU<float>(complex<float> *phaseMatrix_d, const int* momMatrix_h, long long locV3, int Nmom, int FTSign, const int localL[], const int totalL[]); template void createPhaseMatrixGPU<double>(complex<double> *phaseMatrix_d, const int* momMatrix_h, long long locV3, int Nmom, int FTSign, const int localL[], const int totalL[]); //---------------------------------------------------------------------------- template <typename Float, QudaFieldOrder fieldOrder> void performLoopContraction(complex<Float> *loopData_d, ColorSpinorField *eVecL, ColorSpinorField *eVecR, Float sigma){ typedef LoopContractArg<Float,fieldOrder> Arg; Arg arg(*eVecL, *eVecR, sigma); Arg *arg_d; cudaMalloc((void**)&(arg_d), sizeof(arg) ); checkCudaError(); cudaMemcpy(arg_d, &arg, sizeof(arg), cudaMemcpyHostToDevice); checkCudaError(); if(arg.nParity != 2) errorQuda("%s: Loop contraction kernels support only Full Site Subset spinors!\n", __func__); dim3 blockDim(THREADS_PER_BLOCK, arg.nParity, SHMEM_BLOCK_Z_SIZE); dim3 gridDim((arg.volumeCB + blockDim.x -1)/blockDim.x, 1, 1); //- Size of the required shared memory in bytes size_t shmemByteSize = sizeof(complex<Float>) * NELEM_SHMEM_CPLX_BUF * blockDim.x * blockDim.y; //-Call the kernel loopContract_kernel<Float, Arg><<<gridDim,blockDim,shmemByteSize>>>(loopData_d, arg_d); 
cudaDeviceSynchronize(); checkCudaError(); cudaFree(arg_d); arg_d = nullptr; } //- This start to become overwhelming, hopefully no other template parameters will be needed template void performLoopContraction<float,QUDA_FLOAT2_FIELD_ORDER> (complex<float> *loopData_d, ColorSpinorField *eVecL, ColorSpinorField *eVecR, float sigma); template void performLoopContraction<float,QUDA_FLOAT4_FIELD_ORDER> (complex<float> *loopData_d, ColorSpinorField *eVecL, ColorSpinorField *eVecR, float sigma); template void performLoopContraction<double,QUDA_FLOAT2_FIELD_ORDER>(complex<double> *loopData_d, ColorSpinorField *eVecL, ColorSpinorField *eVecR, double sigma); template void performLoopContraction<double,QUDA_FLOAT4_FIELD_ORDER>(complex<double> *loopData_d, ColorSpinorField *eVecL, ColorSpinorField *eVecR, double sigma); //---------------------------------------------------------------------------- template <typename Float> void convertIdxOrder_mapGamma(complex<Float> *dataPosMP_d, const complex<Float> *dataPos_d, int nData, int nLoop, int nParity, int volumeCB, const int localL[]){ //-Some checks if(nData != nLoop*N_GAMMA_) errorQuda("%s: This function assumes that nData = nLoop * NGamma\n", __func__); ConvertIdxArg arg(nData, nLoop, nParity, volumeCB, localL); ConvertIdxArg *arg_d; cudaMalloc((void**)&(arg_d), sizeof(arg) ); checkCudaError(); cudaMemcpy(arg_d, &arg, sizeof(arg), cudaMemcpyHostToDevice); checkCudaError(); dim3 blockDim(THREADS_PER_BLOCK, arg.nParity, N_GAMMA_); dim3 gridDim((arg.volumeCB + blockDim.x -1)/blockDim.x, 1, 1); convertIdxOrder_mapGamma_kernel<Float><<<gridDim,blockDim>>>(dataPosMP_d, dataPos_d, arg_d); cudaDeviceSynchronize(); checkCudaError(); cudaFree(arg_d); arg_d = nullptr; } template void convertIdxOrder_mapGamma<float> (complex<float> *dataPosMP_d, const complex<float> *dataPos_d, int nData, int nLoop, int nParity, int volumeCB, const int localL[]); template void convertIdxOrder_mapGamma<double>(complex<double> *dataPosMP_d, const complex<double> *dataPos_d, int nData, int nLoop, int nParity, int volumeCB, const int localL[]); //---------------------------------------------------------------------------- //-Helper function for exchanging ghosts (boundaries) void exchangeGhostVec(ColorSpinorField *x){ const int nFace = 1; x->exchangeGhost((QudaParity)(1), nFace, 0); //- first argument is redundant when nParity = 2. nFace MUST be 1 for now. 
} template <typename Float, QudaFieldOrder order> void performCovariantDisplacementVector(ColorSpinorField *dst, ColorSpinorField *src, cudaGaugeField *gauge, DisplaceDir dispDir, DisplaceSign dispSign){ exchangeGhostVec(src); typedef CovDispVecArg<Float,order> DispArg; DispArg arg(*dst, *src, *gauge); DispArg *arg_d; cudaMalloc((void**)&(arg_d), sizeof(arg)); checkCudaError(); cudaMemcpy(arg_d, &arg, sizeof(arg), cudaMemcpyHostToDevice); checkCudaError(); if(arg.nParity != 2) errorQuda("%s: This function supports only Full Site Subset fields!\n", __func__); dim3 blockDim(THREADS_PER_BLOCK, arg.nParity, 1); dim3 gridDim((arg.volumeCB + blockDim.x -1)/blockDim.x, 1, 1); covariantDisplacementVector_kernel<Float, DispArg, order><<<gridDim,blockDim>>>(arg_d, dispDir, dispSign); cudaDeviceSynchronize(); checkCudaError(); cudaFree(arg_d); arg_d = nullptr; } template void performCovariantDisplacementVector<float,QUDA_FLOAT2_FIELD_ORDER> (ColorSpinorField *dst, ColorSpinorField *src, cudaGaugeField *gauge, DisplaceDir dispDir, DisplaceSign dispSign); template void performCovariantDisplacementVector<float,QUDA_FLOAT4_FIELD_ORDER> (ColorSpinorField *dst, ColorSpinorField *src, cudaGaugeField *gauge, DisplaceDir dispDir, DisplaceSign dispSign); template void performCovariantDisplacementVector<double,QUDA_FLOAT2_FIELD_ORDER>(ColorSpinorField *dst, ColorSpinorField *src, cudaGaugeField *gauge, DisplaceDir dispDir, DisplaceSign dispSign); template void performCovariantDisplacementVector<double,QUDA_FLOAT4_FIELD_ORDER>(ColorSpinorField *dst, ColorSpinorField *src, cudaGaugeField *gauge, DisplaceDir dispDir, DisplaceSign dispSign); //----------------------------------------------------------------------------
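// The wrappers above share two launch details worth isolating: each kernel-argument struct is
// copied to device memory and passed by pointer, and dynamic shared memory is sized from the
// block shape as the third launch parameter. A minimal stand-alone sketch of that pattern;
// the struct, kernel, and sizes below are illustrative and not taken from mugiq/QUDA.
#include <cstdio>
#include <cuda_runtime.h>

struct KernelArg { int n; float scale; };

__global__ void scale_kernel(float* data, const KernelArg* arg) {
  extern __shared__ float tile[];                 // dynamic shared memory
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  bool in_range = (i < arg->n);
  tile[threadIdx.x] = in_range ? data[i] * arg->scale : 0.0f;
  __syncthreads();                                // every thread reaches the barrier
  if (in_range) data[i] = tile[threadIdx.x];
}

int main() {
  const int n = 1 << 16;
  float* d_data = nullptr;
  cudaMalloc(&d_data, n * sizeof(float));
  cudaMemset(d_data, 0, n * sizeof(float));

  KernelArg h_arg{n, 2.0f};
  KernelArg* d_arg = nullptr;
  cudaMalloc(&d_arg, sizeof(h_arg));              // argument struct lives in device memory
  cudaMemcpy(d_arg, &h_arg, sizeof(h_arg), cudaMemcpyHostToDevice);

  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  size_t shmem_bytes = block.x * sizeof(float);   // bytes of dynamic shared memory per block
  scale_kernel<<<grid, block, shmem_bytes>>>(d_data, d_arg);
  cudaDeviceSynchronize();
  printf("launch: %s\n", cudaGetErrorString(cudaGetLastError()));

  cudaFree(d_arg);
  cudaFree(d_data);
  return 0;
}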
044692a8356e9921fc7128c954b1dc6322a4b7fa.hip
// !!! This is a file automatically generated by hipify!!! #include "simple_yolo.hpp" #include <NvInfer.h> #include <NvOnnxParser.h> #include <hip/hip_runtime.h> #include <algorithm> #include <fstream> #include <memory> #include <string> #include <future> #include <condition_variable> #include <mutex> #include <thread> #include <queue> #if defined(_WIN32) # include <Windows.h> # include <wingdi.h> # include <Shlwapi.h> # pragma comment(lib, "shlwapi.lib") # undef min # undef max #else # include <dirent.h> # include <sys/types.h> # include <sys/stat.h> # include <unistd.h> # include <stdarg.h> #endif namespace SimpleYolo{ using namespace nvinfer1; using namespace std; using namespace cv; #define CURRENT_DEVICE_ID -1 #define GPU_BLOCK_THREADS 512 #define KernelPositionBlock \ int position = (blockDim.x * blockIdx.x + threadIdx.x); \ if (position >= (edge)) return; #define checkCudaRuntime(call) check_runtime(call, #call, __LINE__, __FILE__) static bool check_runtime(hipError_t e, const char* call, int line, const char *file); #define checkCudaKernel(...) \ __VA_ARGS__; \ do{hipError_t cudaStatus = hipPeekAtLastError(); \ if (cudaStatus != hipSuccess){ \ INFOE("launch failed: %s", hipGetErrorString(cudaStatus)); \ }} while(0); #define Assert(op) \ do{ \ bool cond = !(!(op)); \ if(!cond){ \ INFOF("Assert failed, " #op); \ } \ }while(false) /* @ level */ #define CURRENT_LOG_LEVEL LogLevel::Info #define INFOD(...) __log_func(__FILE__, __LINE__, LogLevel::Debug, __VA_ARGS__) #define INFOV(...) __log_func(__FILE__, __LINE__, LogLevel::Verbose, __VA_ARGS__) #define INFO(...) __log_func(__FILE__, __LINE__, LogLevel::Info, __VA_ARGS__) #define INFOW(...) __log_func(__FILE__, __LINE__, LogLevel::Warning, __VA_ARGS__) #define INFOE(...) __log_func(__FILE__, __LINE__, LogLevel::Error, __VA_ARGS__) #define INFOF(...) 
__log_func(__FILE__, __LINE__, LogLevel::Fatal, __VA_ARGS__) enum class NormType : int{ None = 0, MeanStd = 1, AlphaBeta = 2 }; enum class ChannelType : int{ None = 0, SwapRB = 1 }; /* @ alpha betaswap RB */ struct Norm{ float mean[3]; float std[3]; float alpha, beta; NormType type = NormType::None; ChannelType channel_type = ChannelType::None; // out = (x * alpha - mean) / std static Norm mean_std(const float mean[3], const float std[3], float alpha = 1/255.0f, ChannelType channel_type=ChannelType::None); // out = x * alpha + beta static Norm alpha_beta(float alpha, float beta = 0, ChannelType channel_type=ChannelType::None); // None static Norm None(); }; Norm Norm::mean_std(const float mean[3], const float std[3], float alpha, ChannelType channel_type){ Norm out; out.type = NormType::MeanStd; out.alpha = alpha; out.channel_type = channel_type; memcpy(out.mean, mean, sizeof(out.mean)); memcpy(out.std, std, sizeof(out.std)); return out; } Norm Norm::alpha_beta(float alpha, float beta, ChannelType channel_type){ Norm out; out.type = NormType::AlphaBeta; out.alpha = alpha; out.beta = beta; out.channel_type = channel_type; return out; } Norm Norm::None(){ return Norm(); } /* @ gpuidgpuid */ class AutoDevice{ public: AutoDevice(int device_id = 0){ hipGetDevice(&old_); if(old_ != device_id && device_id != -1) checkCudaRuntime(hipSetDevice(device_id)); } virtual ~AutoDevice(){ if(old_ != -1) checkCudaRuntime(hipSetDevice(old_)); } private: int old_ = -1; }; enum class LogLevel : int{ Debug = 5, Verbose = 4, Info = 3, Warning = 2, Error = 1, Fatal = 0 }; static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...); inline int upbound(int n, int align = 32){return (n + align - 1) / align * align;} static bool check_runtime(hipError_t e, const char* call, int line, const char *file){ if (e != hipSuccess) { INFOE("CUDA Runtime error %s # %s, code = %s [ %d ] in file %s:%d", call, hipGetErrorString(e), hipGetErrorName(e), e, file, line); return false; } return true; } #define TRT_STR(v) #v #define TRT_VERSION_STRING(major, minor, patch, build) TRT_STR(major) "." TRT_STR(minor) "." TRT_STR(patch) "." 
TRT_STR(build) const char* trt_version(){ return TRT_VERSION_STRING(NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, NV_TENSORRT_BUILD); } static bool check_device_id(int device_id){ int device_count = -1; checkCudaRuntime(hipGetDeviceCount(&device_count)); if(device_id < 0 || device_id >= device_count){ INFOE("Invalid device id: %d, count = %d", device_id, device_count); return false; } return true; } static bool exists(const string& path){ #ifdef _WIN32 return ::PathFileExistsA(path.c_str()); #else return access(path.c_str(), R_OK) == 0; #endif } static const char* level_string(LogLevel level){ switch (level){ case LogLevel::Debug: return "debug"; case LogLevel::Verbose: return "verbo"; case LogLevel::Info: return "info"; case LogLevel::Warning: return "warn"; case LogLevel::Error: return "error"; case LogLevel::Fatal: return "fatal"; default: return "unknow"; } } template<typename _T> static string join_dims(const vector<_T>& dims){ stringstream output; char buf[64]; const char* fmts[] = {"%d", " x %d"}; for(int i = 0; i < dims.size(); ++i){ snprintf(buf, sizeof(buf), fmts[i != 0], dims[i]); output << buf; } return output.str(); } static bool save_file(const string& file, const void* data, size_t length){ FILE* f = fopen(file.c_str(), "wb"); if (!f) return false; if (data and length > 0){ if (fwrite(data, 1, length, f) not_eq length){ fclose(f); return false; } } fclose(f); return true; } static bool save_file(const string& file, const vector<uint8_t>& data){ return save_file(file, data.data(), data.size()); } static string file_name(const string& path, bool include_suffix){ if (path.empty()) return ""; int p = path.rfind('/'); #ifdef U_OS_WINDOWS int e = path.rfind('\\'); p = ::max(p, e); #endif p += 1; //include suffix if (include_suffix) return path.substr(p); int u = path.rfind('.'); if (u == -1) return path.substr(p); if (u <= p) u = path.size(); return path.substr(p, u - p); } /* @ @ */ vector<string> glob_image_files(const string& directory){ /* @ "*.jpg;*.png;*.bmp;*.jpeg;*.tiff" */ vector<string> files, output; set<string> pattern_set{"jpg", "png", "bmp", "jpeg", "tiff"}; if(directory.empty()){ INFOE("Glob images from folder failed, folder is empty"); return output; } try{ cv::glob(directory + "/*", files, true); }catch(...){ INFOE("Glob %s failed", directory.c_str()); return output; } for(int i = 0; i < files.size(); ++i){ auto& file = files[i]; int p = file.rfind("."); if(p == -1) continue; auto suffix = file.substr(p+1); std::transform(suffix.begin(), suffix.end(), suffix.begin(), [](char c){ if(c >= 'A' && c <= 'Z') c -= 'A' + 'a'; return c; }); if(pattern_set.find(suffix) != pattern_set.end()) output.push_back(file); } return output; } static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...){ if(level > CURRENT_LOG_LEVEL) return; va_list vl; va_start(vl, fmt); char buffer[2048]; string filename = file_name(file, true); int n = snprintf(buffer, sizeof(buffer), "[%s][%s:%d]:", level_string(level), filename.c_str(), line); vsnprintf(buffer + n, sizeof(buffer) - n, fmt, vl); fprintf(stdout, "%s\n", buffer); if (level == LogLevel::Fatal) { fflush(stdout); abort(); } } static dim3 grid_dims(int numJobs) { int numBlockThreads = numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS; return dim3(((numJobs + numBlockThreads - 1) / (float)numBlockThreads)); } static dim3 block_dims(int numJobs) { return numJobs < GPU_BLOCK_THREADS ? 
numJobs : GPU_BLOCK_THREADS; } static int get_device(int device_id){ if(device_id != CURRENT_DEVICE_ID){ check_device_id(device_id); return device_id; } checkCudaRuntime(hipGetDevice(&device_id)); return device_id; } void set_device(int device_id) { if (device_id == -1) return; checkCudaRuntime(hipSetDevice(device_id)); } /////////////////////////////CUDA kernels//////////////////////////////////////////////// const int NUM_BOX_ELEMENT = 7; // left, top, right, bottom, confidence, class, keepflag static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){ *ox = matrix[0] * x + matrix[1] * y + matrix[2]; *oy = matrix[3] * x + matrix[4] * y + matrix[5]; } /* @ @ */ static __global__ void decode_kernel(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float* invert_affine_matrix, float* parray, int max_objects){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= num_bboxes) return; float* pitem = predict + (5 + num_classes) * position; float objectness = pitem[4]; if(objectness < confidence_threshold) return; float* class_confidence = pitem + 5; float confidence = *class_confidence++; int label = 0; for(int i = 1; i < num_classes; ++i, ++class_confidence){ if(*class_confidence > confidence){ confidence = *class_confidence; label = i; } } confidence *= objectness; if(confidence < confidence_threshold) return; int index = atomicAdd(parray, 1); if(index >= max_objects) return; float cx = *pitem++; float cy = *pitem++; float width = *pitem++; float height = *pitem++; float left = cx - width * 0.5f; float top = cy - height * 0.5f; float right = cx + width * 0.5f; float bottom = cy + height * 0.5f; affine_project(invert_affine_matrix, left, top, &left, &top); affine_project(invert_affine_matrix, right, bottom, &right, &bottom); float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT; *pout_item++ = left; *pout_item++ = top; *pout_item++ = right; *pout_item++ = bottom; *pout_item++ = confidence; *pout_item++ = label; *pout_item++ = 1; // 1 = keep, 0 = ignore } static __device__ float box_iou( float aleft, float atop, float aright, float abottom, float bleft, float btop, float bright, float bbottom ){ float cleft = max(aleft, bleft); float ctop = max(atop, btop); float cright = min(aright, bright); float cbottom = min(abottom, bbottom); float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f); if(c_area == 0.0f) return 0.0f; float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop); float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop); return c_area / (a_area + b_area - c_area); } static __global__ void fast_nms_kernel(float* bboxes, int max_objects, float threshold){ int position = (blockDim.x * blockIdx.x + threadIdx.x); int count = min((int)*bboxes, max_objects); if (position >= count) return; // left, top, right, bottom, confidence, class, keepflag float* pcurrent = bboxes + 1 + position * NUM_BOX_ELEMENT; for(int i = 0; i < count; ++i){ float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT; if(i == position || pcurrent[5] != pitem[5]) continue; if(pitem[4] >= pcurrent[4]){ if(pitem[4] == pcurrent[4] && i < position) continue; float iou = box_iou( pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3], pitem[0], pitem[1], pitem[2], pitem[3] ); if(iou > threshold){ pcurrent[6] = 0; // 1=keep, 0=ignore return; } } } } static void decode_kernel_invoker(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, 
int max_objects, hipStream_t stream){ auto grid = grid_dims(num_bboxes); auto block = block_dims(num_bboxes); /* @ */ hipLaunchKernelGGL(( checkCudaKernel(decode_kernel), dim3(grid), dim3(block), 0, stream, predict, num_bboxes, num_classes, confidence_threshold, invert_affine_matrix, parray, max_objects)); grid = grid_dims(max_objects); block = block_dims(max_objects); hipLaunchKernelGGL(( checkCudaKernel(fast_nms_kernel), dim3(grid), dim3(block), 0, stream, parray, max_objects, nms_threshold)); } /* @ @ */ static __global__ void warp_affine_bilinear_and_normalize_plane_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, uint8_t const_value_st, float* warp_affine_matrix_2_3, Norm norm, int edge){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= edge) return; float m_x1 = warp_affine_matrix_2_3[0]; float m_y1 = warp_affine_matrix_2_3[1]; float m_z1 = warp_affine_matrix_2_3[2]; float m_x2 = warp_affine_matrix_2_3[3]; float m_y2 = warp_affine_matrix_2_3[4]; float m_z2 = warp_affine_matrix_2_3[5]; int dx = position % dst_width; int dy = position / dst_width; float src_x = m_x1 * dx + m_y1 * dy + m_z1; float src_y = m_x2 * dx + m_y2 * dy + m_z2; float c0, c1, c2; if(src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height){ // out of range c0 = const_value_st; c1 = const_value_st; c2 = const_value_st; }else{ int y_low = floorf(src_y); int x_low = floorf(src_x); int y_high = y_low + 1; int x_high = x_low + 1; uint8_t const_value[] = {const_value_st, const_value_st, const_value_st}; float ly = src_y - y_low; float lx = src_x - x_low; float hy = 1 - ly; float hx = 1 - lx; float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; uint8_t* v1 = const_value; uint8_t* v2 = const_value; uint8_t* v3 = const_value; uint8_t* v4 = const_value; if(y_low >= 0){ if (x_low >= 0) v1 = src + y_low * src_line_size + x_low * 3; if (x_high < src_width) v2 = src + y_low * src_line_size + x_high * 3; } if(y_high < src_height){ if (x_low >= 0) v3 = src + y_high * src_line_size + x_low * 3; if (x_high < src_width) v4 = src + y_high * src_line_size + x_high * 3; } // same to opencv c0 = floorf(w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f); c1 = floorf(w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f); c2 = floorf(w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f); } if(norm.channel_type == ChannelType::SwapRB){ float t = c2; c2 = c0; c0 = t; } if(norm.type == NormType::MeanStd){ c0 = (c0 * norm.alpha - norm.mean[0]) / norm.std[0]; c1 = (c1 * norm.alpha - norm.mean[1]) / norm.std[1]; c2 = (c2 * norm.alpha - norm.mean[2]) / norm.std[2]; }else if(norm.type == NormType::AlphaBeta){ c0 = c0 * norm.alpha + norm.beta; c1 = c1 * norm.alpha + norm.beta; c2 = c2 * norm.alpha + norm.beta; } int area = dst_width * dst_height; float* pdst_c0 = dst + dy * dst_width + dx; float* pdst_c1 = pdst_c0 + area; float* pdst_c2 = pdst_c1 + area; *pdst_c0 = c0; *pdst_c1 = c1; *pdst_c2 = c2; } static void warp_affine_bilinear_and_normalize_plane( uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, float* matrix_2_3, uint8_t const_value, const Norm& norm, hipStream_t stream) { int jobs = dst_width * dst_height; auto grid = grid_dims(jobs); auto block = block_dims(jobs); checkCudaKernel(warp_affine_bilinear_and_normalize_plane_kernel << <grid, block, 0, stream >> > ( src, src_line_size, src_width, src_height, dst, dst_width, dst_height, const_value, matrix_2_3, norm, 
jobs )); } //////////////////////////////class MixMemory///////////////////////////////////////////////// /* @ gpu/cpu gpucpu cpupinned memorygpu cudaMallocHostcuda context @ CPUGPUcopy 1. 2. 3. GPUid 4. cpu->gpu, gpu->cpu copy MixMemory void* cpu_ = nullptr; size_t cpu_size_ = 0; bool owner_cpu_ = true; int device_id_ = 0; void* gpu_ = nullptr; size_t gpu_size_ = 0; bool owner_gpu_ = true; MixMemory */ class MixMemory { public: /* @ */ MixMemory(int device_id = CURRENT_DEVICE_ID); MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size); virtual ~MixMemory(); /* @ gpucpu */ void* gpu(size_t size); void* cpu(size_t size); /* @ */ void release_gpu(); void release_cpu(); void release_all(); /* @ */ inline bool owner_gpu() const{return owner_gpu_;} inline bool owner_cpu() const{return owner_cpu_;} /* @ */ inline size_t cpu_size() const{return cpu_size_;} inline size_t gpu_size() const{return gpu_size_;} /* @ id */ inline int device_id() const{return device_id_;} /* @ GPU */ inline void* gpu() const { return gpu_; } // Pinned Memory inline void* cpu() const { return cpu_; } void reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size); private: /* @ cpu */ void* cpu_ = nullptr; /* @ cpu */ size_t cpu_size_ = 0; bool owner_cpu_ = true; /* @ GPU 0 */ int device_id_ = 0; /* @ GPU */ void* gpu_ = nullptr; /* @ GPU */ size_t gpu_size_ = 0; bool owner_gpu_ = true; }; MixMemory::MixMemory(int device_id){ device_id_ = get_device(device_id); } /* @ CPUGPU */ MixMemory::MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){ reference_data(cpu, cpu_size, gpu, gpu_size); } /* @ */ void MixMemory::reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){ release_all(); if(cpu == nullptr || cpu_size == 0){ cpu = nullptr; cpu_size = 0; } if(gpu == nullptr || gpu_size == 0){ gpu = nullptr; gpu_size = 0; } /* @ */ this->cpu_ = cpu; this->cpu_size_ = cpu_size; this->gpu_ = gpu; this->gpu_size_ = gpu_size; /* @ */ /* @ MixMemory */ this->owner_cpu_ = !(cpu && cpu_size > 0); this->owner_gpu_ = !(gpu && gpu_size > 0); checkCudaRuntime(hipGetDevice(&device_id_)); } MixMemory::~MixMemory() { release_all(); } void* MixMemory::gpu(size_t size) { /* @ sizesize 0 */ if (gpu_size_ < size) { release_gpu(); gpu_size_ = size; AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(hipMalloc(&gpu_, size)); checkCudaRuntime(hipMemset(gpu_, 0, size)); } return gpu_; } void* MixMemory::cpu(size_t size) { if (cpu_size_ < size) { release_cpu(); cpu_size_ = size; AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(hipHostMalloc(&cpu_, size)); Assert(cpu_ != nullptr); memset(cpu_, 0, size); } return cpu_; } /* @ CPU */ void MixMemory::release_cpu() { if (cpu_) { if(owner_cpu_){ AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(hipHostFree(cpu_)); } cpu_ = nullptr; } cpu_size_ = 0; } /* @ GPU */ void MixMemory::release_gpu() { if (gpu_) { if(owner_gpu_){ AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(hipFree(gpu_)); } gpu_ = nullptr; } gpu_size_ = 0; } /* @ */ void MixMemory::release_all() { release_cpu(); release_gpu(); } /////////////////////////////////class Tensor//////////////////////////////////////////////// /* @ Tensor NN save_to_filepython */ enum class DataHead : int{ Init = 0, Device = 1, Host = 2 }; class Tensor { public: Tensor(const Tensor& other) = delete; Tensor& operator = (const Tensor& other) = delete; /* @ */ explicit Tensor(std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID); explicit Tensor(int 
n, int c, int h, int w, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID); explicit Tensor(int ndims, const int* dims, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID); explicit Tensor(const std::vector<int>& dims, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID); virtual ~Tensor(); int numel() const; inline int ndims() const{return shape_.size();} /* @ */ inline int size(int index) const{return shape_[index];} /* @ */ inline int shape(int index) const{return shape_[index];} /* @ */ /* @ */ inline int batch() const{return shape_[0];} inline int channel() const{return shape_[1];} inline int height() const{return shape_[2];} inline int width() const{return shape_[3];} inline const std::vector<int>& dims() const { return shape_; } inline int bytes() const { return bytes_; } inline int bytes(int start_axis) const { return count(start_axis) * element_size(); } /* @ */ inline int element_size() const { return sizeof(float); } inline DataHead head() const { return head_; } /* @ GPUcpu */ std::shared_ptr<Tensor> clone() const; Tensor& release(); /* @ */ Tensor& set_to(float value); bool empty() const; /* @ */ /* @ tensor */ template<typename ... _Args> int offset(int index, _Args ... index_args) const{ const int index_array[] = {index, index_args...}; return offset_array(sizeof...(index_args) + 1, index_array); } int offset_array(const std::vector<int>& index) const; int offset_array(size_t size, const int* index_array) const; template<typename ... _Args> Tensor& resize(int dim_size, _Args ... dim_size_args){ const int dim_size_array[] = {dim_size, dim_size_args...}; return resize(sizeof...(dim_size_args) + 1, dim_size_array); } Tensor& resize(int ndims, const int* dims); Tensor& resize(const std::vector<int>& dims); Tensor& resize_single_dim(int idim, int size); int count(int start_axis = 0) const; int device() const{return device_id_;} /* @ copyGPUcopyCPU */ Tensor& to_gpu(bool copy=true); Tensor& to_cpu(bool copy=true); /* @ copyGPUcopyCPU */ inline void* cpu() const { ((Tensor*)this)->to_cpu(); return data_->cpu(); } inline void* gpu() const { ((Tensor*)this)->to_gpu(); return data_->gpu(); } /* @ */ template<typename DType> inline const DType* cpu() const { return (DType*)cpu(); } template<typename DType> inline DType* cpu() { return (DType*)cpu(); } /* @ https://blog.csdn.net/zj510/article/details/36633603?spm=1001.2101.3001.6650.10&utm_medium=distribute.pc_relevant.none-task-blog-2%7Edefault%7EBlogCommendFromBaidu%7Edefault-10.highlightwordscore&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2%7Edefault%7EBlogCommendFromBaidu%7Edefault-10.highlightwordscore */ /* @ GPU */ template<typename DType, typename ... _Args> inline DType* cpu(int i, _Args&& ... args) { return cpu<DType>() + offset(i, args...); } template<typename DType> inline const DType* gpu() const { return (DType*)gpu(); } template<typename DType> inline DType* gpu() { return (DType*)gpu(); } template<typename DType, typename ... _Args> inline DType* gpu(int i, _Args&& ... args) { return gpu<DType>() + offset(i, args...); } template<typename DType, typename ... _Args> inline DType& at(int i, _Args&& ... 
args) { return *(cpu<DType>() + offset(i, args...)); } /* @ */ std::shared_ptr<MixMemory> get_data() const {return data_;} std::shared_ptr<MixMemory> get_workspace() const {return workspace_;} Tensor& set_workspace(std::shared_ptr<MixMemory> workspace) {workspace_ = workspace; return *this;} /* @ */ hipStream_t get_stream() const{return stream_;} Tensor& set_stream(hipStream_t stream){stream_ = stream; return *this;} Tensor& set_mat (int n, const cv::Mat& image); Tensor& set_norm_mat(int n, const cv::Mat& image, float mean[3], float std[3]); /* @ cpu<float>(n, c) */ cv::Mat at_mat(int n = 0, int c = 0) { return cv::Mat(height(), width(), CV_32F, cpu<float>(n, c)); } /* @ */ Tensor& synchronize(); const char* shape_string() const{return shape_string_;} const char* descriptor() const; /* @ */ Tensor& copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id = CURRENT_DEVICE_ID); /** # pythonTensor import numpy as np def load_tensor(file): with open(file, "rb") as f: binary_data = f.read() magic_number, ndims, dtype = np.frombuffer(binary_data, np.uint32, count=3, offset=0) assert magic_number == 0xFCCFE2E2, f"{file} not a tensor file." dims = np.frombuffer(binary_data, np.uint32, count=ndims, offset=3 * 4) if dtype == 0: np_dtype = np.float32 elif dtype == 1: np_dtype = np.float16 else: assert False, f"Unsupport dtype = {dtype}, can not convert to numpy dtype" return np.frombuffer(binary_data, np_dtype, offset=(ndims + 3) * 4).reshape(*dims) **/ bool save_to_file(const std::string& file) const; private: Tensor& compute_shape_string(); Tensor& adajust_memory_by_update_dims_or_type(); void setup_data(std::shared_ptr<MixMemory> data); private: /* @ tensorshape */ std::vector<int> shape_; /* @ tensor */ size_t bytes_ = 0; /* @ CPUGPU */ DataHead head_ = DataHead::Init; /* @ */ hipStream_t stream_ = nullptr; int device_id_ = 0; char shape_string_[100]; char descriptor_string_[100]; /* @ MixMemory */ std::shared_ptr<MixMemory> data_; std::shared_ptr<MixMemory> workspace_; }; Tensor::Tensor(int n, int c, int h, int w, shared_ptr<MixMemory> data, int device_id) { this->device_id_ = get_device(device_id); descriptor_string_[0] = 0; setup_data(data); resize(n, c, h, w); } Tensor::Tensor(const std::vector<int>& dims, shared_ptr<MixMemory> data, int device_id){ this->device_id_ = get_device(device_id); descriptor_string_[0] = 0; setup_data(data); resize(dims); } Tensor::Tensor(int ndims, const int* dims, shared_ptr<MixMemory> data, int device_id) { this->device_id_ = get_device(device_id); descriptor_string_[0] = 0; setup_data(data); resize(ndims, dims); } Tensor::Tensor(shared_ptr<MixMemory> data, int device_id){ shape_string_[0] = 0; descriptor_string_[0] = 0; this->device_id_ = get_device(device_id); setup_data(data); } Tensor::~Tensor() { release(); } const char* Tensor::descriptor() const{ char* descriptor_ptr = (char*)descriptor_string_; int device_id = device(); snprintf(descriptor_ptr, sizeof(descriptor_string_), "Tensor:%p, %s, CUDA:%d", data_.get(), shape_string_, device_id ); return descriptor_ptr; } Tensor& Tensor::compute_shape_string(){ // clean string shape_string_[0] = 0; char* buffer = shape_string_; size_t buffer_size = sizeof(shape_string_); for(int i = 0; i < shape_.size(); ++i){ int size = 0; if(i < shape_.size() - 1) size = snprintf(buffer, buffer_size, "%d x ", shape_[i]); else size = snprintf(buffer, buffer_size, "%d", shape_[i]); buffer += size; buffer_size -= size; } return *this; } /* @ cpuGPU */ void Tensor::setup_data(shared_ptr<MixMemory> data){ data_ 
= data; if(data_ == nullptr){ data_ = make_shared<MixMemory>(device_id_); }else{ device_id_ = data_->device_id(); } head_ = DataHead::Init; if(data_->cpu()){ head_ = DataHead::Host; } if(data_->gpu()){ head_ = DataHead::Device; } } Tensor& Tensor::copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id){ if(head_ == DataHead::Init) to_gpu(false); size_t offset_location = offset * element_size(); if(offset_location >= bytes_){ INFOE("Offset location[%lld] >= bytes_[%lld], out of range", offset_location, bytes_); return *this; } size_t copyed_bytes = num_element * element_size(); size_t remain_bytes = bytes_ - offset_location; if(copyed_bytes > remain_bytes){ INFOE("Copyed bytes[%lld] > remain bytes[%lld], out of range", copyed_bytes, remain_bytes); return *this; } if(head_ == DataHead::Device){ int current_device_id = get_device(device_id); int gpu_device_id = device(); if(current_device_id != gpu_device_id){ checkCudaRuntime(hipMemcpyPeerAsync(gpu<unsigned char>() + offset_location, gpu_device_id, src, current_device_id, copyed_bytes, stream_)); //checkCudaRuntime(hipMemcpyAsync(gpu<unsigned char>() + offset_location, src, copyed_bytes, hipMemcpyDeviceToDevice, stream_)); } else{ checkCudaRuntime(hipMemcpyAsync(gpu<unsigned char>() + offset_location, src, copyed_bytes, hipMemcpyDeviceToDevice, stream_)); } }else if(head_ == DataHead::Host){ AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(hipMemcpyAsync(cpu<unsigned char>() + offset_location, src, copyed_bytes, hipMemcpyDeviceToHost, stream_)); }else{ INFOE("Unsupport head type %d", head_); } return *this; } Tensor& Tensor::release() { data_->release_all(); shape_.clear(); bytes_ = 0; head_ = DataHead::Init; return *this; } bool Tensor::empty() const{ return data_->cpu() == nullptr && data_->gpu() == nullptr; } int Tensor::count(int start_axis) const { if(start_axis >= 0 && start_axis < shape_.size()){ int size = 1; for (int i = start_axis; i < shape_.size(); ++i) size *= shape_[i]; return size; }else{ return 0; } } Tensor& Tensor::resize(const std::vector<int>& dims) { return resize(dims.size(), dims.data()); } int Tensor::numel() const{ int value = shape_.empty() ? 
0 : 1; for(int i = 0; i < shape_.size(); ++i){ value *= shape_[i]; } return value; } Tensor& Tensor::resize_single_dim(int idim, int size){ Assert(idim >= 0 && idim < shape_.size()); auto new_shape = shape_; new_shape[idim] = size; return resize(new_shape); } Tensor& Tensor::resize(int ndims, const int* dims) { vector<int> setup_dims(ndims); for(int i = 0; i < ndims; ++i){ int dim = dims[i]; if(dim == -1){ Assert(ndims == shape_.size()); dim = shape_[i]; } setup_dims[i] = dim; } this->shape_ = setup_dims; this->adajust_memory_by_update_dims_or_type(); this->compute_shape_string(); return *this; } Tensor& Tensor::adajust_memory_by_update_dims_or_type(){ int needed_size = this->numel() * element_size(); if(needed_size > this->bytes_){ head_ = DataHead::Init; } this->bytes_ = needed_size; return *this; } Tensor& Tensor::synchronize(){ AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(hipStreamSynchronize(stream_)); return *this; } /* @ gpu0 cpuGPU */ Tensor& Tensor::to_gpu(bool copy) { /* @ GPU */ if (head_ == DataHead::Device) return *this; /* @ GPU,GPU0 */ head_ = DataHead::Device; data_->gpu(bytes_); /* @ copyGPUcpu */ if (copy && data_->cpu() != nullptr) { AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(hipMemcpyAsync(data_->gpu(), data_->cpu(), bytes_, hipMemcpyHostToDevice, stream_)); } return *this; } Tensor& Tensor::to_cpu(bool copy) { if (head_ == DataHead::Host) return *this; head_ = DataHead::Host; data_->cpu(bytes_); if (copy && data_->gpu() != nullptr) { AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(hipMemcpyAsync(data_->cpu(), data_->gpu(), bytes_, hipMemcpyDeviceToHost, stream_)); checkCudaRuntime(hipStreamSynchronize(stream_)); } return *this; } int Tensor::offset_array(size_t size, const int* index_array) const{ Assert(size <= shape_.size()); int value = 0; for(int i = 0; i < shape_.size(); ++i){ if(i < size) value += index_array[i]; if(i + 1 < shape_.size()) value *= shape_[i+1]; } return value; } int Tensor::offset_array(const std::vector<int>& index_array) const{ return offset_array(index_array.size(), index_array.data()); } bool Tensor::save_to_file(const std::string& file) const{ if(empty()) return false; FILE* f = fopen(file.c_str(), "wb"); if(f == nullptr) return false; int ndims = this->ndims(); int dtype_ = 0; unsigned int head[3] = {0xFCCFE2E2, ndims, static_cast<unsigned int>(dtype_)}; fwrite(head, 1, sizeof(head), f); fwrite(shape_.data(), 1, sizeof(shape_[0]) * shape_.size(), f); fwrite(cpu(), 1, bytes_, f); fclose(f); return true; } /////////////////////////////////class TRTInferImpl//////////////////////////////////////////////// class Logger : public ILogger { public: virtual void log(Severity severity, const char* msg) noexcept override { if (severity == Severity::kINTERNAL_ERROR) { INFOE("NVInfer INTERNAL_ERROR: %s", msg); abort(); }else if (severity == Severity::kERROR) { INFOE("NVInfer: %s", msg); } else if (severity == Severity::kWARNING) { INFOW("NVInfer: %s", msg); } else if (severity == Severity::kINFO) { INFOD("NVInfer: %s", msg); } else { INFOD("%s", msg); } } }; static Logger gLogger; template<typename _T> static void destroy_nvidia_pointer(_T* ptr) { if (ptr) ptr->destroy(); } /* @ tensorrt */ class EngineContext { public: virtual ~EngineContext() { destroy(); } void set_stream(hipStream_t stream){ if(owner_stream_){ if (stream_) {hipStreamDestroy(stream_);} owner_stream_ = false; } stream_ = stream; } bool build_model(const void* pdata, size_t size) { destroy(); if(pdata == nullptr || size == 
0) return false; owner_stream_ = true; /* @ */ checkCudaRuntime(hipStreamCreate(&stream_)); if(stream_ == nullptr) return false; /* @ tensorrt runtime_engine_context_ */ runtime_ = shared_ptr<IRuntime>(createInferRuntime(gLogger), destroy_nvidia_pointer<IRuntime>); if (runtime_ == nullptr) return false; engine_ = shared_ptr<ICudaEngine>(runtime_->deserializeCudaEngine(pdata, size, nullptr), destroy_nvidia_pointer<ICudaEngine>); if (engine_ == nullptr) return false; //runtime_->setDLACore(0); context_ = shared_ptr<IExecutionContext>(engine_->createExecutionContext(), destroy_nvidia_pointer<IExecutionContext>); return context_ != nullptr; } private: void destroy() { context_.reset(); engine_.reset(); runtime_.reset(); if(owner_stream_){ if (stream_) {hipStreamDestroy(stream_);} } stream_ = nullptr; } public: hipStream_t stream_ = nullptr; bool owner_stream_ = false; shared_ptr<IExecutionContext> context_; shared_ptr<ICudaEngine> engine_; shared_ptr<IRuntime> runtime_ = nullptr; }; /* @ Implimplementation @PeterHuang @Kx Wang @ */ class TRTInferImpl{ public: virtual ~TRTInferImpl(); bool load(const std::string& file); // @Peter @ string bool load_from_memory(const void* pdata, size_t size); void destroy(); void forward(bool sync); int get_max_batch_size(); hipStream_t get_stream(); void set_stream(hipStream_t stream); void synchronize(); size_t get_device_memory_size(); std::shared_ptr<MixMemory> get_workspace(); std::shared_ptr<Tensor> input(int index = 0); std::string get_input_name(int index = 0); std::shared_ptr<Tensor> output(int index = 0); std::string get_output_name(int index = 0); std::shared_ptr<Tensor> tensor(const std::string& name); bool is_output_name(const std::string& name); bool is_input_name(const std::string& name); void set_input (int index, std::shared_ptr<Tensor> tensor); void set_output(int index, std::shared_ptr<Tensor> tensor); std::shared_ptr<std::vector<uint8_t>> serial_engine(); void print(); int num_output(); int num_input(); int device(); private: void build_engine_input_and_outputs_mapper(); private: std::vector<std::shared_ptr<Tensor>> inputs_; std::vector<std::shared_ptr<Tensor>> outputs_; std::vector<int> inputs_map_to_ordered_index_; std::vector<int> outputs_map_to_ordered_index_; std::vector<std::string> inputs_name_; std::vector<std::string> outputs_name_; std::vector<std::shared_ptr<Tensor>> orderdBlobs_; std::map<std::string, int> blobsNameMapper_; std::shared_ptr<EngineContext> context_; std::vector<void*> bindingsPtr_; std::shared_ptr<MixMemory> workspace_; int device_ = 0; }; //////////////////////////////////////////////////////////////////////////////////// TRTInferImpl::~TRTInferImpl(){ destroy(); } void TRTInferImpl::destroy() { int old_device = 0; checkCudaRuntime(hipGetDevice(&old_device)); checkCudaRuntime(hipSetDevice(device_)); this->context_.reset(); this->blobsNameMapper_.clear(); this->outputs_.clear(); this->inputs_.clear(); this->inputs_name_.clear(); this->outputs_name_.clear(); checkCudaRuntime(hipSetDevice(old_device)); } void TRTInferImpl::print(){ if(!context_){ INFOW("Infer print, nullptr."); return; } INFO("Infer %p detail", this); INFO("\tMax Batch Size: %d", this->get_max_batch_size()); INFO("\tInputs: %d", inputs_.size()); for(int i = 0; i < inputs_.size(); ++i){ auto& tensor = inputs_[i]; auto& name = inputs_name_[i]; INFO("\t\t%d.%s : shape {%s}", i, name.c_str(), tensor->shape_string()); } INFO("\tOutputs: %d", outputs_.size()); for(int i = 0; i < outputs_.size(); ++i){ auto& tensor = outputs_[i]; auto& name = 
outputs_name_[i]; INFO("\t\t%d.%s : shape {%s}", i, name.c_str(), tensor->shape_string()); } } std::shared_ptr<std::vector<uint8_t>> TRTInferImpl::serial_engine() { auto memory = this->context_->engine_->serialize(); auto output = make_shared<std::vector<uint8_t>>((uint8_t*)memory->data(), (uint8_t*)memory->data()+memory->size()); memory->destroy(); return output; } bool TRTInferImpl::load_from_memory(const void* pdata, size_t size) { if (pdata == nullptr || size == 0) return false; context_.reset(new EngineContext()); //build model if (!context_->build_model(pdata, size)) { context_.reset(); return false; } workspace_.reset(new MixMemory()); hipGetDevice(&device_); build_engine_input_and_outputs_mapper(); return true; } static std::vector<uint8_t> load_file(const string& file){ ifstream in(file, ios::in | ios::binary); if (!in.is_open()) return {}; in.seekg(0, ios::end); size_t length = in.tellg(); std::vector<uint8_t> data; if (length > 0){ in.seekg(0, ios::beg); data.resize(length); in.read((char*)&data[0], length); } in.close(); return data; } bool TRTInferImpl::load(const std::string& file) { /* @ */ auto data = load_file(file); if (data.empty()) return false; context_.reset(new EngineContext()); //build model if (!context_->build_model(data.data(), data.size())) { context_.reset(); return false; } workspace_.reset(new MixMemory()); hipGetDevice(&device_); /* @ */ build_engine_input_and_outputs_mapper(); return true; } size_t TRTInferImpl::get_device_memory_size() { EngineContext* context = (EngineContext*)this->context_.get(); return context->context_->getEngine().getDeviceMemorySize(); } void TRTInferImpl::build_engine_input_and_outputs_mapper() { /* @ */ EngineContext* context = (EngineContext*)this->context_.get(); /* @ */ int nbBindings = context->engine_->getNbBindings(); /* @ batch */ int max_batchsize = context->engine_->getMaxBatchSize(); inputs_.clear(); inputs_name_.clear(); outputs_.clear(); outputs_name_.clear(); orderdBlobs_.clear(); bindingsPtr_.clear(); blobsNameMapper_.clear(); for (int i = 0; i < nbBindings; ++i) { /* @ dims = {nbDims=4 d=0x000000c1e77ff2bc {-1, 3, 640, 640, 0, 0, 0, 0} } dims = {nbDims=3 d=0x000000c1e77ff2bc {-1, 25200, 85, 0, 0, 0, 0, 0} } */ auto dims = context->engine_->getBindingDimensions(i); /* @ */ auto type = context->engine_->getBindingDataType(i); /* @ */ const char* bindingName = context->engine_->getBindingName(i); /* @ dims = {nbDims=4 d=0x000000c1e77ff2bc {16, 3, 640, 640, 0, 0, 0, 0} } dims = {nbDims=3 d=0x000000c1e77ff2bc {16, 25200, 85, 0, 0, 0, 0, 0} } */ dims.d[0] = max_batchsize; /* @ tensor */ auto newTensor = make_shared<Tensor>(dims.nbDims, dims.d); /* @ tensor */ newTensor->set_stream(this->context_->stream_); /* @ tensor */ newTensor->set_workspace(this->workspace_); /* @ */ if (context->engine_->bindingIsInput(i)) { //if is input inputs_.push_back(newTensor); inputs_name_.push_back(bindingName); inputs_map_to_ordered_index_.push_back(orderdBlobs_.size()); } else { //if is output outputs_.push_back(newTensor); outputs_name_.push_back(bindingName); outputs_map_to_ordered_index_.push_back(orderdBlobs_.size()); } blobsNameMapper_[bindingName] = i; orderdBlobs_.push_back(newTensor); } bindingsPtr_.resize(orderdBlobs_.size()); } void TRTInferImpl::set_stream(hipStream_t stream){ this->context_->set_stream(stream); for(auto& t : orderdBlobs_) t->set_stream(stream); } hipStream_t TRTInferImpl::get_stream() { return this->context_->stream_; } int TRTInferImpl::device() { return device_; } void TRTInferImpl::synchronize() { 
checkCudaRuntime(hipStreamSynchronize(context_->stream_)); } bool TRTInferImpl::is_output_name(const std::string& name){ return std::find(outputs_name_.begin(), outputs_name_.end(), name) != outputs_name_.end(); } bool TRTInferImpl::is_input_name(const std::string& name){ return std::find(inputs_name_.begin(), inputs_name_.end(), name) != inputs_name_.end(); } void TRTInferImpl::forward(bool sync) { EngineContext* context = (EngineContext*)context_.get(); int inputBatchSize = inputs_[0]->size(0); for(int i = 0; i < context->engine_->getNbBindings(); ++i){ auto dims = context->engine_->getBindingDimensions(i); auto type = context->engine_->getBindingDataType(i); dims.d[0] = inputBatchSize; if(context->engine_->bindingIsInput(i)){ context->context_->setBindingDimensions(i, dims); } } for (int i = 0; i < outputs_.size(); ++i) { outputs_[i]->resize_single_dim(0, inputBatchSize); outputs_[i]->to_gpu(false); } for (int i = 0; i < orderdBlobs_.size(); ++i) bindingsPtr_[i] = orderdBlobs_[i]->gpu(); void** bindingsptr = bindingsPtr_.data(); //bool execute_result = context->context_->enqueue(inputBatchSize, bindingsptr, context->stream_, nullptr); bool execute_result = context->context_->enqueueV2(bindingsptr, context->stream_, nullptr); if(!execute_result){ auto code = hipGetLastError(); INFOF("execute fail, code %d[%s], message %s", code, hipGetErrorName(code), hipGetErrorString(code)); } if (sync) { synchronize(); } } std::shared_ptr<MixMemory> TRTInferImpl::get_workspace() { return workspace_; } int TRTInferImpl::num_input() { return this->inputs_.size(); } int TRTInferImpl::num_output() { return this->outputs_.size(); } void TRTInferImpl::set_input (int index, std::shared_ptr<Tensor> tensor){ Assert(index >= 0 && index < inputs_.size()); this->inputs_[index] = tensor; int order_index = inputs_map_to_ordered_index_[index]; this->orderdBlobs_[order_index] = tensor; } void TRTInferImpl::set_output(int index, std::shared_ptr<Tensor> tensor){ Assert(index >= 0 && index < outputs_.size()); this->outputs_[index] = tensor; int order_index = outputs_map_to_ordered_index_[index]; this->orderdBlobs_[order_index] = tensor; } std::shared_ptr<Tensor> TRTInferImpl::input(int index) { Assert(index >= 0 && index < inputs_name_.size()); return this->inputs_[index]; } std::string TRTInferImpl::get_input_name(int index){ Assert(index >= 0 && index < inputs_name_.size()); return inputs_name_[index]; } std::shared_ptr<Tensor> TRTInferImpl::output(int index) { Assert(index >= 0 && index < outputs_.size()); return outputs_[index]; } std::string TRTInferImpl::get_output_name(int index){ Assert(index >= 0 && index < outputs_name_.size()); return outputs_name_[index]; } int TRTInferImpl::get_max_batch_size() { Assert(this->context_ != nullptr); return this->context_->engine_->getMaxBatchSize(); } std::shared_ptr<Tensor> TRTInferImpl::tensor(const std::string& name) { Assert(this->blobsNameMapper_.find(name) != this->blobsNameMapper_.end()); return orderdBlobs_[blobsNameMapper_[name]]; } std::shared_ptr<TRTInferImpl> load_infer(const string& file) { /* @ */ std::shared_ptr<TRTInferImpl> infer(new TRTInferImpl()); /* @ trt */ if (!infer->load(file)) infer.reset(); return infer; } //////////////////////////////class MonopolyAllocator////////////////////////////////////// /* @ tensormax_batch * 2tensorquery tensor */ template<class _ItemType> class MonopolyAllocator{ public: class MonopolyData{ public: std::shared_ptr<_ItemType>& data(){ return data_; } void release(){manager_->release_one(this);} private: 
MonopolyData(MonopolyAllocator* pmanager){manager_ = pmanager;} private: friend class MonopolyAllocator; MonopolyAllocator* manager_ = nullptr; std::shared_ptr<_ItemType> data_; bool available_ = true; }; typedef std::shared_ptr<MonopolyData> MonopolyDataPointer; MonopolyAllocator(int size){ capacity_ = size; num_available_ = size; datas_.resize(size); for(int i = 0; i < size; ++i) datas_[i] = std::shared_ptr<MonopolyData>(new MonopolyData(this)); } virtual ~MonopolyAllocator(){ run_ = false; cv_.notify_all(); std::unique_lock<std::mutex> l(lock_); cv_exit_.wait(l, [&](){ return num_wait_thread_ == 0; }); } MonopolyDataPointer query(int timeout = 10000){ std::unique_lock<std::mutex> l(lock_); if(!run_) return nullptr; if(num_available_ == 0){ num_wait_thread_++; auto state = cv_.wait_for(l, std::chrono::milliseconds(timeout), [&](){ return num_available_ > 0 || !run_; }); num_wait_thread_--; cv_exit_.notify_one(); // timeout, no available, exit program if(!state || num_available_ == 0 || !run_) return nullptr; } auto item = std::find_if(datas_.begin(), datas_.end(), [](MonopolyDataPointer& item){return item->available_;}); if(item == datas_.end()) return nullptr; (*item)->available_ = false; num_available_--; return *item; } int num_available(){ return num_available_; } int capacity(){ return capacity_; } private: void release_one(MonopolyData* prq){ std::unique_lock<std::mutex> l(lock_); if(!prq->available_){ prq->available_ = true; num_available_++; cv_.notify_one(); } } private: std::mutex lock_; std::condition_variable cv_; std::condition_variable cv_exit_; std::vector<MonopolyDataPointer> datas_; int capacity_ = 0; volatile int num_available_ = 0; volatile int num_wait_thread_ = 0; volatile bool run_ = true; }; /////////////////////////////////////////class ThreadSafedAsyncInfer///////////////////////////////////////////// /* @ future */ template<class Input, class Output, class StartParam=std::tuple<std::string, int>, class JobAdditional=int> class ThreadSafedAsyncInfer{ public: /* @ */ struct Job{ Input input; /* @ */ Output output; /* @ */ JobAdditional additional; /* @ */ MonopolyAllocator<Tensor>::MonopolyDataPointer mono_tensor; /* @ tensor */ std::shared_ptr<std::promise<Output>> pro; /* @ promise, */ }; virtual ~ThreadSafedAsyncInfer(){ stop(); } void stop(){ run_ = false; cond_.notify_all(); ////////////////////////////////////////// cleanup jobs { std::unique_lock<std::mutex> l(jobs_lock_); while(!jobs_.empty()){ auto& item = jobs_.front(); if(item.pro) item.pro->set_value(Output()); jobs_.pop(); } }; if(worker_){ worker_->join(); worker_.reset(); } } /* @ */ bool startup(const StartParam& param){ run_ = true; /* @ promisefuture */ std::promise<bool> pro; start_param_ = param; /* @ */ worker_ = std::make_shared<std::thread>(&ThreadSafedAsyncInfer::worker, this, std::ref(pro)); /* @ promisepropro */ return pro.get_future().get(); } virtual std::shared_future<Output> commit(const Input& input){ Job job; job.pro = std::make_shared<std::promise<Output>>(); if(!preprocess(job, input)){ job.pro->set_value(Output()); return job.pro->get_future(); } /////////////////////////////////////////////////////////// { std::unique_lock<std::mutex> l(jobs_lock_); jobs_.push(job); }; cond_.notify_one(); return job.pro->get_future(); } virtual std::vector<std::shared_future<Output>> commits(const std::vector<Input>& inputs){ /* @ batch_size */ int batch_size = ::min((int)inputs.size(), this->tensor_allocator_->capacity()); /* @ jobvectorbatch */ std::vector<Job> jobs(inputs.size()); /* @ vector 
*/ std::vector<std::shared_future<Output>> results(inputs.size()); int nepoch = (inputs.size() + batch_size - 1) / batch_size; for(int epoch = 0; epoch < nepoch; ++epoch){ /* @ */ int begin = epoch * batch_size; int end = ::min((int)inputs.size(), begin + batch_size); /* @ */ for(int i = begin; i < end; ++i){ /* @ Job */ Job& job = jobs[i]; /* @ JObpromise */ job.pro = std::make_shared<std::promise<Output>>(); /* @ job */ if(!preprocess(job, inputs[i])){ job.pro->set_value(Output()); } /* @ job.pro->set_value(image_based_boxes); */ results[i] = job.pro->get_future(); } /////////////////////////////////////////////////////////// { std::unique_lock<std::mutex> l(jobs_lock_); for(int i = begin; i < end; ++i){ jobs_.emplace(std::move(jobs[i])); }; } cond_.notify_one(); } return results; } protected: virtual void worker(std::promise<bool>& result) = 0; virtual bool preprocess(Job& job, const Input& input) = 0; virtual bool get_jobs_and_wait(std::vector<Job>& fetch_jobs, int max_size){ /* @ jobs */ std::unique_lock<std::mutex> l(jobs_lock_); /* @ */ cond_.wait(l, [&](){ return !run_ || !jobs_.empty(); }); if(!run_) return false; /* @ */ fetch_jobs.clear(); /* @ jobs_ fetch_jobs */ for(int i = 0; i < max_size && !jobs_.empty(); ++i){ fetch_jobs.emplace_back(std::move(jobs_.front())); jobs_.pop(); } return true; } virtual bool get_job_and_wait(Job& fetch_job){ std::unique_lock<std::mutex> l(jobs_lock_); cond_.wait(l, [&](){ return !run_ || !jobs_.empty(); }); if(!run_) return false; fetch_job = std::move(jobs_.front()); jobs_.pop(); return true; } protected: StartParam start_param_; std::atomic<bool> run_; /* @ */ std::mutex jobs_lock_; std::queue<Job> jobs_; std::shared_ptr<std::thread> worker_; std::condition_variable cond_; std::shared_ptr<MonopolyAllocator<Tensor>> tensor_allocator_; }; ///////////////////////////////////class YoloTRTInferImpl////////////////////////////////////// /* @ Yolo batch yolo */ const char* type_name(Type type){ switch(type){ case Type::V5: return "YoloV5"; case Type::X: return "YoloX"; default: return "Unknow"; } } struct AffineMatrix{ float i2d[6]; // image to dst(network), 2x3 matrix float d2i[6]; // dst to image, 2x3 matrix void compute(const cv::Size& from, const cv::Size& to){ float scale_x = to.width / (float)from.width; float scale_y = to.height / (float)from.height; float scale = ::min(scale_x, scale_y); i2d[0] = scale; i2d[1] = 0; i2d[2] = -scale * from.width * 0.5 + to.width * 0.5 + scale * 0.5 - 0.5; i2d[3] = 0; i2d[4] = scale; i2d[5] = -scale * from.height * 0.5 + to.height * 0.5 + scale * 0.5 - 0.5; cv::Mat m2x3_i2d(2, 3, CV_32F, i2d); cv::Mat m2x3_d2i(2, 3, CV_32F, d2i); cv::invertAffineTransform(m2x3_i2d, m2x3_d2i); } cv::Mat i2d_mat(){ return cv::Mat(2, 3, CV_32F, i2d); } }; using ThreadSafedAsyncInferImpl = ThreadSafedAsyncInfer < cv::Mat, // input BoxArray, // output tuple<string, int>, // start param AffineMatrix // additional >; class YoloTRTInferImpl : public Infer, public ThreadSafedAsyncInferImpl{ public: /* @ TRTInferImplstopstop */ virtual ~YoloTRTInferImpl(){ stop(); } virtual bool startup(const string& file, Type type, int gpuid, float confidence_threshold, float nms_threshold){ if(type == Type::V5){ /* @ */ normalize_ = Norm::alpha_beta(1 / 255.0f, 0.0f, ChannelType::SwapRB); }else if(type == Type::X){ //float mean[] = {0.485, 0.456, 0.406}; //float std[] = {0.229, 0.224, 0.225}; //normalize_ = Norm::mean_std(mean, std, 1/255.0f, ChannelType::Invert); normalize_ = Norm::None(); }else{ INFOE("Unsupport type %d", type); } 
confidence_threshold_ = confidence_threshold; nms_threshold_ = nms_threshold; return ThreadSafedAsyncInferImpl::startup(make_tuple(file, gpuid)); } virtual void worker(promise<bool>& result) override{ /* @ GPUid */ string file = get<0>(start_param_); int gpuid = get<1>(start_param_); /* @ GPU */ set_device(gpuid); /* @ cuda, */ auto engine = load_infer(file); if(engine == nullptr){ INFOE("Engine %s load failed", file.c_str()); result.set_value(false); return; } /* @ */ engine->print(); /* @ bbox */ const int MAX_IMAGE_BBOX = 1024; /* @ bbox */ const int NUM_BOX_ELEMENT = 7; // left, top, right, bottom, confidence, class, keepflag /* @ tensor */ Tensor affin_matrix_device; /* @ tensor */ Tensor output_array_device; /* @ */ int max_batch_size = engine->get_max_batch_size(); auto input = engine->tensor("images"); auto output = engine->tensor("output"); int num_classes = output->size(2) - 5; input_width_ = input->size(3); input_height_ = input->size(2); /* @ GPUmax_batch_size * 2 */ tensor_allocator_ = make_shared<MonopolyAllocator<Tensor>>(max_batch_size * 2); stream_ = engine->get_stream(); gpu_ = gpuid; /* @ */ result.set_value(true); input->resize_single_dim(0, max_batch_size).to_gpu(); affin_matrix_device.set_stream(stream_); /* @ 8 8 * sizeof(float) % 32 == 0 */ affin_matrix_device.resize(max_batch_size, 8).to_gpu(); /* @ 1 + MAX_IMAGE_BBOXcounter + bboxes ... */ output_array_device.resize(max_batch_size, 1 + MAX_IMAGE_BBOX * NUM_BOX_ELEMENT).to_gpu(); vector<Job> fetch_jobs; /* @ */ while(get_jobs_and_wait(fetch_jobs, max_batch_size)){ /* @ */ int infer_batch_size = fetch_jobs.size(); input->resize_single_dim(0, infer_batch_size); for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){ auto& job = fetch_jobs[ibatch]; auto& mono = job.mono_tensor->data(); affin_matrix_device.copy_from_gpu(affin_matrix_device.offset(ibatch), mono->get_workspace()->gpu(), 6); input->copy_from_gpu(input->offset(ibatch), mono->gpu(), mono->count()); job.mono_tensor->release(); } /* @ */ engine->forward(false); output_array_device.to_gpu(false); /* @ */ for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){ auto& job = fetch_jobs[ibatch]; float* image_based_output = output->gpu<float>(ibatch); float* output_array_ptr = output_array_device.gpu<float>(ibatch); auto affine_matrix = affin_matrix_device.gpu<float>(ibatch); checkCudaRuntime(hipMemsetAsync(output_array_ptr, 0, sizeof(int), stream_)); decode_kernel_invoker(image_based_output, output->size(1), num_classes, confidence_threshold_, nms_threshold_, affine_matrix, output_array_ptr, MAX_IMAGE_BBOX, stream_); } output_array_device.to_cpu(); for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){ float* parray = output_array_device.cpu<float>(ibatch); int count = min(MAX_IMAGE_BBOX, (int)*parray); auto& job = fetch_jobs[ibatch]; auto& image_based_boxes = job.output; for(int i = 0; i < count; ++i){ float* pbox = parray + 1 + i * NUM_BOX_ELEMENT; int label = pbox[5]; int keepflag = pbox[6]; if(keepflag == 1){ image_based_boxes.emplace_back(pbox[0], pbox[1], pbox[2], pbox[3], pbox[4], label); } } job.pro->set_value(image_based_boxes); } fetch_jobs.clear(); } stream_ = nullptr; tensor_allocator_.reset(); INFO("Engine destroy."); } virtual bool preprocess(Job& job, const Mat& image) override{ if(tensor_allocator_ == nullptr){ INFOE("tensor_allocator_ is nullptr"); return false; } job.mono_tensor = tensor_allocator_->query(); if(job.mono_tensor == nullptr){ INFOE("Tensor allocator query failed."); return false; } /* @ gpu */ AutoDevice auto_device(gpu_); /* 
@ jobtensornullptr */ auto& tensor = job.mono_tensor->data(); if(tensor == nullptr){ // not init tensor = make_shared<Tensor>(); tensor->set_workspace(make_shared<MixMemory>()); } /* @ shape input_width_input_height_ */ Size input_size(input_width_, input_height_); /* @ */ job.additional.compute(image.size(), input_size); /* @ tensor */ tensor->set_stream(stream_); /* @ tensor resizetensor */ tensor->resize(1, 3, input_height_, input_width_); /* @ GPU */ /* @ */ size_t size_image = image.cols * image.rows * 3; /* @ */ size_t size_matrix = upbound(sizeof(job.additional.d2i), 32); /* @ */ auto workspace = tensor->get_workspace(); /* @ GPU */ uint8_t* gpu_workspace = (uint8_t*)workspace->gpu(size_matrix + size_image); /* @ */ float* affine_matrix_device = (float*)gpu_workspace; /* @ cpu */ uint8_t* image_device = size_matrix + gpu_workspace; uint8_t* cpu_workspace = (uint8_t*)workspace->cpu(size_matrix + size_image); float* affine_matrix_host = (float*)cpu_workspace; uint8_t* image_host = size_matrix + cpu_workspace; //checkCudaRuntime(hipMemcpyAsync(image_host, image.data, size_image, hipMemcpyHostToHost, stream_)); // speed up /* @ */ memcpy(image_host, image.data, size_image); memcpy(affine_matrix_host, job.additional.d2i, sizeof(job.additional.d2i)); checkCudaRuntime(hipMemcpyAsync(image_device, image_host, size_image, hipMemcpyHostToDevice, stream_)); checkCudaRuntime(hipMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(job.additional.d2i), hipMemcpyHostToDevice, stream_)); /* @ image_deviceaffine_matrix_device tensor->gpu<float>() */ warp_affine_bilinear_and_normalize_plane( image_device, image.cols * 3, image.cols, image.rows, tensor->gpu<float>(), input_width_, input_height_, affine_matrix_device, 114, normalize_, stream_ ); /* @ tensorjobbatch job inline void* gpu() const { ((Tensor*)this)->to_gpu(); return data_->gpu(); } */ return true; } virtual vector<shared_future<BoxArray>> commits(const vector<Mat>& images) override{ return ThreadSafedAsyncInferImpl::commits(images); } virtual std::shared_future<BoxArray> commit(const Mat& image) override{ return ThreadSafedAsyncInferImpl::commit(image); } private: int input_width_ = 0; int input_height_ = 0; int gpu_ = 0; float confidence_threshold_ = 0; float nms_threshold_ = 0; hipStream_t stream_ = nullptr; Norm normalize_; }; void image_to_tensor(const cv::Mat& image, shared_ptr<Tensor>& tensor, Type type, int ibatch){ Norm normalize; if(type == Type::V5){ normalize = Norm::alpha_beta(1 / 255.0f, 0.0f, ChannelType::SwapRB); }else if(type == Type::X){ //float mean[] = {0.485, 0.456, 0.406}; //float std[] = {0.229, 0.224, 0.225}; //normalize_ = CUDAKernel::Norm::mean_std(mean, std, 1/255.0f, CUDAKernel::ChannelType::Invert); normalize = Norm::None(); }else{ INFOE("Unsupport type %d", type); } Size input_size(tensor->size(3), tensor->size(2)); AffineMatrix affine; affine.compute(image.size(), input_size); size_t size_image = image.cols * image.rows * 3; size_t size_matrix = upbound(sizeof(affine.d2i), 32); auto workspace = tensor->get_workspace(); uint8_t* gpu_workspace = (uint8_t*)workspace->gpu(size_matrix + size_image); float* affine_matrix_device = (float*)gpu_workspace; uint8_t* image_device = size_matrix + gpu_workspace; uint8_t* cpu_workspace = (uint8_t*)workspace->cpu(size_matrix + size_image); float* affine_matrix_host = (float*)cpu_workspace; uint8_t* image_host = size_matrix + cpu_workspace; auto stream = tensor->get_stream(); memcpy(image_host, image.data, size_image); memcpy(affine_matrix_host, affine.d2i, 
sizeof(affine.d2i)); checkCudaRuntime(hipMemcpyAsync(image_device, image_host, size_image, hipMemcpyHostToDevice, stream)); checkCudaRuntime(hipMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(affine.d2i), hipMemcpyHostToDevice, stream)); warp_affine_bilinear_and_normalize_plane( image_device, image.cols * 3, image.cols, image.rows, tensor->gpu<float>(ibatch), input_size.width, input_size.height, affine_matrix_device, 114, normalize, stream ); } shared_ptr<Infer> create_infer(const string& engine_file, Type type, int gpuid, float confidence_threshold, float nms_threshold){ /* @ */ shared_ptr<YoloTRTInferImpl> instance(new YoloTRTInferImpl()); if(!instance->startup(engine_file, type, gpuid, confidence_threshold, nms_threshold)){ instance.reset(); } return instance; } //////////////////////////////////////Compile Model///////////////////////////////////////////////////////////// const char* mode_string(Mode type) { switch (type) { case Mode::FP32: return "FP32"; case Mode::FP16: return "FP16"; case Mode::INT8: return "INT8"; default: return "UnknowCompileMode"; } } typedef std::function<void(int current, int count, const std::vector<std::string>& files, std::shared_ptr<Tensor>& tensor)> Int8Process; class Int8EntropyCalibrator : public IInt8EntropyCalibrator2{ public: Int8EntropyCalibrator(const vector<string>& imagefiles, nvinfer1::Dims dims, const Int8Process& preprocess) { Assert(preprocess != nullptr); this->dims_ = dims; this->allimgs_ = imagefiles; this->preprocess_ = preprocess; this->fromCalibratorData_ = false; files_.resize(dims.d[0]); checkCudaRuntime(hipStreamCreate(&stream_)); } Int8EntropyCalibrator(const vector<uint8_t>& entropyCalibratorData, nvinfer1::Dims dims, const Int8Process& preprocess) { Assert(preprocess != nullptr); this->dims_ = dims; this->entropyCalibratorData_ = entropyCalibratorData; this->preprocess_ = preprocess; this->fromCalibratorData_ = true; files_.resize(dims.d[0]); checkCudaRuntime(hipStreamCreate(&stream_)); } virtual ~Int8EntropyCalibrator(){ checkCudaRuntime(hipStreamDestroy(stream_)); } int getBatchSize() const noexcept { return dims_.d[0]; } bool next() { int batch_size = dims_.d[0]; if (cursor_ + batch_size > allimgs_.size()) return false; int old_cursor = cursor_; for(int i = 0; i < batch_size; ++i) files_[i] = allimgs_[cursor_++]; if (!tensor_){ tensor_.reset(new Tensor(dims_.nbDims, dims_.d)); tensor_->set_stream(stream_); tensor_->set_workspace(make_shared<MixMemory>()); } preprocess_(old_cursor, allimgs_.size(), files_, tensor_); return true; } bool getBatch(void* bindings[], const char* names[], int nbBindings) noexcept { if (!next()) return false; bindings[0] = tensor_->gpu(); return true; } const vector<uint8_t>& getEntropyCalibratorData() { return entropyCalibratorData_; } const void* readCalibrationCache(size_t& length) noexcept { if (fromCalibratorData_) { length = this->entropyCalibratorData_.size(); return this->entropyCalibratorData_.data(); } length = 0; return nullptr; } virtual void writeCalibrationCache(const void* cache, size_t length) noexcept { entropyCalibratorData_.assign((uint8_t*)cache, (uint8_t*)cache + length); } private: Int8Process preprocess_; vector<string> allimgs_; size_t batchCudaSize_ = 0; int cursor_ = 0; nvinfer1::Dims dims_; vector<string> files_; shared_ptr<Tensor> tensor_; vector<uint8_t> entropyCalibratorData_; bool fromCalibratorData_ = false; hipStream_t stream_ = nullptr; }; bool compile( Mode mode, Type type, unsigned int max_batch_size, const string& source_onnx, const string& saveto, 
size_t max_workspace_size, const std::string& int8_images_folder, const std::string& int8_entropy_calibrator_cache_file) { bool hasEntropyCalibrator = false; vector<uint8_t> entropyCalibratorData; vector<string> entropyCalibratorFiles; auto int8process = [=](int current, int count, const vector<string>& files, shared_ptr<Tensor>& tensor){ for(int i = 0; i < files.size(); ++i){ auto& file = files[i]; INFO("Int8 load %d / %d, %s", current + i + 1, count, file.c_str()); auto image = cv::imread(file); if(image.empty()){ INFOE("Load image failed, %s", file.c_str()); continue; } image_to_tensor(image, tensor, type, i); } tensor->synchronize(); }; if (mode == Mode::INT8) { if (!int8_entropy_calibrator_cache_file.empty()) { if (exists(int8_entropy_calibrator_cache_file)) { entropyCalibratorData = load_file(int8_entropy_calibrator_cache_file); if (entropyCalibratorData.empty()) { INFOE("entropyCalibratorFile is set as: %s, but we read is empty.", int8_entropy_calibrator_cache_file.c_str()); return false; } hasEntropyCalibrator = true; } } if (hasEntropyCalibrator) { if (!int8_images_folder.empty()) { INFOW("int8_images_folder is ignore, when int8_entropy_calibrator_cache_file is set"); } } else { entropyCalibratorFiles = glob_image_files(int8_images_folder); if (entropyCalibratorFiles.empty()) { INFOE("Can not find any images(jpg/png/bmp/jpeg/tiff) from directory: %s", int8_images_folder.c_str()); return false; } if(entropyCalibratorFiles.size() < max_batch_size){ INFOW("Too few images provided, %d[provided] < %d[max batch size], image copy will be performed", entropyCalibratorFiles.size(), max_batch_size); for(int i = entropyCalibratorFiles.size(); i < max_batch_size; ++i) entropyCalibratorFiles.push_back(entropyCalibratorFiles[i % entropyCalibratorFiles.size()]); } } } else { if (hasEntropyCalibrator) { INFOW("int8_entropy_calibrator_cache_file is ignore, when Mode is '%s'", mode_string(mode)); } } INFO("Compile %s %s.", mode_string(mode), source_onnx.c_str()); shared_ptr<IBuilder> builder(createInferBuilder(gLogger), destroy_nvidia_pointer<IBuilder>); if (builder == nullptr) { INFOE("Can not create builder."); return false; } shared_ptr<IBuilderConfig> config(builder->createBuilderConfig(), destroy_nvidia_pointer<IBuilderConfig>); if (mode == Mode::FP16) { if (!builder->platformHasFastFp16()) { INFOW("Platform not have fast fp16 support"); } config->setFlag(BuilderFlag::kFP16); } else if (mode == Mode::INT8) { if (!builder->platformHasFastInt8()) { INFOW("Platform not have fast int8 support"); } config->setFlag(BuilderFlag::kINT8); } shared_ptr<INetworkDefinition> network; shared_ptr<nvonnxparser::IParser> onnxParser; const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); network = shared_ptr<INetworkDefinition>(builder->createNetworkV2(explicitBatch), destroy_nvidia_pointer<INetworkDefinition>); //from onnx is not markOutput onnxParser.reset(nvonnxparser::createParser(*network, gLogger), destroy_nvidia_pointer<nvonnxparser::IParser>); if (onnxParser == nullptr) { INFOE("Can not create parser."); return false; } if (!onnxParser->parseFromFile(source_onnx.c_str(), 1)) { INFOE("Can not parse OnnX file: %s", source_onnx.c_str()); return false; } auto inputTensor = network->getInput(0); auto inputDims = inputTensor->getDimensions(); shared_ptr<Int8EntropyCalibrator> int8Calibrator; if (mode == Mode::INT8) { auto calibratorDims = inputDims; calibratorDims.d[0] = max_batch_size; if (hasEntropyCalibrator) { INFO("Using exist entropy calibrator 
data[%d bytes]: %s", entropyCalibratorData.size(), int8_entropy_calibrator_cache_file.c_str()); int8Calibrator.reset(new Int8EntropyCalibrator( entropyCalibratorData, calibratorDims, int8process )); } else { INFO("Using image list[%d files]: %s", entropyCalibratorFiles.size(), int8_images_folder.c_str()); int8Calibrator.reset(new Int8EntropyCalibrator( entropyCalibratorFiles, calibratorDims, int8process )); } config->setInt8Calibrator(int8Calibrator.get()); } INFO("Input shape is %s", join_dims(vector<int>(inputDims.d, inputDims.d + inputDims.nbDims)).c_str()); INFO("Set max batch size = %d", max_batch_size); INFO("Set max workspace size = %.2f MB", max_workspace_size / 1024.0f / 1024.0f); int net_num_input = network->getNbInputs(); INFO("Network has %d inputs:", net_num_input); vector<string> input_names(net_num_input); for(int i = 0; i < net_num_input; ++i){ auto tensor = network->getInput(i); auto dims = tensor->getDimensions(); auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims)); INFO(" %d.[%s] shape is %s", i, tensor->getName(), dims_str.c_str()); input_names[i] = tensor->getName(); } int net_num_output = network->getNbOutputs(); INFO("Network has %d outputs:", net_num_output); for(int i = 0; i < net_num_output; ++i){ auto tensor = network->getOutput(i); auto dims = tensor->getDimensions(); auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims)); INFO(" %d.[%s] shape is %s", i, tensor->getName(), dims_str.c_str()); } int net_num_layers = network->getNbLayers(); INFO("Network has %d layers", net_num_layers); builder->setMaxBatchSize(max_batch_size); config->setMaxWorkspaceSize(max_workspace_size); auto profile = builder->createOptimizationProfile(); for(int i = 0; i < net_num_input; ++i){ auto input = network->getInput(i); auto input_dims = input->getDimensions(); input_dims.d[0] = 1; profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims); profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims); input_dims.d[0] = max_batch_size; profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims); } config->addOptimizationProfile(profile); INFO("Building engine..."); auto time_start = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count(); shared_ptr<ICudaEngine> engine(builder->buildEngineWithConfig(*network, *config), destroy_nvidia_pointer<ICudaEngine>); if (engine == nullptr) { INFOE("engine is nullptr"); return false; } if (mode == Mode::INT8) { if (!hasEntropyCalibrator) { if (!int8_entropy_calibrator_cache_file.empty()) { INFO("Save calibrator to: %s", int8_entropy_calibrator_cache_file.c_str()); save_file(int8_entropy_calibrator_cache_file, int8Calibrator->getEntropyCalibratorData()); } else { INFO("No set entropyCalibratorFile, and entropyCalibrator will not save."); } } } auto time_end = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count(); INFO("Build done %lld ms !", time_end - time_start); // serialize the engine, then close everything down shared_ptr<IHostMemory> seridata(engine->serialize(), destroy_nvidia_pointer<IHostMemory>); return save_file(saveto, seridata->data(), seridata->size()); } };
044692a8356e9921fc7128c954b1dc6322a4b7fa.cu
#include "simple_yolo.hpp" #include <NvInfer.h> #include <NvOnnxParser.h> #include <cuda_runtime.h> #include <algorithm> #include <fstream> #include <memory> #include <string> #include <future> #include <condition_variable> #include <mutex> #include <thread> #include <queue> #if defined(_WIN32) # include <Windows.h> # include <wingdi.h> # include <Shlwapi.h> # pragma comment(lib, "shlwapi.lib") # undef min # undef max #else # include <dirent.h> # include <sys/types.h> # include <sys/stat.h> # include <unistd.h> # include <stdarg.h> #endif namespace SimpleYolo{ using namespace nvinfer1; using namespace std; using namespace cv; #define CURRENT_DEVICE_ID -1 #define GPU_BLOCK_THREADS 512 #define KernelPositionBlock \ int position = (blockDim.x * blockIdx.x + threadIdx.x); \ if (position >= (edge)) return; #define checkCudaRuntime(call) check_runtime(call, #call, __LINE__, __FILE__) static bool check_runtime(cudaError_t e, const char* call, int line, const char *file); #define checkCudaKernel(...) \ __VA_ARGS__; \ do{cudaError_t cudaStatus = cudaPeekAtLastError(); \ if (cudaStatus != cudaSuccess){ \ INFOE("launch failed: %s", cudaGetErrorString(cudaStatus)); \ }} while(0); #define Assert(op) \ do{ \ bool cond = !(!(op)); \ if(!cond){ \ INFOF("Assert failed, " #op); \ } \ }while(false) /* @致青春 修改这个level来实现修改日志输出级别 */ #define CURRENT_LOG_LEVEL LogLevel::Info #define INFOD(...) __log_func(__FILE__, __LINE__, LogLevel::Debug, __VA_ARGS__) #define INFOV(...) __log_func(__FILE__, __LINE__, LogLevel::Verbose, __VA_ARGS__) #define INFO(...) __log_func(__FILE__, __LINE__, LogLevel::Info, __VA_ARGS__) #define INFOW(...) __log_func(__FILE__, __LINE__, LogLevel::Warning, __VA_ARGS__) #define INFOE(...) __log_func(__FILE__, __LINE__, LogLevel::Error, __VA_ARGS__) #define INFOF(...) 
__log_func(__FILE__, __LINE__, LogLevel::Fatal, __VA_ARGS__) enum class NormType : int{ None = 0, MeanStd = 1, AlphaBeta = 2 }; enum class ChannelType : int{ None = 0, SwapRB = 1 }; /* @致青春 归一化操作,可以支持均值标准差,alpha beta,和swap RB */ struct Norm{ float mean[3]; float std[3]; float alpha, beta; NormType type = NormType::None; ChannelType channel_type = ChannelType::None; // out = (x * alpha - mean) / std static Norm mean_std(const float mean[3], const float std[3], float alpha = 1/255.0f, ChannelType channel_type=ChannelType::None); // out = x * alpha + beta static Norm alpha_beta(float alpha, float beta = 0, ChannelType channel_type=ChannelType::None); // None static Norm None(); }; Norm Norm::mean_std(const float mean[3], const float std[3], float alpha, ChannelType channel_type){ Norm out; out.type = NormType::MeanStd; out.alpha = alpha; out.channel_type = channel_type; memcpy(out.mean, mean, sizeof(out.mean)); memcpy(out.std, std, sizeof(out.std)); return out; } Norm Norm::alpha_beta(float alpha, float beta, ChannelType channel_type){ Norm out; out.type = NormType::AlphaBeta; out.alpha = alpha; out.beta = beta; out.channel_type = channel_type; return out; } Norm Norm::None(){ return Norm(); } /* @致青春 构造时设置当前gpuid,析构时修改为原来的gpuid */ class AutoDevice{ public: AutoDevice(int device_id = 0){ cudaGetDevice(&old_); if(old_ != device_id && device_id != -1) checkCudaRuntime(cudaSetDevice(device_id)); } virtual ~AutoDevice(){ if(old_ != -1) checkCudaRuntime(cudaSetDevice(old_)); } private: int old_ = -1; }; enum class LogLevel : int{ Debug = 5, Verbose = 4, Info = 3, Warning = 2, Error = 1, Fatal = 0 }; static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...); inline int upbound(int n, int align = 32){return (n + align - 1) / align * align;} static bool check_runtime(cudaError_t e, const char* call, int line, const char *file){ if (e != cudaSuccess) { INFOE("CUDA Runtime error %s # %s, code = %s [ %d ] in file %s:%d", call, cudaGetErrorString(e), cudaGetErrorName(e), e, file, line); return false; } return true; } #define TRT_STR(v) #v #define TRT_VERSION_STRING(major, minor, patch, build) TRT_STR(major) "." TRT_STR(minor) "." TRT_STR(patch) "." 
TRT_STR(build) const char* trt_version(){ return TRT_VERSION_STRING(NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, NV_TENSORRT_BUILD); } static bool check_device_id(int device_id){ int device_count = -1; checkCudaRuntime(cudaGetDeviceCount(&device_count)); if(device_id < 0 || device_id >= device_count){ INFOE("Invalid device id: %d, count = %d", device_id, device_count); return false; } return true; } static bool exists(const string& path){ #ifdef _WIN32 return ::PathFileExistsA(path.c_str()); #else return access(path.c_str(), R_OK) == 0; #endif } static const char* level_string(LogLevel level){ switch (level){ case LogLevel::Debug: return "debug"; case LogLevel::Verbose: return "verbo"; case LogLevel::Info: return "info"; case LogLevel::Warning: return "warn"; case LogLevel::Error: return "error"; case LogLevel::Fatal: return "fatal"; default: return "unknown"; } } template<typename _T> static string join_dims(const vector<_T>& dims){ stringstream output; char buf[64]; const char* fmts[] = {"%d", " x %d"}; for(int i = 0; i < dims.size(); ++i){ snprintf(buf, sizeof(buf), fmts[i != 0], dims[i]); output << buf; } return output.str(); } static bool save_file(const string& file, const void* data, size_t length){ FILE* f = fopen(file.c_str(), "wb"); if (!f) return false; if (data and length > 0){ if (fwrite(data, 1, length, f) not_eq length){ fclose(f); return false; } } fclose(f); return true; } static bool save_file(const string& file, const vector<uint8_t>& data){ return save_file(file, data.data(), data.size()); } static string file_name(const string& path, bool include_suffix){ if (path.empty()) return ""; int p = path.rfind('/'); #ifdef U_OS_WINDOWS int e = path.rfind('\\'); p = std::max(p, e); #endif p += 1; //include suffix if (include_suffix) return path.substr(p); int u = path.rfind('.'); if (u == -1) return path.substr(p); if (u <= p) u = path.size(); return path.substr(p, u - p); } /* @致青春 enumerate the image files in a folder */ vector<string> glob_image_files(const string& directory){ /* @致青春 search the directory for all images: "*.jpg;*.png;*.bmp;*.jpeg;*.tiff" */ vector<string> files, output; set<string> pattern_set{"jpg", "png", "bmp", "jpeg", "tiff"}; if(directory.empty()){ INFOE("Glob images from folder failed, folder is empty"); return output; } try{ cv::glob(directory + "/*", files, true); }catch(...){ INFOE("Glob %s failed", directory.c_str()); return output; } for(int i = 0; i < files.size(); ++i){ auto& file = files[i]; int p = file.rfind("."); if(p == -1) continue; auto suffix = file.substr(p+1); std::transform(suffix.begin(), suffix.end(), suffix.begin(), [](char c){ if(c >= 'A' && c <= 'Z') c = c - 'A' + 'a'; return c; }); if(pattern_set.find(suffix) != pattern_set.end()) output.push_back(file); } return output; } static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...){ if(level > CURRENT_LOG_LEVEL) return; va_list vl; va_start(vl, fmt); char buffer[2048]; string filename = file_name(file, true); int n = snprintf(buffer, sizeof(buffer), "[%s][%s:%d]:", level_string(level), filename.c_str(), line); vsnprintf(buffer + n, sizeof(buffer) - n, fmt, vl); fprintf(stdout, "%s\n", buffer); if (level == LogLevel::Fatal) { fflush(stdout); abort(); } } static dim3 grid_dims(int numJobs) { int numBlockThreads = numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS; return dim3(((numJobs + numBlockThreads - 1) / (float)numBlockThreads)); } static dim3 block_dims(int numJobs) { return numJobs < GPU_BLOCK_THREADS ?
numJobs : GPU_BLOCK_THREADS; } static int get_device(int device_id){ if(device_id != CURRENT_DEVICE_ID){ check_device_id(device_id); return device_id; } checkCudaRuntime(cudaGetDevice(&device_id)); return device_id; } void set_device(int device_id) { if (device_id == -1) return; checkCudaRuntime(cudaSetDevice(device_id)); } /////////////////////////////CUDA kernels//////////////////////////////////////////////// const int NUM_BOX_ELEMENT = 7; // left, top, right, bottom, confidence, class, keepflag static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){ *ox = matrix[0] * x + matrix[1] * y + matrix[2]; *oy = matrix[3] * x + matrix[4] * y + matrix[5]; } /* @致青春 @致青春 解码核函数 */ static __global__ void decode_kernel(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float* invert_affine_matrix, float* parray, int max_objects){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= num_bboxes) return; float* pitem = predict + (5 + num_classes) * position; float objectness = pitem[4]; if(objectness < confidence_threshold) return; float* class_confidence = pitem + 5; float confidence = *class_confidence++; int label = 0; for(int i = 1; i < num_classes; ++i, ++class_confidence){ if(*class_confidence > confidence){ confidence = *class_confidence; label = i; } } confidence *= objectness; if(confidence < confidence_threshold) return; int index = atomicAdd(parray, 1); if(index >= max_objects) return; float cx = *pitem++; float cy = *pitem++; float width = *pitem++; float height = *pitem++; float left = cx - width * 0.5f; float top = cy - height * 0.5f; float right = cx + width * 0.5f; float bottom = cy + height * 0.5f; affine_project(invert_affine_matrix, left, top, &left, &top); affine_project(invert_affine_matrix, right, bottom, &right, &bottom); float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT; *pout_item++ = left; *pout_item++ = top; *pout_item++ = right; *pout_item++ = bottom; *pout_item++ = confidence; *pout_item++ = label; *pout_item++ = 1; // 1 = keep, 0 = ignore } static __device__ float box_iou( float aleft, float atop, float aright, float abottom, float bleft, float btop, float bright, float bbottom ){ float cleft = max(aleft, bleft); float ctop = max(atop, btop); float cright = min(aright, bright); float cbottom = min(abottom, bbottom); float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f); if(c_area == 0.0f) return 0.0f; float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop); float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop); return c_area / (a_area + b_area - c_area); } static __global__ void fast_nms_kernel(float* bboxes, int max_objects, float threshold){ int position = (blockDim.x * blockIdx.x + threadIdx.x); int count = min((int)*bboxes, max_objects); if (position >= count) return; // left, top, right, bottom, confidence, class, keepflag float* pcurrent = bboxes + 1 + position * NUM_BOX_ELEMENT; for(int i = 0; i < count; ++i){ float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT; if(i == position || pcurrent[5] != pitem[5]) continue; if(pitem[4] >= pcurrent[4]){ if(pitem[4] == pcurrent[4] && i < position) continue; float iou = box_iou( pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3], pitem[0], pitem[1], pitem[2], pitem[3] ); if(iou > threshold){ pcurrent[6] = 0; // 1=keep, 0=ignore return; } } } } static void decode_kernel_invoker(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, 
float* parray, int max_objects, cudaStream_t stream){ auto grid = grid_dims(num_bboxes); auto block = block_dims(num_bboxes); /* @致青春 如果核函数有波浪线,没关系,他是正常的,你只是看不顺眼罢了 */ checkCudaKernel(decode_kernel<<<grid, block, 0, stream>>>(predict, num_bboxes, num_classes, confidence_threshold, invert_affine_matrix, parray, max_objects)); grid = grid_dims(max_objects); block = block_dims(max_objects); checkCudaKernel(fast_nms_kernel<<<grid, block, 0, stream>>>(parray, max_objects, nms_threshold)); } /* @致青春 @致青春 数据预处理 */ static __global__ void warp_affine_bilinear_and_normalize_plane_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, uint8_t const_value_st, float* warp_affine_matrix_2_3, Norm norm, int edge){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= edge) return; float m_x1 = warp_affine_matrix_2_3[0]; float m_y1 = warp_affine_matrix_2_3[1]; float m_z1 = warp_affine_matrix_2_3[2]; float m_x2 = warp_affine_matrix_2_3[3]; float m_y2 = warp_affine_matrix_2_3[4]; float m_z2 = warp_affine_matrix_2_3[5]; int dx = position % dst_width; int dy = position / dst_width; float src_x = m_x1 * dx + m_y1 * dy + m_z1; float src_y = m_x2 * dx + m_y2 * dy + m_z2; float c0, c1, c2; if(src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height){ // out of range c0 = const_value_st; c1 = const_value_st; c2 = const_value_st; }else{ int y_low = floorf(src_y); int x_low = floorf(src_x); int y_high = y_low + 1; int x_high = x_low + 1; uint8_t const_value[] = {const_value_st, const_value_st, const_value_st}; float ly = src_y - y_low; float lx = src_x - x_low; float hy = 1 - ly; float hx = 1 - lx; float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; uint8_t* v1 = const_value; uint8_t* v2 = const_value; uint8_t* v3 = const_value; uint8_t* v4 = const_value; if(y_low >= 0){ if (x_low >= 0) v1 = src + y_low * src_line_size + x_low * 3; if (x_high < src_width) v2 = src + y_low * src_line_size + x_high * 3; } if(y_high < src_height){ if (x_low >= 0) v3 = src + y_high * src_line_size + x_low * 3; if (x_high < src_width) v4 = src + y_high * src_line_size + x_high * 3; } // same to opencv c0 = floorf(w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f); c1 = floorf(w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f); c2 = floorf(w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f); } if(norm.channel_type == ChannelType::SwapRB){ float t = c2; c2 = c0; c0 = t; } if(norm.type == NormType::MeanStd){ c0 = (c0 * norm.alpha - norm.mean[0]) / norm.std[0]; c1 = (c1 * norm.alpha - norm.mean[1]) / norm.std[1]; c2 = (c2 * norm.alpha - norm.mean[2]) / norm.std[2]; }else if(norm.type == NormType::AlphaBeta){ c0 = c0 * norm.alpha + norm.beta; c1 = c1 * norm.alpha + norm.beta; c2 = c2 * norm.alpha + norm.beta; } int area = dst_width * dst_height; float* pdst_c0 = dst + dy * dst_width + dx; float* pdst_c1 = pdst_c0 + area; float* pdst_c2 = pdst_c1 + area; *pdst_c0 = c0; *pdst_c1 = c1; *pdst_c2 = c2; } static void warp_affine_bilinear_and_normalize_plane( uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, float* matrix_2_3, uint8_t const_value, const Norm& norm, cudaStream_t stream) { int jobs = dst_width * dst_height; auto grid = grid_dims(jobs); auto block = block_dims(jobs); checkCudaKernel(warp_affine_bilinear_and_normalize_plane_kernel << <grid, block, 0, stream >> > ( src, src_line_size, src_width, src_height, dst, dst_width, dst_height, const_value, matrix_2_3, norm, 
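/*
   Launch-configuration note (illustrative only): grid_dims/block_dims above implement the
   usual one-thread-per-job mapping. Assuming GPU_BLOCK_THREADS were 512 (its value is
   defined elsewhere in this file), a 640x640 preprocessing pass would launch roughly:

       int jobs   = 640 * 640;            // 409600 output pixels
       dim3 grid  = grid_dims(jobs);      // ceil(409600 / 512) = 800 blocks
       dim3 block = block_dims(jobs);     // 512 threads per block

   Each thread then guards itself with `if (position >= edge) return;`, so job counts that
   are not a multiple of the block size are still handled correctly.
*/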
jobs )); } //////////////////////////////class MixMemory///////////////////////////////////////////////// /* @致青春 gpu/cpu内存管理 自动对gpu和cpu内存进行分配和释放 这里的cpu使用的是pinned memory,当对gpu做内存复制时,性能比较好 因为是cudaMallocHost分配的,因此他与cuda context有关联 @致青春 内存分配的重点在于,CPU和GPU可以互相copy和创建,通常情况下,创建一块内存,首先应该具备以下要求: 1. 知道指向内存的指针 2. 开辟内存块的大小 3. GPU内存的id号 4. 可以直接引用外部内存块 通过上面我们可以知道,设计类的出发点应该是需要定义几个变量,然后写方法分别实现我们想要的功能如cpu->gpu, gpu->cpu等等操作 中间要考虑内存的复用,内存copy的性能等细节,这里大神基本都注意到了值得学习 因此下面的MixMemory类,就需要着重观察私用成员变量: void* cpu_ = nullptr; size_t cpu_size_ = 0; bool owner_cpu_ = true; int device_id_ = 0; void* gpu_ = nullptr; size_t gpu_size_ = 0; bool owner_gpu_ = true; 通过观察私用成员变量的和成员方法可以很快理解MixMemory */ class MixMemory { public: /* @致青春 构造和析构函数 */ MixMemory(int device_id = CURRENT_DEVICE_ID); MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size); virtual ~MixMemory(); /* @致青春 申请gpu内存和cpu内存 */ void* gpu(size_t size); void* cpu(size_t size); /* @致青春 释放内存 */ void release_gpu(); void release_cpu(); void release_all(); /* @致青春 获取所用权 */ inline bool owner_gpu() const{return owner_gpu_;} inline bool owner_cpu() const{return owner_cpu_;} /* @致青春 获取申请内存的大小 */ inline size_t cpu_size() const{return cpu_size_;} inline size_t gpu_size() const{return gpu_size_;} /* @致青春 获取设备id */ inline int device_id() const{return device_id_;} /* @致青春 获取GPU内存地址 */ inline void* gpu() const { return gpu_; } // Pinned Memory inline void* cpu() const { return cpu_; } void reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size); private: /* @致青春 cpu指针 */ void* cpu_ = nullptr; /* @致青春 cpu申请空间大小 大小 */ size_t cpu_size_ = 0; bool owner_cpu_ = true; /* @致青春 GPU 0 */ int device_id_ = 0; /* @致青春 GPU指针 */ void* gpu_ = nullptr; /* @致青春 GPU申请空间大小 */ size_t gpu_size_ = 0; bool owner_gpu_ = true; }; MixMemory::MixMemory(int device_id){ device_id_ = get_device(device_id); } /* @致青春 传入CPU地址和GPU地址以及对应的大小对其进行初始化 */ MixMemory::MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){ reference_data(cpu, cpu_size, gpu, gpu_size); } /* @致青春 引用数据 */ void MixMemory::reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){ release_all(); if(cpu == nullptr || cpu_size == 0){ cpu = nullptr; cpu_size = 0; } if(gpu == nullptr || gpu_size == 0){ gpu = nullptr; gpu_size = 0; } /* @致青春 把传入进来的参数进行赋值 */ this->cpu_ = cpu; this->cpu_size_ = cpu_size; this->gpu_ = gpu; this->gpu_size_ = gpu_size; /* @致青春 下面两行代码有什么作用呢? 
*/ /* @致青春 大神解释: 可以允许MixMemory引用一块内存,不属于自己管理,但是可以引用 */ this->owner_cpu_ = !(cpu && cpu_size > 0); this->owner_gpu_ = !(gpu && gpu_size > 0); checkCudaRuntime(cudaGetDevice(&device_id_)); } MixMemory::~MixMemory() { release_all(); } void* MixMemory::gpu(size_t size) { /* @致青春 这里判断需要开辟的空间size,和之前的开辟空间的size大小比较,如果小,则直接返回即可 如果大则需要重新开辟空间,先释放已分配的空间,然后开辟新空间,同时把新空间设置为0 */ if (gpu_size_ < size) { release_gpu(); gpu_size_ = size; AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(cudaMalloc(&gpu_, size)); checkCudaRuntime(cudaMemset(gpu_, 0, size)); } return gpu_; } void* MixMemory::cpu(size_t size) { if (cpu_size_ < size) { release_cpu(); cpu_size_ = size; AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(cudaMallocHost(&cpu_, size)); Assert(cpu_ != nullptr); memset(cpu_, 0, size); } return cpu_; } /* @致青春 释放CPU资源 */ void MixMemory::release_cpu() { if (cpu_) { if(owner_cpu_){ AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(cudaFreeHost(cpu_)); } cpu_ = nullptr; } cpu_size_ = 0; } /* @致青春 释放GPU资源 */ void MixMemory::release_gpu() { if (gpu_) { if(owner_gpu_){ AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(cudaFree(gpu_)); } gpu_ = nullptr; } gpu_size_ = 0; } /* @致青春 释放所以资源 */ void MixMemory::release_all() { release_cpu(); release_gpu(); } /////////////////////////////////class Tensor//////////////////////////////////////////////// /* @致青春 Tensor类,实现张量的管理 由于NN多用张量,必须有个类进行管理才方便,实现内存自动分配,计算索引等等 如果要调试,可以执行save_to_file,储存为文件后,在python中加载并查看 */ enum class DataHead : int{ Init = 0, Device = 1, Host = 2 }; class Tensor { public: Tensor(const Tensor& other) = delete; Tensor& operator = (const Tensor& other) = delete; /* @致青春 构造和析构函数 */ explicit Tensor(std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID); explicit Tensor(int n, int c, int h, int w, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID); explicit Tensor(int ndims, const int* dims, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID); explicit Tensor(const std::vector<int>& dims, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID); virtual ~Tensor(); int numel() const; inline int ndims() const{return shape_.size();} /* @致青春 获取维度 */ inline int size(int index) const{return shape_[index];} /* @致青春 获取某一维维度的大小 */ inline int shape(int index) const{return shape_[index];} /* @致青春 获取某一维维度的大小 */ /* @致青春 获取维度的相关信息 */ inline int batch() const{return shape_[0];} inline int channel() const{return shape_[1];} inline int height() const{return shape_[2];} inline int width() const{return shape_[3];} inline const std::vector<int>& dims() const { return shape_; } inline int bytes() const { return bytes_; } inline int bytes(int start_axis) const { return count(start_axis) * element_size(); } /* @致青春 获取数据所占字节数 */ inline int element_size() const { return sizeof(float); } inline DataHead head() const { return head_; } /* @致青春 判断是GPU数据还是cpu数据还是初始化 */ std::shared_ptr<Tensor> clone() const; Tensor& release(); /* @致青春 释放资源 */ Tensor& set_to(float value); bool empty() const; /* @致青春 判断数据是否为空 */ /* @致青春 tensor的数据偏置索引 */ template<typename ... _Args> int offset(int index, _Args ... index_args) const{ const int index_array[] = {index, index_args...}; return offset_array(sizeof...(index_args) + 1, index_array); } int offset_array(const std::vector<int>& index) const; int offset_array(size_t size, const int* index_array) const; template<typename ... _Args> Tensor& resize(int dim_size, _Args ... 
dim_size_args){ const int dim_size_array[] = {dim_size, dim_size_args...}; return resize(sizeof...(dim_size_args) + 1, dim_size_array); } Tensor& resize(int ndims, const int* dims); Tensor& resize(const std::vector<int>& dims); Tensor& resize_single_dim(int idim, int size); int count(int start_axis = 0) const; int device() const{return device_id_;} /* @致青春 把数据copy到GPU上或者copy到CPU上 */ Tensor& to_gpu(bool copy=true); Tensor& to_cpu(bool copy=true); /* @致青春 把数据copy到GPU上或者copy到CPU上 */ inline void* cpu() const { ((Tensor*)this)->to_cpu(); return data_->cpu(); } inline void* gpu() const { ((Tensor*)this)->to_gpu(); return data_->gpu(); } /* @致青春 创建模板进行泛化编程, */ template<typename DType> inline const DType* cpu() const { return (DType*)cpu(); } template<typename DType> inline DType* cpu() { return (DType*)cpu(); } /* @致青春 变长模板参数 ,具体可以访问 :https://blog.csdn.net/zj510/article/details/36633603?spm=1001.2101.3001.6650.10&utm_medium=distribute.pc_relevant.none-task-blog-2%7Edefault%7EBlogCommendFromBaidu%7Edefault-10.highlightwordscore&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2%7Edefault%7EBlogCommendFromBaidu%7Edefault-10.highlightwordscore */ /* @致青春 这里应是数据切片时使用的,下面的GPU类似 */ template<typename DType, typename ... _Args> inline DType* cpu(int i, _Args&& ... args) { return cpu<DType>() + offset(i, args...); } template<typename DType> inline const DType* gpu() const { return (DType*)gpu(); } template<typename DType> inline DType* gpu() { return (DType*)gpu(); } template<typename DType, typename ... _Args> inline DType* gpu(int i, _Args&& ... args) { return gpu<DType>() + offset(i, args...); } template<typename DType, typename ... _Args> inline DType& at(int i, _Args&& ... args) { return *(cpu<DType>() + offset(i, args...)); } /* @致青春 获取数据和空间 */ std::shared_ptr<MixMemory> get_data() const {return data_;} std::shared_ptr<MixMemory> get_workspace() const {return workspace_;} Tensor& set_workspace(std::shared_ptr<MixMemory> workspace) {workspace_ = workspace; return *this;} /* @致青春 获取流和设置流 */ cudaStream_t get_stream() const{return stream_;} Tensor& set_stream(cudaStream_t stream){stream_ = stream; return *this;} Tensor& set_mat (int n, const cv::Mat& image); Tensor& set_norm_mat(int n, const cv::Mat& image, float mean[3], float std[3]); /* @致青春 参数cpu<float>(n, c),使用了可变长参数的功能 ,这里是获取一段数据,这里需要深挖,先放放 */ cv::Mat at_mat(int n = 0, int c = 0) { return cv::Mat(height(), width(), CV_32F, cpu<float>(n, c)); } /* @致青春 设置流为异步执行 */ Tensor& synchronize(); const char* shape_string() const{return shape_string_;} const char* descriptor() const; /* @致青春 这部分很复杂,需要多理解 */ Tensor& copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id = CURRENT_DEVICE_ID); /** # 以下代码是python中加载Tensor import numpy as np def load_tensor(file): with open(file, "rb") as f: binary_data = f.read() magic_number, ndims, dtype = np.frombuffer(binary_data, np.uint32, count=3, offset=0) assert magic_number == 0xFCCFE2E2, f"{file} not a tensor file." 
dims = np.frombuffer(binary_data, np.uint32, count=ndims, offset=3 * 4) if dtype == 0: np_dtype = np.float32 elif dtype == 1: np_dtype = np.float16 else: assert False, f"Unsupport dtype = {dtype}, can not convert to numpy dtype" return np.frombuffer(binary_data, np_dtype, offset=(ndims + 3) * 4).reshape(*dims) **/ bool save_to_file(const std::string& file) const; private: Tensor& compute_shape_string(); Tensor& adajust_memory_by_update_dims_or_type(); void setup_data(std::shared_ptr<MixMemory> data); private: /* @致青春 tensor的shape */ std::vector<int> shape_; /* @致青春 tensor所占的空间大小 */ size_t bytes_ = 0; /* @致青春 数据头 包含三部分,初始化、CPU、GPU */ DataHead head_ = DataHead::Init; /* @致青春 创建流的声明 */ cudaStream_t stream_ = nullptr; int device_id_ = 0; char shape_string_[100]; char descriptor_string_[100]; /* @致青春 MixMemory获取内存或者显存 */ std::shared_ptr<MixMemory> data_; std::shared_ptr<MixMemory> workspace_; }; Tensor::Tensor(int n, int c, int h, int w, shared_ptr<MixMemory> data, int device_id) { this->device_id_ = get_device(device_id); descriptor_string_[0] = 0; setup_data(data); resize(n, c, h, w); } Tensor::Tensor(const std::vector<int>& dims, shared_ptr<MixMemory> data, int device_id){ this->device_id_ = get_device(device_id); descriptor_string_[0] = 0; setup_data(data); resize(dims); } Tensor::Tensor(int ndims, const int* dims, shared_ptr<MixMemory> data, int device_id) { this->device_id_ = get_device(device_id); descriptor_string_[0] = 0; setup_data(data); resize(ndims, dims); } Tensor::Tensor(shared_ptr<MixMemory> data, int device_id){ shape_string_[0] = 0; descriptor_string_[0] = 0; this->device_id_ = get_device(device_id); setup_data(data); } Tensor::~Tensor() { release(); } const char* Tensor::descriptor() const{ char* descriptor_ptr = (char*)descriptor_string_; int device_id = device(); snprintf(descriptor_ptr, sizeof(descriptor_string_), "Tensor:%p, %s, CUDA:%d", data_.get(), shape_string_, device_id ); return descriptor_ptr; } Tensor& Tensor::compute_shape_string(){ // clean string shape_string_[0] = 0; char* buffer = shape_string_; size_t buffer_size = sizeof(shape_string_); for(int i = 0; i < shape_.size(); ++i){ int size = 0; if(i < shape_.size() - 1) size = snprintf(buffer, buffer_size, "%d x ", shape_[i]); else size = snprintf(buffer, buffer_size, "%d", shape_[i]); buffer += size; buffer_size -= size; } return *this; } /* @致青春 这里把cpu内存和GPU内存分配放到一起 */ void Tensor::setup_data(shared_ptr<MixMemory> data){ data_ = data; if(data_ == nullptr){ data_ = make_shared<MixMemory>(device_id_); }else{ device_id_ = data_->device_id(); } head_ = DataHead::Init; if(data_->cpu()){ head_ = DataHead::Host; } if(data_->gpu()){ head_ = DataHead::Device; } } Tensor& Tensor::copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id){ if(head_ == DataHead::Init) to_gpu(false); size_t offset_location = offset * element_size(); if(offset_location >= bytes_){ INFOE("Offset location[%lld] >= bytes_[%lld], out of range", offset_location, bytes_); return *this; } size_t copyed_bytes = num_element * element_size(); size_t remain_bytes = bytes_ - offset_location; if(copyed_bytes > remain_bytes){ INFOE("Copyed bytes[%lld] > remain bytes[%lld], out of range", copyed_bytes, remain_bytes); return *this; } if(head_ == DataHead::Device){ int current_device_id = get_device(device_id); int gpu_device_id = device(); if(current_device_id != gpu_device_id){ checkCudaRuntime(cudaMemcpyPeerAsync(gpu<unsigned char>() + offset_location, gpu_device_id, src, current_device_id, copyed_bytes, stream_)); 
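/*
   Note on the branch above (descriptive commentary, not original code): when the tensor
   lives on a different GPU than the caller's current device, the copy goes through
   cudaMemcpyPeerAsync, which works even without explicit peer access (the driver stages
   through host memory if direct P2P is unavailable). Direct GPU-to-GPU transfers can be
   enabled where supported, e.g.:

       int can_access = 0;
       cudaDeviceCanAccessPeer(&can_access, dst_device, src_device);   // hypothetical setup
       if (can_access) cudaDeviceEnablePeerAccess(src_device, 0);      // names are placeholders
*/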
//checkCudaRuntime(cudaMemcpyAsync(gpu<unsigned char>() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToDevice, stream_)); } else{ checkCudaRuntime(cudaMemcpyAsync(gpu<unsigned char>() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToDevice, stream_)); } }else if(head_ == DataHead::Host){ AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(cudaMemcpyAsync(cpu<unsigned char>() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToHost, stream_)); }else{ INFOE("Unsupport head type %d", head_); } return *this; } Tensor& Tensor::release() { data_->release_all(); shape_.clear(); bytes_ = 0; head_ = DataHead::Init; return *this; } bool Tensor::empty() const{ return data_->cpu() == nullptr && data_->gpu() == nullptr; } int Tensor::count(int start_axis) const { if(start_axis >= 0 && start_axis < shape_.size()){ int size = 1; for (int i = start_axis; i < shape_.size(); ++i) size *= shape_[i]; return size; }else{ return 0; } } Tensor& Tensor::resize(const std::vector<int>& dims) { return resize(dims.size(), dims.data()); } int Tensor::numel() const{ int value = shape_.empty() ? 0 : 1; for(int i = 0; i < shape_.size(); ++i){ value *= shape_[i]; } return value; } Tensor& Tensor::resize_single_dim(int idim, int size){ Assert(idim >= 0 && idim < shape_.size()); auto new_shape = shape_; new_shape[idim] = size; return resize(new_shape); } Tensor& Tensor::resize(int ndims, const int* dims) { vector<int> setup_dims(ndims); for(int i = 0; i < ndims; ++i){ int dim = dims[i]; if(dim == -1){ Assert(ndims == shape_.size()); dim = shape_[i]; } setup_dims[i] = dim; } this->shape_ = setup_dims; this->adajust_memory_by_update_dims_or_type(); this->compute_shape_string(); return *this; } Tensor& Tensor::adajust_memory_by_update_dims_or_type(){ int needed_size = this->numel() * element_size(); if(needed_size > this->bytes_){ head_ = DataHead::Init; } this->bytes_ = needed_size; return *this; } Tensor& Tensor::synchronize(){ AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(cudaStreamSynchronize(stream_)); return *this; } /* @致青春 先开辟需要大小的gpu空间,然后初始化为0 ,然后把cpu的数据转换为GPU */ Tensor& Tensor::to_gpu(bool copy) { /* @致青春 如果已经是GPU的数据,直接返回即可 */ if (head_ == DataHead::Device) return *this; /* @致青春 先更新数据头信息为GPU,然后开辟GPU空间,初始化为0 */ head_ = DataHead::Device; data_->gpu(bytes_); /* @致青春 在确定数据不为空的情况下,把数据copy到GPU上,cpu同理 */ if (copy && data_->cpu() != nullptr) { AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(cudaMemcpyAsync(data_->gpu(), data_->cpu(), bytes_, cudaMemcpyHostToDevice, stream_)); } return *this; } Tensor& Tensor::to_cpu(bool copy) { if (head_ == DataHead::Host) return *this; head_ = DataHead::Host; data_->cpu(bytes_); if (copy && data_->gpu() != nullptr) { AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(cudaMemcpyAsync(data_->cpu(), data_->gpu(), bytes_, cudaMemcpyDeviceToHost, stream_)); checkCudaRuntime(cudaStreamSynchronize(stream_)); } return *this; } int Tensor::offset_array(size_t size, const int* index_array) const{ Assert(size <= shape_.size()); int value = 0; for(int i = 0; i < shape_.size(); ++i){ if(i < size) value += index_array[i]; if(i + 1 < shape_.size()) value *= shape_[i+1]; } return value; } int Tensor::offset_array(const std::vector<int>& index_array) const{ return offset_array(index_array.size(), index_array.data()); } bool Tensor::save_to_file(const std::string& file) const{ if(empty()) return false; FILE* f = fopen(file.c_str(), "wb"); if(f == nullptr) return false; int ndims = this->ndims(); int dtype_ = 0; unsigned 
int head[3] = {0xFCCFE2E2, ndims, static_cast<unsigned int>(dtype_)}; fwrite(head, 1, sizeof(head), f); fwrite(shape_.data(), 1, sizeof(shape_[0]) * shape_.size(), f); fwrite(cpu(), 1, bytes_, f); fclose(f); return true; } /////////////////////////////////class TRTInferImpl//////////////////////////////////////////////// class Logger : public ILogger { public: virtual void log(Severity severity, const char* msg) noexcept override { if (severity == Severity::kINTERNAL_ERROR) { INFOE("NVInfer INTERNAL_ERROR: %s", msg); abort(); }else if (severity == Severity::kERROR) { INFOE("NVInfer: %s", msg); } else if (severity == Severity::kWARNING) { INFOW("NVInfer: %s", msg); } else if (severity == Severity::kINFO) { INFOD("NVInfer: %s", msg); } else { INFOD("%s", msg); } } }; static Logger gLogger; template<typename _T> static void destroy_nvidia_pointer(_T* ptr) { if (ptr) ptr->destroy(); } /* @致青春 这个类就是构建模型的,和tensorrt官方的教程差不多, 只是这里不使用默认流,使用创建的流进行执行 */ class EngineContext { public: virtual ~EngineContext() { destroy(); } void set_stream(cudaStream_t stream){ if(owner_stream_){ if (stream_) {cudaStreamDestroy(stream_);} owner_stream_ = false; } stream_ = stream; } bool build_model(const void* pdata, size_t size) { destroy(); if(pdata == nullptr || size == 0) return false; owner_stream_ = true; /* @致青春 创建流 */ checkCudaRuntime(cudaStreamCreate(&stream_)); if(stream_ == nullptr) return false; /* @致青春 下面就是标准的tensorrt的反序列化流程,不懂的可以看看官网的教程即可 其中runtime_,engine_,context_都是类的内置变量, */ runtime_ = shared_ptr<IRuntime>(createInferRuntime(gLogger), destroy_nvidia_pointer<IRuntime>); if (runtime_ == nullptr) return false; engine_ = shared_ptr<ICudaEngine>(runtime_->deserializeCudaEngine(pdata, size, nullptr), destroy_nvidia_pointer<ICudaEngine>); if (engine_ == nullptr) return false; //runtime_->setDLACore(0); context_ = shared_ptr<IExecutionContext>(engine_->createExecutionContext(), destroy_nvidia_pointer<IExecutionContext>); return context_ != nullptr; } private: void destroy() { context_.reset(); engine_.reset(); runtime_.reset(); if(owner_stream_){ if (stream_) {cudaStreamDestroy(stream_);} } stream_ = nullptr; } public: cudaStream_t stream_ = nullptr; bool owner_stream_ = false; shared_ptr<IExecutionContext> context_; shared_ptr<ICudaEngine> engine_; shared_ptr<IRuntime> runtime_ = nullptr; }; /* @致青春 这里不知道大家是否存在一个疑问就是为什么很多类名称后面都有Impl结尾,这里送大家一个单词implementation,意思是实施 执行 通常存在多态的情况下,抽象类的具体实现,即这个类是具体实现的意思 @PeterHuang 这是一个很重要的实现类,具体实现在这里 @Kx Wang 我支持楼上 @希望 我也支持楼上 */ class TRTInferImpl{ public: virtual ~TRTInferImpl(); bool load(const std::string& file); // @Peter 加载模型 @希望 注意这里用的是string bool load_from_memory(const void* pdata, size_t size); void destroy(); void forward(bool sync); int get_max_batch_size(); cudaStream_t get_stream(); void set_stream(cudaStream_t stream); void synchronize(); size_t get_device_memory_size(); std::shared_ptr<MixMemory> get_workspace(); std::shared_ptr<Tensor> input(int index = 0); std::string get_input_name(int index = 0); std::shared_ptr<Tensor> output(int index = 0); std::string get_output_name(int index = 0); std::shared_ptr<Tensor> tensor(const std::string& name); bool is_output_name(const std::string& name); bool is_input_name(const std::string& name); void set_input (int index, std::shared_ptr<Tensor> tensor); void set_output(int index, std::shared_ptr<Tensor> tensor); std::shared_ptr<std::vector<uint8_t>> serial_engine(); void print(); int num_output(); int num_input(); int device(); private: void build_engine_input_and_outputs_mapper(); private: 
std::vector<std::shared_ptr<Tensor>> inputs_; std::vector<std::shared_ptr<Tensor>> outputs_; std::vector<int> inputs_map_to_ordered_index_; std::vector<int> outputs_map_to_ordered_index_; std::vector<std::string> inputs_name_; std::vector<std::string> outputs_name_; std::vector<std::shared_ptr<Tensor>> orderdBlobs_; std::map<std::string, int> blobsNameMapper_; std::shared_ptr<EngineContext> context_; std::vector<void*> bindingsPtr_; std::shared_ptr<MixMemory> workspace_; int device_ = 0; }; //////////////////////////////////////////////////////////////////////////////////// TRTInferImpl::~TRTInferImpl(){ destroy(); } void TRTInferImpl::destroy() { int old_device = 0; checkCudaRuntime(cudaGetDevice(&old_device)); checkCudaRuntime(cudaSetDevice(device_)); this->context_.reset(); this->blobsNameMapper_.clear(); this->outputs_.clear(); this->inputs_.clear(); this->inputs_name_.clear(); this->outputs_name_.clear(); checkCudaRuntime(cudaSetDevice(old_device)); } void TRTInferImpl::print(){ if(!context_){ INFOW("Infer print, nullptr."); return; } INFO("Infer %p detail", this); INFO("\tMax Batch Size: %d", this->get_max_batch_size()); INFO("\tInputs: %d", inputs_.size()); for(int i = 0; i < inputs_.size(); ++i){ auto& tensor = inputs_[i]; auto& name = inputs_name_[i]; INFO("\t\t%d.%s : shape {%s}", i, name.c_str(), tensor->shape_string()); } INFO("\tOutputs: %d", outputs_.size()); for(int i = 0; i < outputs_.size(); ++i){ auto& tensor = outputs_[i]; auto& name = outputs_name_[i]; INFO("\t\t%d.%s : shape {%s}", i, name.c_str(), tensor->shape_string()); } } std::shared_ptr<std::vector<uint8_t>> TRTInferImpl::serial_engine() { auto memory = this->context_->engine_->serialize(); auto output = make_shared<std::vector<uint8_t>>((uint8_t*)memory->data(), (uint8_t*)memory->data()+memory->size()); memory->destroy(); return output; } bool TRTInferImpl::load_from_memory(const void* pdata, size_t size) { if (pdata == nullptr || size == 0) return false; context_.reset(new EngineContext()); //build model if (!context_->build_model(pdata, size)) { context_.reset(); return false; } workspace_.reset(new MixMemory()); cudaGetDevice(&device_); build_engine_input_and_outputs_mapper(); return true; } static std::vector<uint8_t> load_file(const string& file){ ifstream in(file, ios::in | ios::binary); if (!in.is_open()) return {}; in.seekg(0, ios::end); size_t length = in.tellg(); std::vector<uint8_t> data; if (length > 0){ in.seekg(0, ios::beg); data.resize(length); in.read((char*)&data[0], length); } in.close(); return data; } bool TRTInferImpl::load(const std::string& file) { /* @致青春 反序列化数据 */ auto data = load_file(file); if (data.empty()) return false; context_.reset(new EngineContext()); //build model if (!context_->build_model(data.data(), data.size())) { context_.reset(); return false; } workspace_.reset(new MixMemory()); cudaGetDevice(&device_); /* @致青春 输入输出结果绑定或者是映射 */ build_engine_input_and_outputs_mapper(); return true; } size_t TRTInferImpl::get_device_memory_size() { EngineContext* context = (EngineContext*)this->context_.get(); return context->context_->getEngine().getDeviceMemorySize(); } void TRTInferImpl::build_engine_input_and_outputs_mapper() { /* @致青春 引擎执行上下文 */ EngineContext* context = (EngineContext*)this->context_.get(); /* @致青春 获取输入输出的个数 */ int nbBindings = context->engine_->getNbBindings(); /* @致青春 获取最大的batch */ int max_batchsize = context->engine_->getMaxBatchSize(); inputs_.clear(); inputs_name_.clear(); outputs_.clear(); outputs_name_.clear(); orderdBlobs_.clear(); bindingsPtr_.clear(); 
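/*
   Bookkeeping note (added commentary): orderdBlobs_ keeps one Tensor per engine binding in
   binding order, which forward() later flattens into bindingsPtr_ for enqueueV2. The
   inputs_/outputs_ vectors are views of the same tensors split by direction, and
   blobsNameMapper_ lets callers fetch a binding by name; a hypothetical caller might do:

       auto images = infer->tensor("images");   // input binding of the YOLO engine below
       auto output = infer->tensor("output");   // prediction binding
*/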
blobsNameMapper_.clear(); for (int i = 0; i < nbBindings; ++i) { /* @致青春 获取维度dims = {nbDims=4 d=0x000000c1e77ff2bc {-1, 3, 640, 640, 0, 0, 0, 0} }, dims = {nbDims=3 d=0x000000c1e77ff2bc {-1, 25200, 85, 0, 0, 0, 0, 0} } */ auto dims = context->engine_->getBindingDimensions(i); /* @致青春 获取数据类型 */ auto type = context->engine_->getBindingDataType(i); /* @致青春 获取绑定的名称 */ const char* bindingName = context->engine_->getBindingName(i); /* @致青春 设置批次 dims = {nbDims=4 d=0x000000c1e77ff2bc {16, 3, 640, 640, 0, 0, 0, 0} } , dims = {nbDims=3 d=0x000000c1e77ff2bc {16, 25200, 85, 0, 0, 0, 0, 0} } */ dims.d[0] = max_batchsize; /* @致青春 创建tensor */ auto newTensor = make_shared<Tensor>(dims.nbDims, dims.d); /* @致青春 把模型的流和tensor绑定 */ newTensor->set_stream(this->context_->stream_); /* @致青春 给tensor开辟空间 */ newTensor->set_workspace(this->workspace_); /* @致青春 判断是输入还是输出 */ if (context->engine_->bindingIsInput(i)) { //if is input inputs_.push_back(newTensor); inputs_name_.push_back(bindingName); inputs_map_to_ordered_index_.push_back(orderdBlobs_.size()); } else { //if is output outputs_.push_back(newTensor); outputs_name_.push_back(bindingName); outputs_map_to_ordered_index_.push_back(orderdBlobs_.size()); } blobsNameMapper_[bindingName] = i; orderdBlobs_.push_back(newTensor); } bindingsPtr_.resize(orderdBlobs_.size()); } void TRTInferImpl::set_stream(cudaStream_t stream){ this->context_->set_stream(stream); for(auto& t : orderdBlobs_) t->set_stream(stream); } cudaStream_t TRTInferImpl::get_stream() { return this->context_->stream_; } int TRTInferImpl::device() { return device_; } void TRTInferImpl::synchronize() { checkCudaRuntime(cudaStreamSynchronize(context_->stream_)); } bool TRTInferImpl::is_output_name(const std::string& name){ return std::find(outputs_name_.begin(), outputs_name_.end(), name) != outputs_name_.end(); } bool TRTInferImpl::is_input_name(const std::string& name){ return std::find(inputs_name_.begin(), inputs_name_.end(), name) != inputs_name_.end(); } void TRTInferImpl::forward(bool sync) { EngineContext* context = (EngineContext*)context_.get(); int inputBatchSize = inputs_[0]->size(0); for(int i = 0; i < context->engine_->getNbBindings(); ++i){ auto dims = context->engine_->getBindingDimensions(i); auto type = context->engine_->getBindingDataType(i); dims.d[0] = inputBatchSize; if(context->engine_->bindingIsInput(i)){ context->context_->setBindingDimensions(i, dims); } } for (int i = 0; i < outputs_.size(); ++i) { outputs_[i]->resize_single_dim(0, inputBatchSize); outputs_[i]->to_gpu(false); } for (int i = 0; i < orderdBlobs_.size(); ++i) bindingsPtr_[i] = orderdBlobs_[i]->gpu(); void** bindingsptr = bindingsPtr_.data(); //bool execute_result = context->context_->enqueue(inputBatchSize, bindingsptr, context->stream_, nullptr); bool execute_result = context->context_->enqueueV2(bindingsptr, context->stream_, nullptr); if(!execute_result){ auto code = cudaGetLastError(); INFOF("execute fail, code %d[%s], message %s", code, cudaGetErrorName(code), cudaGetErrorString(code)); } if (sync) { synchronize(); } } std::shared_ptr<MixMemory> TRTInferImpl::get_workspace() { return workspace_; } int TRTInferImpl::num_input() { return this->inputs_.size(); } int TRTInferImpl::num_output() { return this->outputs_.size(); } void TRTInferImpl::set_input (int index, std::shared_ptr<Tensor> tensor){ Assert(index >= 0 && index < inputs_.size()); this->inputs_[index] = tensor; int order_index = inputs_map_to_ordered_index_[index]; this->orderdBlobs_[order_index] = tensor; } void TRTInferImpl::set_output(int index, 
std::shared_ptr<Tensor> tensor){ Assert(index >= 0 && index < outputs_.size()); this->outputs_[index] = tensor; int order_index = outputs_map_to_ordered_index_[index]; this->orderdBlobs_[order_index] = tensor; } std::shared_ptr<Tensor> TRTInferImpl::input(int index) { Assert(index >= 0 && index < inputs_name_.size()); return this->inputs_[index]; } std::string TRTInferImpl::get_input_name(int index){ Assert(index >= 0 && index < inputs_name_.size()); return inputs_name_[index]; } std::shared_ptr<Tensor> TRTInferImpl::output(int index) { Assert(index >= 0 && index < outputs_.size()); return outputs_[index]; } std::string TRTInferImpl::get_output_name(int index){ Assert(index >= 0 && index < outputs_name_.size()); return outputs_name_[index]; } int TRTInferImpl::get_max_batch_size() { Assert(this->context_ != nullptr); return this->context_->engine_->getMaxBatchSize(); } std::shared_ptr<Tensor> TRTInferImpl::tensor(const std::string& name) { Assert(this->blobsNameMapper_.find(name) != this->blobsNameMapper_.end()); return orderdBlobs_[blobsNameMapper_[name]]; } std::shared_ptr<TRTInferImpl> load_infer(const string& file) { /* @致青春 实例化一个推理对象 */ std::shared_ptr<TRTInferImpl> infer(new TRTInferImpl()); /* @致青春 加载trt文件,并反序列化,这里包含了模型的输入输出的绑定和流的设定 */ if (!infer->load(file)) infer.reset(); return infer; } //////////////////////////////class MonopolyAllocator////////////////////////////////////// /* @致青春 独占分配器 通过对tensor做独占管理,具有max_batch * 2个tensor,通过query获取一个 当推理结束后,该tensor释放使用权,即可交给下一个图像使用,内存实现复用 */ template<class _ItemType> class MonopolyAllocator{ public: class MonopolyData{ public: std::shared_ptr<_ItemType>& data(){ return data_; } void release(){manager_->release_one(this);} private: MonopolyData(MonopolyAllocator* pmanager){manager_ = pmanager;} private: friend class MonopolyAllocator; MonopolyAllocator* manager_ = nullptr; std::shared_ptr<_ItemType> data_; bool available_ = true; }; typedef std::shared_ptr<MonopolyData> MonopolyDataPointer; MonopolyAllocator(int size){ capacity_ = size; num_available_ = size; datas_.resize(size); for(int i = 0; i < size; ++i) datas_[i] = std::shared_ptr<MonopolyData>(new MonopolyData(this)); } virtual ~MonopolyAllocator(){ run_ = false; cv_.notify_all(); std::unique_lock<std::mutex> l(lock_); cv_exit_.wait(l, [&](){ return num_wait_thread_ == 0; }); } MonopolyDataPointer query(int timeout = 10000){ std::unique_lock<std::mutex> l(lock_); if(!run_) return nullptr; if(num_available_ == 0){ num_wait_thread_++; auto state = cv_.wait_for(l, std::chrono::milliseconds(timeout), [&](){ return num_available_ > 0 || !run_; }); num_wait_thread_--; cv_exit_.notify_one(); // timeout, no available, exit program if(!state || num_available_ == 0 || !run_) return nullptr; } auto item = std::find_if(datas_.begin(), datas_.end(), [](MonopolyDataPointer& item){return item->available_;}); if(item == datas_.end()) return nullptr; (*item)->available_ = false; num_available_--; return *item; } int num_available(){ return num_available_; } int capacity(){ return capacity_; } private: void release_one(MonopolyData* prq){ std::unique_lock<std::mutex> l(lock_); if(!prq->available_){ prq->available_ = true; num_available_++; cv_.notify_one(); } } private: std::mutex lock_; std::condition_variable cv_; std::condition_variable cv_exit_; std::vector<MonopolyDataPointer> datas_; int capacity_ = 0; volatile int num_available_ = 0; volatile int num_wait_thread_ = 0; volatile bool run_ = true; }; /////////////////////////////////////////class 
ThreadSafedAsyncInfer///////////////////////////////////////////// /* @致青春 异步线程安全的推理器 通过异步线程启动,使得调用方允许任意线程调用把图像做输入,并通过future来获取异步结果 */ template<class Input, class Output, class StartParam=std::tuple<std::string, int>, class JobAdditional=int> class ThreadSafedAsyncInfer{ public: /* @致青春 定义结构体的目的是便于接收模板传入的参数和后面的使用方便 */ struct Job{ Input input; /* @致青春 输入相关参数 */ Output output; /* @致青春 输出相关参数 */ JobAdditional additional; /* @致青春 预处理和后处理相关矩阵 */ MonopolyAllocator<Tensor>::MonopolyDataPointer mono_tensor; /* @致青春 独一的tensor */ std::shared_ptr<std::promise<Output>> pro; /* @致青春 promise,获取相关结果使用的 */ }; virtual ~ThreadSafedAsyncInfer(){ stop(); } void stop(){ run_ = false; cond_.notify_all(); ////////////////////////////////////////// cleanup jobs { std::unique_lock<std::mutex> l(jobs_lock_); while(!jobs_.empty()){ auto& item = jobs_.front(); if(item.pro) item.pro->set_value(Output()); jobs_.pop(); } }; if(worker_){ worker_->join(); worker_.reset(); } } /* @致青春 开始启动,主要功能是启动完成,等待结果 */ bool startup(const StartParam& param){ run_ = true; /* @致青春 这里使用的promise和future的目的只是通知模型加载和参数配置完成,等待后面的数据图片任务 */ std::promise<bool> pro; start_param_ = param; /* @致青春 开启线程,完成初始化工作后,等待预处理完成的图片,然后进行推理工作 */ worker_ = std::make_shared<std::thread>(&ThreadSafedAsyncInfer::worker, this, std::ref(pro)); /* @致青春 主线程来到这里会阻塞,阻塞来源上面的promise的pro对象,需要等待pro对象的返回 */ return pro.get_future().get(); } virtual std::shared_future<Output> commit(const Input& input){ Job job; job.pro = std::make_shared<std::promise<Output>>(); if(!preprocess(job, input)){ job.pro->set_value(Output()); return job.pro->get_future(); } /////////////////////////////////////////////////////////// { std::unique_lock<std::mutex> l(jobs_lock_); jobs_.push(job); }; cond_.notify_one(); return job.pro->get_future(); } virtual std::vector<std::shared_future<Output>> commits(const std::vector<Input>& inputs){ /* @致青春 batch_size的大小 */ int batch_size = std::min((int)inputs.size(), this->tensor_allocator_->capacity()); /* @致青春 创建一个job的vector,因此使用的是batch进行推理即多张图片的推理 */ std::vector<Job> jobs(inputs.size()); /* @致青春 创建一个输出结果接收vector */ std::vector<std::shared_future<Output>> results(inputs.size()); int nepoch = (inputs.size() + batch_size - 1) / batch_size; for(int epoch = 0; epoch < nepoch; ++epoch){ /* @致青春 输入图片 */ int begin = epoch * batch_size; int end = std::min((int)inputs.size(), begin + batch_size); /* @致青春 遍历图片 */ for(int i = begin; i < end; ++i){ /* @致青春 实例化一个Job对象,用作数据的传输 */ Job& job = jobs[i]; /* @致青春 每一张图片都对应这个JOb的结构体,这里对promise进行实例化填充 */ job.pro = std::make_shared<std::promise<Output>>(); /* @致青春 开始进行预处理,其中job包含了所需的参数数据,到预处理进行填充或者使用 */ if(!preprocess(job, inputs[i])){ job.pro->set_value(Output()); } /* @致青春 把图片的结果进行保存,来源这里解码的job.pro->set_value(image_based_boxes); */ results[i] = job.pro->get_future(); } /////////////////////////////////////////////////////////// { std::unique_lock<std::mutex> l(jobs_lock_); for(int i = begin; i < end; ++i){ jobs_.emplace(std::move(jobs[i])); }; } cond_.notify_one(); } return results; } protected: virtual void worker(std::promise<bool>& result) = 0; virtual bool preprocess(Job& job, const Input& input) = 0; virtual bool get_jobs_and_wait(std::vector<Job>& fetch_jobs, int max_size){ /* @致青春 定义一个互斥量锁,目的是当存在多线程同时获取jobs队列的数据时的安全保护机制,但是该工程只有当前线程,因此不存在竞争关系 */ std::unique_lock<std::mutex> l(jobs_lock_); /* @致青春 等待唤醒后 */ cond_.wait(l, [&](){ return !run_ || !jobs_.empty(); }); if(!run_) return false; /* @致青春 唤醒后开始工作 */ fetch_jobs.clear(); /* @致青春 把jobs_队列 里的数据填充到fetch_jobs, 供后面处理 */ for(int i = 0; i < max_size && !jobs_.empty(); ++i){ 
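/*
   Caller-side view (a usage sketch; `infer` is a hypothetical instance created as in the
   create_infer example near the end of this file): commit/commits return futures, so the
   calling thread only blocks when it asks for the result:

       std::vector<cv::Mat> images = ...;        // placeholder input frames
       auto futures = infer->commits(images);    // enqueue a whole batch
       auto boxes   = futures[0].get();          // blocks until the worker decoded image 0
*/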
fetch_jobs.emplace_back(std::move(jobs_.front())); jobs_.pop(); } return true; } virtual bool get_job_and_wait(Job& fetch_job){ std::unique_lock<std::mutex> l(jobs_lock_); cond_.wait(l, [&](){ return !run_ || !jobs_.empty(); }); if(!run_) return false; fetch_job = std::move(jobs_.front()); jobs_.pop(); return true; } protected: StartParam start_param_; std::atomic<bool> run_; /* @致青春 原子操作 */ std::mutex jobs_lock_; std::queue<Job> jobs_; std::shared_ptr<std::thread> worker_; std::condition_variable cond_; std::shared_ptr<MonopolyAllocator<Tensor>> tensor_allocator_; }; ///////////////////////////////////class YoloTRTInferImpl////////////////////////////////////// /* @致青春 Yolo的具体实现 通过上述类的特性,实现预处理的计算重叠、异步垮线程调用,最终拼接为多个图为一个batch进行推理。最大化的利用 显卡性能,实现高性能高可用好用的yolo推理 */ const char* type_name(Type type){ switch(type){ case Type::V5: return "YoloV5"; case Type::X: return "YoloX"; default: return "Unknow"; } } struct AffineMatrix{ float i2d[6]; // image to dst(network), 2x3 matrix float d2i[6]; // dst to image, 2x3 matrix void compute(const cv::Size& from, const cv::Size& to){ float scale_x = to.width / (float)from.width; float scale_y = to.height / (float)from.height; float scale = std::min(scale_x, scale_y); i2d[0] = scale; i2d[1] = 0; i2d[2] = -scale * from.width * 0.5 + to.width * 0.5 + scale * 0.5 - 0.5; i2d[3] = 0; i2d[4] = scale; i2d[5] = -scale * from.height * 0.5 + to.height * 0.5 + scale * 0.5 - 0.5; cv::Mat m2x3_i2d(2, 3, CV_32F, i2d); cv::Mat m2x3_d2i(2, 3, CV_32F, d2i); cv::invertAffineTransform(m2x3_i2d, m2x3_d2i); } cv::Mat i2d_mat(){ return cv::Mat(2, 3, CV_32F, i2d); } }; using ThreadSafedAsyncInferImpl = ThreadSafedAsyncInfer < cv::Mat, // input BoxArray, // output tuple<string, int>, // start param AffineMatrix // additional >; class YoloTRTInferImpl : public Infer, public ThreadSafedAsyncInferImpl{ public: /* @致青春 要求在TRTInferImpl里面执行stop,而不是在基类执行stop */ virtual ~YoloTRTInferImpl(){ stop(); } virtual bool startup(const string& file, Type type, int gpuid, float confidence_threshold, float nms_threshold){ if(type == Type::V5){ /* @致青春 归一化,获取归一化的参数,这里可以设置归一化参数 */ normalize_ = Norm::alpha_beta(1 / 255.0f, 0.0f, ChannelType::SwapRB); }else if(type == Type::X){ //float mean[] = {0.485, 0.456, 0.406}; //float std[] = {0.229, 0.224, 0.225}; //normalize_ = Norm::mean_std(mean, std, 1/255.0f, ChannelType::Invert); normalize_ = Norm::None(); }else{ INFOE("Unsupport type %d", type); } confidence_threshold_ = confidence_threshold; nms_threshold_ = nms_threshold; return ThreadSafedAsyncInferImpl::startup(make_tuple(file, gpuid)); } virtual void worker(promise<bool>& result) override{ /* @致青春 解析传入的参数,分别是模型路径和调用GPUid号 */ string file = get<0>(start_param_); int gpuid = get<1>(start_param_); /* @致青春 设置使用GPU */ set_device(gpuid); /* @致青春 加载模型反序列化,绑定cuda流,绑定输入输出等操作 */ auto engine = load_infer(file); if(engine == nullptr){ INFOE("Engine %s load failed", file.c_str()); result.set_value(false); return; } /* @致青春 打印引擎相关信息 */ engine->print(); /* @致青春 设置bbox的最大数 */ const int MAX_IMAGE_BBOX = 1024; /* @致青春 每个bbox的携带的数据 */ const int NUM_BOX_ELEMENT = 7; // left, top, right, bottom, confidence, class, keepflag /* @致青春 定义一个仿射矩阵的tensor */ Tensor affin_matrix_device; /* @致青春 定义一个输出的tensor */ Tensor output_array_device; /* @致青春 获取引擎的相关信息 */ int max_batch_size = engine->get_max_batch_size(); auto input = engine->tensor("images"); auto output = engine->tensor("output"); int num_classes = output->size(2) - 5; input_width_ = input->size(3); input_height_ = input->size(2); /* @致青春 分配GPU显存,显存的大小为max_batch_size * 2 */ 
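/*
   Why capacity = max_batch_size * 2 (explanatory note): with twice as many monopolized input
   tensors as the largest batch, preprocess() can keep filling tensors for the next batch
   while worker() is still consuming the previous one; each job releases its tensor right
   after its data has been copied into the engine input below, so the pool cycles without
   extra allocation.
*/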
tensor_allocator_ = make_shared<MonopolyAllocator<Tensor>>(max_batch_size * 2); stream_ = engine->get_stream(); gpu_ = gpuid; /* @致青春 执行下面的代码,会使得主线程继续执行, 在这里设置阻塞的原因,可能设计者任务初始化会慢于任务的到来 */ result.set_value(true); input->resize_single_dim(0, max_batch_size).to_gpu(); affin_matrix_device.set_stream(stream_); /* @致青春 这里8个值的目的是保证 8 * sizeof(float) % 32 == 0 */ affin_matrix_device.resize(max_batch_size, 8).to_gpu(); /* @致青春 这里的 1 + MAX_IMAGE_BBOX结构是,counter + bboxes ... */ output_array_device.resize(max_batch_size, 1 + MAX_IMAGE_BBOX * NUM_BOX_ELEMENT).to_gpu(); vector<Job> fetch_jobs; /* @致青春 上面的准备工作做完后,将等待预处理后的图片过来,进行处理 */ while(get_jobs_and_wait(fetch_jobs, max_batch_size)){ /* @致青春 一旦进来说明有图片数据 ,获取图片的张数 */ int infer_batch_size = fetch_jobs.size(); input->resize_single_dim(0, infer_batch_size); for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){ auto& job = fetch_jobs[ibatch]; auto& mono = job.mono_tensor->data(); affin_matrix_device.copy_from_gpu(affin_matrix_device.offset(ibatch), mono->get_workspace()->gpu(), 6); input->copy_from_gpu(input->offset(ibatch), mono->gpu(), mono->count()); job.mono_tensor->release(); } /* @致青春 开始推理 */ engine->forward(false); output_array_device.to_gpu(false); /* @致青春 下面进行解码,解码后面在详细研究 */ for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){ auto& job = fetch_jobs[ibatch]; float* image_based_output = output->gpu<float>(ibatch); float* output_array_ptr = output_array_device.gpu<float>(ibatch); auto affine_matrix = affin_matrix_device.gpu<float>(ibatch); checkCudaRuntime(cudaMemsetAsync(output_array_ptr, 0, sizeof(int), stream_)); decode_kernel_invoker(image_based_output, output->size(1), num_classes, confidence_threshold_, nms_threshold_, affine_matrix, output_array_ptr, MAX_IMAGE_BBOX, stream_); } output_array_device.to_cpu(); for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){ float* parray = output_array_device.cpu<float>(ibatch); int count = min(MAX_IMAGE_BBOX, (int)*parray); auto& job = fetch_jobs[ibatch]; auto& image_based_boxes = job.output; for(int i = 0; i < count; ++i){ float* pbox = parray + 1 + i * NUM_BOX_ELEMENT; int label = pbox[5]; int keepflag = pbox[6]; if(keepflag == 1){ image_based_boxes.emplace_back(pbox[0], pbox[1], pbox[2], pbox[3], pbox[4], label); } } job.pro->set_value(image_based_boxes); } fetch_jobs.clear(); } stream_ = nullptr; tensor_allocator_.reset(); INFO("Engine destroy."); } virtual bool preprocess(Job& job, const Mat& image) override{ if(tensor_allocator_ == nullptr){ INFOE("tensor_allocator_ is nullptr"); return false; } job.mono_tensor = tensor_allocator_->query(); if(job.mono_tensor == nullptr){ INFOE("Tensor allocator query failed."); return false; } /* @致青春 配置gpu */ AutoDevice auto_device(gpu_); /* @致青春 获取job里面的tensor的数据地址,第一次为nullptr */ auto& tensor = job.mono_tensor->data(); if(tensor == nullptr){ // not init tensor = make_shared<Tensor>(); tensor->set_workspace(make_shared<MixMemory>()); } /* @致青春 获取输入模型的shape, input_width_和input_height_在模型创建时从模型获取 */ Size input_size(input_width_, input_height_); /* @致青春 把当前的图片大小和模型所需的大小,输入进去获取仿射变换的矩阵 */ job.additional.compute(image.size(), input_size); /* @致青春 把tensor和流绑定,后续都会使用这个流进行处理,流的创建也是在模型创建时创建 */ tensor->set_stream(stream_); /* @致青春 把tensor resize一下,此时的tensor还未填充数据 */ tensor->resize(1, 3, input_height_, input_width_); /* @致青春 GPU的显存设置 主要考虑的是仿射矩阵和图片数据的传输,这里需要深入理为什么这样做? 
*/ /* @致青春 获取图片的大小 */ size_t size_image = image.cols * image.rows * 3; /* @致青春 获取仿射矩阵的大小,同时进行字节对齐 */ size_t size_matrix = upbound(sizeof(job.additional.d2i), 32); /* @致青春 获取创建内存的对象 */ auto workspace = tensor->get_workspace(); /* @致青春 创建GPU显存,并返回起始地址,同时获取的空间大小是图片和仿射矩阵一起的大小 */ uint8_t* gpu_workspace = (uint8_t*)workspace->gpu(size_matrix + size_image); /* @致青春 这里显存填充数据是通过先填充仿射矩阵的,在填充图片的数据,那么起始位置应该是仿射矩阵的地址,因此如下 */ float* affine_matrix_device = (float*)gpu_workspace; /* @致青春 显存起始地址加上仿射矩阵地址就是图片的地址,因此如下,下面的cpu的类似 */ uint8_t* image_device = size_matrix + gpu_workspace; uint8_t* cpu_workspace = (uint8_t*)workspace->cpu(size_matrix + size_image); float* affine_matrix_host = (float*)cpu_workspace; uint8_t* image_host = size_matrix + cpu_workspace; //checkCudaRuntime(cudaMemcpyAsync(image_host, image.data, size_image, cudaMemcpyHostToHost, stream_)); // speed up /* @致青春 具体的拷贝上述说明相同 */ memcpy(image_host, image.data, size_image); memcpy(affine_matrix_host, job.additional.d2i, sizeof(job.additional.d2i)); checkCudaRuntime(cudaMemcpyAsync(image_device, image_host, size_image, cudaMemcpyHostToDevice, stream_)); checkCudaRuntime(cudaMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(job.additional.d2i), cudaMemcpyHostToDevice, stream_)); /* @致青春 这里将开始进行仿射变换其中输入的主要是image_device和affine_matrix_device, 输出主要是tensor->gpu<float>() */ warp_affine_bilinear_and_normalize_plane( image_device, image.cols * 3, image.cols, image.rows, tensor->gpu<float>(), input_width_, input_height_, affine_matrix_device, 114, normalize_, stream_ ); /* @致青春 这里还需要说明一下 tensor的最终地址还是job里的地址,只是这块地址是固定的,两个batch的大小,因此这里处理完就结束了,但是 数据已经在job里了 inline void* gpu() const { ((Tensor*)this)->to_gpu(); return data_->gpu(); } */ return true; } virtual vector<shared_future<BoxArray>> commits(const vector<Mat>& images) override{ return ThreadSafedAsyncInferImpl::commits(images); } virtual std::shared_future<BoxArray> commit(const Mat& image) override{ return ThreadSafedAsyncInferImpl::commit(image); } private: int input_width_ = 0; int input_height_ = 0; int gpu_ = 0; float confidence_threshold_ = 0; float nms_threshold_ = 0; cudaStream_t stream_ = nullptr; Norm normalize_; }; void image_to_tensor(const cv::Mat& image, shared_ptr<Tensor>& tensor, Type type, int ibatch){ Norm normalize; if(type == Type::V5){ normalize = Norm::alpha_beta(1 / 255.0f, 0.0f, ChannelType::SwapRB); }else if(type == Type::X){ //float mean[] = {0.485, 0.456, 0.406}; //float std[] = {0.229, 0.224, 0.225}; //normalize_ = CUDAKernel::Norm::mean_std(mean, std, 1/255.0f, CUDAKernel::ChannelType::Invert); normalize = Norm::None(); }else{ INFOE("Unsupport type %d", type); } Size input_size(tensor->size(3), tensor->size(2)); AffineMatrix affine; affine.compute(image.size(), input_size); size_t size_image = image.cols * image.rows * 3; size_t size_matrix = upbound(sizeof(affine.d2i), 32); auto workspace = tensor->get_workspace(); uint8_t* gpu_workspace = (uint8_t*)workspace->gpu(size_matrix + size_image); float* affine_matrix_device = (float*)gpu_workspace; uint8_t* image_device = size_matrix + gpu_workspace; uint8_t* cpu_workspace = (uint8_t*)workspace->cpu(size_matrix + size_image); float* affine_matrix_host = (float*)cpu_workspace; uint8_t* image_host = size_matrix + cpu_workspace; auto stream = tensor->get_stream(); memcpy(image_host, image.data, size_image); memcpy(affine_matrix_host, affine.d2i, sizeof(affine.d2i)); checkCudaRuntime(cudaMemcpyAsync(image_device, image_host, size_image, cudaMemcpyHostToDevice, stream)); 
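/*
   Workspace layout note (descriptive): both preprocess() above and image_to_tensor() pack
   the inverse affine matrix and the raw image into one MixMemory block, mirrored on the
   pinned CPU side and the GPU side:

       [ d2i matrix, padded to 32 bytes ][ H x W x 3 uint8 image ]

   The pinned staging copy plus cudaMemcpyAsync on the tensor's stream keeps the upload
   asynchronous with respect to the host thread.
*/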
checkCudaRuntime(cudaMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(affine.d2i), cudaMemcpyHostToDevice, stream)); warp_affine_bilinear_and_normalize_plane( image_device, image.cols * 3, image.cols, image.rows, tensor->gpu<float>(ibatch), input_size.width, input_size.height, affine_matrix_device, 114, normalize, stream ); } shared_ptr<Infer> create_infer(const string& engine_file, Type type, int gpuid, float confidence_threshold, float nms_threshold){ /* @致青春 创建一个推理实例,该实例具备了引擎的创建、加载模型,反序列化,创建线程等一系列操作, */ shared_ptr<YoloTRTInferImpl> instance(new YoloTRTInferImpl()); if(!instance->startup(engine_file, type, gpuid, confidence_threshold, nms_threshold)){ instance.reset(); } return instance; } //////////////////////////////////////Compile Model///////////////////////////////////////////////////////////// const char* mode_string(Mode type) { switch (type) { case Mode::FP32: return "FP32"; case Mode::FP16: return "FP16"; case Mode::INT8: return "INT8"; default: return "UnknowCompileMode"; } } typedef std::function<void(int current, int count, const std::vector<std::string>& files, std::shared_ptr<Tensor>& tensor)> Int8Process; class Int8EntropyCalibrator : public IInt8EntropyCalibrator2{ public: Int8EntropyCalibrator(const vector<string>& imagefiles, nvinfer1::Dims dims, const Int8Process& preprocess) { Assert(preprocess != nullptr); this->dims_ = dims; this->allimgs_ = imagefiles; this->preprocess_ = preprocess; this->fromCalibratorData_ = false; files_.resize(dims.d[0]); checkCudaRuntime(cudaStreamCreate(&stream_)); } Int8EntropyCalibrator(const vector<uint8_t>& entropyCalibratorData, nvinfer1::Dims dims, const Int8Process& preprocess) { Assert(preprocess != nullptr); this->dims_ = dims; this->entropyCalibratorData_ = entropyCalibratorData; this->preprocess_ = preprocess; this->fromCalibratorData_ = true; files_.resize(dims.d[0]); checkCudaRuntime(cudaStreamCreate(&stream_)); } virtual ~Int8EntropyCalibrator(){ checkCudaRuntime(cudaStreamDestroy(stream_)); } int getBatchSize() const noexcept { return dims_.d[0]; } bool next() { int batch_size = dims_.d[0]; if (cursor_ + batch_size > allimgs_.size()) return false; int old_cursor = cursor_; for(int i = 0; i < batch_size; ++i) files_[i] = allimgs_[cursor_++]; if (!tensor_){ tensor_.reset(new Tensor(dims_.nbDims, dims_.d)); tensor_->set_stream(stream_); tensor_->set_workspace(make_shared<MixMemory>()); } preprocess_(old_cursor, allimgs_.size(), files_, tensor_); return true; } bool getBatch(void* bindings[], const char* names[], int nbBindings) noexcept { if (!next()) return false; bindings[0] = tensor_->gpu(); return true; } const vector<uint8_t>& getEntropyCalibratorData() { return entropyCalibratorData_; } const void* readCalibrationCache(size_t& length) noexcept { if (fromCalibratorData_) { length = this->entropyCalibratorData_.size(); return this->entropyCalibratorData_.data(); } length = 0; return nullptr; } virtual void writeCalibrationCache(const void* cache, size_t length) noexcept { entropyCalibratorData_.assign((uint8_t*)cache, (uint8_t*)cache + length); } private: Int8Process preprocess_; vector<string> allimgs_; size_t batchCudaSize_ = 0; int cursor_ = 0; nvinfer1::Dims dims_; vector<string> files_; shared_ptr<Tensor> tensor_; vector<uint8_t> entropyCalibratorData_; bool fromCalibratorData_ = false; cudaStream_t stream_ = nullptr; }; bool compile( Mode mode, Type type, unsigned int max_batch_size, const string& source_onnx, const string& saveto, size_t max_workspace_size, const std::string& int8_images_folder, const 
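/*
   INT8 calibration note (added commentary): TensorRT drives Int8EntropyCalibrator above by
   repeatedly calling getBatch(), which preprocesses the next dims.d[0] images into a Tensor
   and hands its GPU pointer back; readCalibrationCache/writeCalibrationCache let compile()
   reuse a previously generated calibration table instead of re-reading images. A
   hypothetical FP16 build that skips calibration entirely would look like:

       compile(Mode::FP16, Type::V5, 16, "yolov5s.onnx", "yolov5s.FP16.trtmodel",
               1u << 30, "", "");   // file names here are placeholders
*/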
std::string& int8_entropy_calibrator_cache_file) { bool hasEntropyCalibrator = false; vector<uint8_t> entropyCalibratorData; vector<string> entropyCalibratorFiles; auto int8process = [=](int current, int count, const vector<string>& files, shared_ptr<Tensor>& tensor){ for(int i = 0; i < files.size(); ++i){ auto& file = files[i]; INFO("Int8 load %d / %d, %s", current + i + 1, count, file.c_str()); auto image = cv::imread(file); if(image.empty()){ INFOE("Load image failed, %s", file.c_str()); continue; } image_to_tensor(image, tensor, type, i); } tensor->synchronize(); }; if (mode == Mode::INT8) { if (!int8_entropy_calibrator_cache_file.empty()) { if (exists(int8_entropy_calibrator_cache_file)) { entropyCalibratorData = load_file(int8_entropy_calibrator_cache_file); if (entropyCalibratorData.empty()) { INFOE("entropyCalibratorFile is set as: %s, but we read is empty.", int8_entropy_calibrator_cache_file.c_str()); return false; } hasEntropyCalibrator = true; } } if (hasEntropyCalibrator) { if (!int8_images_folder.empty()) { INFOW("int8_images_folder is ignore, when int8_entropy_calibrator_cache_file is set"); } } else { entropyCalibratorFiles = glob_image_files(int8_images_folder); if (entropyCalibratorFiles.empty()) { INFOE("Can not find any images(jpg/png/bmp/jpeg/tiff) from directory: %s", int8_images_folder.c_str()); return false; } if(entropyCalibratorFiles.size() < max_batch_size){ INFOW("Too few images provided, %d[provided] < %d[max batch size], image copy will be performed", entropyCalibratorFiles.size(), max_batch_size); for(int i = entropyCalibratorFiles.size(); i < max_batch_size; ++i) entropyCalibratorFiles.push_back(entropyCalibratorFiles[i % entropyCalibratorFiles.size()]); } } } else { if (hasEntropyCalibrator) { INFOW("int8_entropy_calibrator_cache_file is ignore, when Mode is '%s'", mode_string(mode)); } } INFO("Compile %s %s.", mode_string(mode), source_onnx.c_str()); shared_ptr<IBuilder> builder(createInferBuilder(gLogger), destroy_nvidia_pointer<IBuilder>); if (builder == nullptr) { INFOE("Can not create builder."); return false; } shared_ptr<IBuilderConfig> config(builder->createBuilderConfig(), destroy_nvidia_pointer<IBuilderConfig>); if (mode == Mode::FP16) { if (!builder->platformHasFastFp16()) { INFOW("Platform not have fast fp16 support"); } config->setFlag(BuilderFlag::kFP16); } else if (mode == Mode::INT8) { if (!builder->platformHasFastInt8()) { INFOW("Platform not have fast int8 support"); } config->setFlag(BuilderFlag::kINT8); } shared_ptr<INetworkDefinition> network; shared_ptr<nvonnxparser::IParser> onnxParser; const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); network = shared_ptr<INetworkDefinition>(builder->createNetworkV2(explicitBatch), destroy_nvidia_pointer<INetworkDefinition>); //from onnx is not markOutput onnxParser.reset(nvonnxparser::createParser(*network, gLogger), destroy_nvidia_pointer<nvonnxparser::IParser>); if (onnxParser == nullptr) { INFOE("Can not create parser."); return false; } if (!onnxParser->parseFromFile(source_onnx.c_str(), 1)) { INFOE("Can not parse OnnX file: %s", source_onnx.c_str()); return false; } auto inputTensor = network->getInput(0); auto inputDims = inputTensor->getDimensions(); shared_ptr<Int8EntropyCalibrator> int8Calibrator; if (mode == Mode::INT8) { auto calibratorDims = inputDims; calibratorDims.d[0] = max_batch_size; if (hasEntropyCalibrator) { INFO("Using exist entropy calibrator data[%d bytes]: %s", entropyCalibratorData.size(), 
int8_entropy_calibrator_cache_file.c_str()); int8Calibrator.reset(new Int8EntropyCalibrator( entropyCalibratorData, calibratorDims, int8process )); } else { INFO("Using image list[%d files]: %s", entropyCalibratorFiles.size(), int8_images_folder.c_str()); int8Calibrator.reset(new Int8EntropyCalibrator( entropyCalibratorFiles, calibratorDims, int8process )); } config->setInt8Calibrator(int8Calibrator.get()); } INFO("Input shape is %s", join_dims(vector<int>(inputDims.d, inputDims.d + inputDims.nbDims)).c_str()); INFO("Set max batch size = %d", max_batch_size); INFO("Set max workspace size = %.2f MB", max_workspace_size / 1024.0f / 1024.0f); int net_num_input = network->getNbInputs(); INFO("Network has %d inputs:", net_num_input); vector<string> input_names(net_num_input); for(int i = 0; i < net_num_input; ++i){ auto tensor = network->getInput(i); auto dims = tensor->getDimensions(); auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims)); INFO(" %d.[%s] shape is %s", i, tensor->getName(), dims_str.c_str()); input_names[i] = tensor->getName(); } int net_num_output = network->getNbOutputs(); INFO("Network has %d outputs:", net_num_output); for(int i = 0; i < net_num_output; ++i){ auto tensor = network->getOutput(i); auto dims = tensor->getDimensions(); auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims)); INFO(" %d.[%s] shape is %s", i, tensor->getName(), dims_str.c_str()); } int net_num_layers = network->getNbLayers(); INFO("Network has %d layers", net_num_layers); builder->setMaxBatchSize(max_batch_size); config->setMaxWorkspaceSize(max_workspace_size); auto profile = builder->createOptimizationProfile(); for(int i = 0; i < net_num_input; ++i){ auto input = network->getInput(i); auto input_dims = input->getDimensions(); input_dims.d[0] = 1; profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims); profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims); input_dims.d[0] = max_batch_size; profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims); } config->addOptimizationProfile(profile); INFO("Building engine..."); auto time_start = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count(); shared_ptr<ICudaEngine> engine(builder->buildEngineWithConfig(*network, *config), destroy_nvidia_pointer<ICudaEngine>); if (engine == nullptr) { INFOE("engine is nullptr"); return false; } if (mode == Mode::INT8) { if (!hasEntropyCalibrator) { if (!int8_entropy_calibrator_cache_file.empty()) { INFO("Save calibrator to: %s", int8_entropy_calibrator_cache_file.c_str()); save_file(int8_entropy_calibrator_cache_file, int8Calibrator->getEntropyCalibratorData()); } else { INFO("No set entropyCalibratorFile, and entropyCalibrator will not save."); } } } auto time_end = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count(); INFO("Build done %lld ms !", time_end - time_start); // serialize the engine, then close everything down shared_ptr<IHostMemory> seridata(engine->serialize(), destroy_nvidia_pointer<IHostMemory>); return save_file(saveto, seridata->data(), seridata->size()); } };
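/*
   End-to-end usage sketch (not part of the original file; the function names follow the
   interfaces defined above, everything else is illustrative, and BoxArray is assumed to be
   a std::vector-like container of boxes, as suggested by the emplace_back calls in worker()):

       // 1. Build an engine from ONNX once (FP32 here; INT8 would also need an image folder).
       compile(Mode::FP32, Type::V5, 16, "yolov5s.onnx", "yolov5s.trtmodel", 1u << 30, "", "");

       // 2. Create the asynchronous inferer and submit frames from any thread.
       auto yolo = create_infer("yolov5s.trtmodel", Type::V5, 0, 0.25f, 0.45f);
       if (yolo) {
           cv::Mat frame = cv::imread("demo.jpg");        // placeholder input
           auto boxes = yolo->commit(frame).get();        // already decoded and NMS-filtered
           printf("detected %d objects\n", (int)boxes.size());
       }
*/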
6f510f28a7fdd03ab07e8a41468ec1c3630e46c0.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

__global__ void addKernel01(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel02(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel03(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel04(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel05(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[i] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel06(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel07(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel08(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[32 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel09(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[40 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel10(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[16 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel11(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[8 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}
6f510f28a7fdd03ab07e8a41468ec1c3630e46c0.cu
#include <cuda.h>
#include <cuda_runtime.h>

__global__ void addKernel01(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel02(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel03(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel04(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel05(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[i] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel06(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel07(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[48 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel08(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[32 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel09(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[40 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel10(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[16 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}

__global__ void addKernel11(int *c, int *a, int *b, int repeat) {
    __shared__ unsigned char s[8 * 1024];
    int i = threadIdx.x;
    int j = i;
    for (int n = 0; n < repeat; n++) s[i % 64] = 1;
    for (int n = 0; n < repeat; n++) c[j] = a[i] + b[i] + s[i % 64];
}
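// A minimal host-side sketch (not part of the original pair) for exercising the kernels above.
// The eleven kernels are identical except for the size of their static __shared__ array
// (8-48 KB), so timing two of them with the same launch configuration exposes how the
// shared-memory footprint limits the number of resident blocks per SM. Kernel names and
// signatures come from the file above; the launch geometry and repeat count are arbitrary
// illustration values, and the sketch assumes it is compiled in the same translation unit as
// (or with declarations for) those kernels.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    const int threads = 64, blocks = 4096, repeat = 10000;
    int *a, *b, *c;
    cudaMalloc(&a, threads * sizeof(int));
    cudaMalloc(&b, threads * sizeof(int));
    cudaMalloc(&c, threads * sizeof(int));
    cudaMemset(a, 0, threads * sizeof(int));
    cudaMemset(b, 0, threads * sizeof(int));

    cudaEvent_t beg, end;
    cudaEventCreate(&beg);
    cudaEventCreate(&end);
    float ms48 = 0.f, ms8 = 0.f;

    cudaEventRecord(beg);
    addKernel01<<<blocks, threads>>>(c, a, b, repeat);   // 48 KB static shared memory per block
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&ms48, beg, end);

    cudaEventRecord(beg);
    addKernel11<<<blocks, threads>>>(c, a, b, repeat);   // 8 KB static shared memory per block
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&ms8, beg, end);

    printf("48 KB shared: %.3f ms, 8 KB shared: %.3f ms\n", ms48, ms8);

    cudaFree(a); cudaFree(b); cudaFree(c);
    cudaEventDestroy(beg); cudaEventDestroy(end);
    return 0;
}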
d4355409bd7049b3a2b2ac0d095bfcb2df710208.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2018 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/linear_updater.h> #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "coordinate_common.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); // training parameter struct GPUCoordinateTrainParam : public dmlc::Parameter<GPUCoordinateTrainParam> { /*! \brief learning_rate */ float learning_rate; /*! \brief regularization weight for L2 norm */ float reg_lambda; /*! \brief regularization weight for L1 norm */ float reg_alpha; int feature_selector; int top_k; int debug_verbose; int n_gpus; int gpu_id; bool silent; // declare parameters DMLC_DECLARE_PARAMETER(GPUCoordinateTrainParam) { DMLC_DECLARE_FIELD(learning_rate) .set_lower_bound(0.0f) .set_default(1.0f) .describe("Learning rate of each update."); DMLC_DECLARE_FIELD(reg_lambda) .set_lower_bound(0.0f) .set_default(0.0f) .describe("L2 regularization on weights."); DMLC_DECLARE_FIELD(reg_alpha) .set_lower_bound(0.0f) .set_default(0.0f) .describe("L1 regularization on weights."); DMLC_DECLARE_FIELD(feature_selector) .set_default(kCyclic) .add_enum("cyclic", kCyclic) .add_enum("shuffle", kShuffle) .add_enum("thrifty", kThrifty) .add_enum("greedy", kGreedy) .add_enum("random", kRandom) .describe("Feature selection or ordering method."); DMLC_DECLARE_FIELD(top_k).set_lower_bound(0).set_default(0).describe( "The number of top features to select in 'thrifty' feature_selector. " "The value of zero means using all the features."); DMLC_DECLARE_FIELD(debug_verbose) .set_lower_bound(0) .set_default(0) .describe("flag to print out detailed breakdown of runtime"); DMLC_DECLARE_FIELD(n_gpus).set_default(1).describe( "Number of devices to use."); DMLC_DECLARE_FIELD(gpu_id).set_default(0).describe( "Primary device ordinal."); DMLC_DECLARE_FIELD(silent).set_default(false).describe( "Do not print information during trainig."); // alias of parameters DMLC_DECLARE_ALIAS(learning_rate, eta); DMLC_DECLARE_ALIAS(reg_lambda, lambda); DMLC_DECLARE_ALIAS(reg_alpha, alpha); } /*! 
\brief Denormalizes the regularization penalties - to be called at each * update */ void DenormalizePenalties(double sum_instance_weight) { reg_lambda_denorm = reg_lambda * sum_instance_weight; reg_alpha_denorm = reg_alpha * sum_instance_weight; } // denormalizated regularization penalties float reg_lambda_denorm; float reg_alpha_denorm; }; void RescaleIndices(size_t ridx_begin, dh::DVec<Entry> *data) { auto d_data = data->Data(); dh::LaunchN(data->DeviceIdx(), data->Size(), [=] __device__(size_t idx) { d_data[idx].index -= ridx_begin; }); } class DeviceShard { int device_idx_; int normalised_device_idx_; // Device index counting from param.gpu_id dh::BulkAllocator<dh::MemoryType::kDevice> ba_; std::vector<size_t> row_ptr_; dh::DVec<Entry> data_; dh::DVec<GradientPair> gpair_; dh::CubMemory temp_; size_t ridx_begin_; size_t ridx_end_; public: DeviceShard(int device_idx, int normalised_device_idx, const SparsePage &batch, bst_uint row_begin, bst_uint row_end, const GPUCoordinateTrainParam &param, const gbm::GBLinearModelParam &model_param) : device_idx_(device_idx), normalised_device_idx_(normalised_device_idx), ridx_begin_(row_begin), ridx_end_(row_end) { dh::safe_cuda(hipSetDevice(device_idx)); // The begin and end indices for the section of each column associated with // this shard std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; for (auto fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.data(), col.data() + col.size(), Entry(row_begin, 0.0f), cmp); auto column_end = std::upper_bound(col.data(), col.data() + col.size(), Entry(row_end, 0.0f), cmp); column_segments.push_back( std::make_pair(column_begin - col.data(), column_end - col.data())); row_ptr_.push_back(row_ptr_.back() + column_end - column_begin); } ba_.Allocate(device_idx, param.silent, &data_, row_ptr_.back(), &gpair_, (row_end - row_begin) * model_param.num_output_group); for (int fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(hipMemcpy( data_.Data() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), hipMemcpyHostToDevice)); } // Rescale indices with respect to current shard RescaleIndices(ridx_begin_, &data_); } void UpdateGpair(const std::vector<GradientPair> &host_gpair, const gbm::GBLinearModelParam &model_param) { gpair_.copy(host_gpair.begin() + ridx_begin_ * model_param.num_output_group, host_gpair.begin() + ridx_end_ * model_param.num_output_group); } GradientPair GetBiasGradient(int group_idx, int num_group) { auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.tbegin(), skip); return dh::SumReduction(temp_, perm, ridx_end_ - ridx_begin_); } void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = gpair_.Data(); dh::LaunchN(device_idx_, ridx_end_ - ridx_begin_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } GradientPair GetGradient(int group_idx, int num_group, int fidx) { auto d_col = data_.Data() + row_ptr_[fidx]; size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; auto d_gpair = gpair_.Data(); auto counting = 
thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(temp_, multiply_iterator, col_size); } void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { auto d_gpair = gpair_.Data(); auto d_col = data_.Data() + row_ptr_[fidx]; size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(device_idx_, col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } }; /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { public: // set training parameter void Init( const std::vector<std::pair<std::string, std::string>> &args) override { param.InitAllowUnknown(args); selector.reset(FeatureSelector::Create(param.feature_selector)); monitor.Init("GPUCoordinateUpdater", param.debug_verbose); } void LazyInitShards(DMatrix *p_fmat, const gbm::GBLinearModelParam &model_param) { if (!shards.empty()) return; int n_devices = GPUSet::All(param.n_gpus, p_fmat->Info().num_row_).Size(); bst_uint row_begin = 0; bst_uint shard_size = ::ceil(static_cast<double>(p_fmat->Info().num_row_) / n_devices); device_list.resize(n_devices); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { int device_idx = GPUSet::GetDeviceIdx(param.gpu_id + d_idx); device_list[d_idx] = device_idx; } // Partition input matrix into row segments std::vector<size_t> row_segments; row_segments.push_back(0); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { bst_uint row_end = ::min(static_cast<size_t>(row_begin + shard_size), p_fmat->Info().num_row_); row_segments.push_back(row_end); row_begin = row_end; } CHECK(p_fmat->SingleColBlock()); const auto &batch = *p_fmat->GetColumnBatches().begin(); shards.resize(n_devices); // Create device shards dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { auto idx = &shard - &shards[0]; shard = std::unique_ptr<DeviceShard>( new DeviceShard(device_list[idx], idx, batch, row_segments[idx], row_segments[idx + 1], param, model_param)); }); } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { param.DenormalizePenalties(sum_instance_weight); monitor.Start("LazyInitShards"); this->LazyInitShards(p_fmat, model->param); monitor.Stop("LazyInitShards"); monitor.Start("UpdateGpair"); // Update gpair dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateGpair(in_gpair->ConstHostVector(), model->param); }); monitor.Stop("UpdateGpair"); monitor.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor.Stop("UpdateBias"); // prepare for updating the weights selector->Setup(*model, in_gpair->ConstHostVector(), p_fmat, param.reg_alpha_denorm, param.reg_lambda_denorm, param.top_k); monitor.Start("UpdateFeature"); for (auto group_idx = 0; group_idx < model->param.num_output_group; ++group_idx) { for (auto i = 0U; i < model->param.num_feature; i++) { auto fidx = selector->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, param.reg_alpha_denorm, param.reg_lambda_denorm); if (fidx < 0) break; 
this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->param.num_output_group; ++group_idx) { // Get gradient auto grad = dh::ReduceShards<GradientPair>( &shards, [&](std::unique_ptr<DeviceShard> &shard) { return shard->GetBiasGradient(group_idx, model->param.num_output_group); }); auto dbias = static_cast<float>( param.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->bias()[group_idx] += dbias; // Update residual dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateBiasResidual(dbias, group_idx, model->param.num_output_group); }); } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = dh::ReduceShards<GradientPair>( &shards, [&](std::unique_ptr<DeviceShard> &shard) { return shard->GetGradient(group_idx, model->param.num_output_group, fidx); }); auto dw = static_cast<float>(param.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, param.reg_alpha_denorm, param.reg_lambda_denorm)); w += dw; dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateResidual(dw, group_idx, model->param.num_output_group, fidx); }); } // training parameter GPUCoordinateTrainParam param; std::unique_ptr<FeatureSelector> selector; common::Monitor monitor; std::vector<std::unique_ptr<DeviceShard>> shards; std::vector<int> device_list; }; DMLC_REGISTER_PARAMETER(GPUCoordinateTrainParam); XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
d4355409bd7049b3a2b2ac0d095bfcb2df710208.cu
/*! * Copyright 2018 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/linear_updater.h> #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "coordinate_common.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); // training parameter struct GPUCoordinateTrainParam : public dmlc::Parameter<GPUCoordinateTrainParam> { /*! \brief learning_rate */ float learning_rate; /*! \brief regularization weight for L2 norm */ float reg_lambda; /*! \brief regularization weight for L1 norm */ float reg_alpha; int feature_selector; int top_k; int debug_verbose; int n_gpus; int gpu_id; bool silent; // declare parameters DMLC_DECLARE_PARAMETER(GPUCoordinateTrainParam) { DMLC_DECLARE_FIELD(learning_rate) .set_lower_bound(0.0f) .set_default(1.0f) .describe("Learning rate of each update."); DMLC_DECLARE_FIELD(reg_lambda) .set_lower_bound(0.0f) .set_default(0.0f) .describe("L2 regularization on weights."); DMLC_DECLARE_FIELD(reg_alpha) .set_lower_bound(0.0f) .set_default(0.0f) .describe("L1 regularization on weights."); DMLC_DECLARE_FIELD(feature_selector) .set_default(kCyclic) .add_enum("cyclic", kCyclic) .add_enum("shuffle", kShuffle) .add_enum("thrifty", kThrifty) .add_enum("greedy", kGreedy) .add_enum("random", kRandom) .describe("Feature selection or ordering method."); DMLC_DECLARE_FIELD(top_k).set_lower_bound(0).set_default(0).describe( "The number of top features to select in 'thrifty' feature_selector. " "The value of zero means using all the features."); DMLC_DECLARE_FIELD(debug_verbose) .set_lower_bound(0) .set_default(0) .describe("flag to print out detailed breakdown of runtime"); DMLC_DECLARE_FIELD(n_gpus).set_default(1).describe( "Number of devices to use."); DMLC_DECLARE_FIELD(gpu_id).set_default(0).describe( "Primary device ordinal."); DMLC_DECLARE_FIELD(silent).set_default(false).describe( "Do not print information during trainig."); // alias of parameters DMLC_DECLARE_ALIAS(learning_rate, eta); DMLC_DECLARE_ALIAS(reg_lambda, lambda); DMLC_DECLARE_ALIAS(reg_alpha, alpha); } /*! 
\brief Denormalizes the regularization penalties - to be called at each * update */ void DenormalizePenalties(double sum_instance_weight) { reg_lambda_denorm = reg_lambda * sum_instance_weight; reg_alpha_denorm = reg_alpha * sum_instance_weight; } // denormalizated regularization penalties float reg_lambda_denorm; float reg_alpha_denorm; }; void RescaleIndices(size_t ridx_begin, dh::DVec<Entry> *data) { auto d_data = data->Data(); dh::LaunchN(data->DeviceIdx(), data->Size(), [=] __device__(size_t idx) { d_data[idx].index -= ridx_begin; }); } class DeviceShard { int device_idx_; int normalised_device_idx_; // Device index counting from param.gpu_id dh::BulkAllocator<dh::MemoryType::kDevice> ba_; std::vector<size_t> row_ptr_; dh::DVec<Entry> data_; dh::DVec<GradientPair> gpair_; dh::CubMemory temp_; size_t ridx_begin_; size_t ridx_end_; public: DeviceShard(int device_idx, int normalised_device_idx, const SparsePage &batch, bst_uint row_begin, bst_uint row_end, const GPUCoordinateTrainParam &param, const gbm::GBLinearModelParam &model_param) : device_idx_(device_idx), normalised_device_idx_(normalised_device_idx), ridx_begin_(row_begin), ridx_end_(row_end) { dh::safe_cuda(cudaSetDevice(device_idx)); // The begin and end indices for the section of each column associated with // this shard std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; for (auto fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.data(), col.data() + col.size(), Entry(row_begin, 0.0f), cmp); auto column_end = std::upper_bound(col.data(), col.data() + col.size(), Entry(row_end, 0.0f), cmp); column_segments.push_back( std::make_pair(column_begin - col.data(), column_end - col.data())); row_ptr_.push_back(row_ptr_.back() + column_end - column_begin); } ba_.Allocate(device_idx, param.silent, &data_, row_ptr_.back(), &gpair_, (row_end - row_begin) * model_param.num_output_group); for (int fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(cudaMemcpy( data_.Data() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), cudaMemcpyHostToDevice)); } // Rescale indices with respect to current shard RescaleIndices(ridx_begin_, &data_); } void UpdateGpair(const std::vector<GradientPair> &host_gpair, const gbm::GBLinearModelParam &model_param) { gpair_.copy(host_gpair.begin() + ridx_begin_ * model_param.num_output_group, host_gpair.begin() + ridx_end_ * model_param.num_output_group); } GradientPair GetBiasGradient(int group_idx, int num_group) { auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.tbegin(), skip); return dh::SumReduction(temp_, perm, ridx_end_ - ridx_begin_); } void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = gpair_.Data(); dh::LaunchN(device_idx_, ridx_end_ - ridx_begin_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } GradientPair GetGradient(int group_idx, int num_group, int fidx) { auto d_col = data_.Data() + row_ptr_[fidx]; size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; auto d_gpair = gpair_.Data(); auto 
counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(temp_, multiply_iterator, col_size); } void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { auto d_gpair = gpair_.Data(); auto d_col = data_.Data() + row_ptr_[fidx]; size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(device_idx_, col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } }; /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { public: // set training parameter void Init( const std::vector<std::pair<std::string, std::string>> &args) override { param.InitAllowUnknown(args); selector.reset(FeatureSelector::Create(param.feature_selector)); monitor.Init("GPUCoordinateUpdater", param.debug_verbose); } void LazyInitShards(DMatrix *p_fmat, const gbm::GBLinearModelParam &model_param) { if (!shards.empty()) return; int n_devices = GPUSet::All(param.n_gpus, p_fmat->Info().num_row_).Size(); bst_uint row_begin = 0; bst_uint shard_size = std::ceil(static_cast<double>(p_fmat->Info().num_row_) / n_devices); device_list.resize(n_devices); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { int device_idx = GPUSet::GetDeviceIdx(param.gpu_id + d_idx); device_list[d_idx] = device_idx; } // Partition input matrix into row segments std::vector<size_t> row_segments; row_segments.push_back(0); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { bst_uint row_end = std::min(static_cast<size_t>(row_begin + shard_size), p_fmat->Info().num_row_); row_segments.push_back(row_end); row_begin = row_end; } CHECK(p_fmat->SingleColBlock()); const auto &batch = *p_fmat->GetColumnBatches().begin(); shards.resize(n_devices); // Create device shards dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { auto idx = &shard - &shards[0]; shard = std::unique_ptr<DeviceShard>( new DeviceShard(device_list[idx], idx, batch, row_segments[idx], row_segments[idx + 1], param, model_param)); }); } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { param.DenormalizePenalties(sum_instance_weight); monitor.Start("LazyInitShards"); this->LazyInitShards(p_fmat, model->param); monitor.Stop("LazyInitShards"); monitor.Start("UpdateGpair"); // Update gpair dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateGpair(in_gpair->ConstHostVector(), model->param); }); monitor.Stop("UpdateGpair"); monitor.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor.Stop("UpdateBias"); // prepare for updating the weights selector->Setup(*model, in_gpair->ConstHostVector(), p_fmat, param.reg_alpha_denorm, param.reg_lambda_denorm, param.top_k); monitor.Start("UpdateFeature"); for (auto group_idx = 0; group_idx < model->param.num_output_group; ++group_idx) { for (auto i = 0U; i < model->param.num_feature; i++) { auto fidx = selector->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, param.reg_alpha_denorm, param.reg_lambda_denorm); if 
(fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->param.num_output_group; ++group_idx) { // Get gradient auto grad = dh::ReduceShards<GradientPair>( &shards, [&](std::unique_ptr<DeviceShard> &shard) { return shard->GetBiasGradient(group_idx, model->param.num_output_group); }); auto dbias = static_cast<float>( param.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->bias()[group_idx] += dbias; // Update residual dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateBiasResidual(dbias, group_idx, model->param.num_output_group); }); } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = dh::ReduceShards<GradientPair>( &shards, [&](std::unique_ptr<DeviceShard> &shard) { return shard->GetGradient(group_idx, model->param.num_output_group, fidx); }); auto dw = static_cast<float>(param.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, param.reg_alpha_denorm, param.reg_lambda_denorm)); w += dw; dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateResidual(dw, group_idx, model->param.num_output_group, fidx); }); } // training parameter GPUCoordinateTrainParam param; std::unique_ptr<FeatureSelector> selector; common::Monitor monitor; std::vector<std::unique_ptr<DeviceShard>> shards; std::vector<int> device_list; }; DMLC_REGISTER_PARAMETER(GPUCoordinateTrainParam); XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
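/*
 * Sketch of the math behind the update loop above, inferred from the calls to
 * CoordinateDeltaBias/CoordinateDelta (their definitions live in coordinate_common.h and are not
 * shown here, so treat the exact L1 handling as an assumption rather than the library's code).
 *
 * For one output group, GetBiasGradient()/GetGradient() reduce the per-row gradient pairs on the
 * device into feature-level sums
 *
 *     G = sum_i g_i * x_if        H = sum_i h_i * x_if^2
 *
 * (for the bias, x_if == 1). The bias step is the plain Newton step dbias = -G / H, and the
 * per-weight step is the elastic-net coordinate step
 *
 *     dw ~= -(G + lambda * w) / (H + lambda)
 *
 * shrunk by the L1 penalty alpha (soft-thresholding) and clipped so that w + dw does not change
 * sign; both steps are scaled by the learning rate. After each step the shards fold the change
 * back into the residuals on the device, g_i += h_i * dw * x_if (UpdateResidual) and
 * g_i += h_i * dbias (UpdateBiasResidual), so the next selected feature sees up-to-date
 * gradients. lambda and alpha are the raw penalties scaled by the total instance weight in
 * DenormalizePenalties().
 */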
b2de17d62e4ef04f1f660d3c62bafe00146a7045.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <cmath> #include <math.h> #include <ctime> __global__ void findwindow (bool* mask_img, int* scores) { int wh_p = blockIdx.x * blockDim.x + threadIdx.x; // threadIdx.x; int ui = blockIdx.y * blockDim.y + threadIdx.y; // 60 int vi = blockIdx.z * blockDim.z + threadIdx.z; // 80 int wh = 16; int cwhp = wh_p; while(cwhp > 0) { wh*=2; cwhp--; } if (ui == 0 && vi == 0) { //printf("wh:%d; wh_p:%d \n", wh, wh_p); } int score_id = wh_p * 60 * 80 + ui * 80 + vi; int start_u = ui*8; //60 480 int start_v = vi*8; //80 640 int sc = 0; if (start_u + wh >= 480 || start_v + wh >= 640) { scores[score_id] = 0; return; } for (int cu = ui*8; cu < ui*8+wh; cu++) { for (int cv = vi*8; cv < vi*8+wh; cv++) { int mask_id = cu * 640 + cv; if (mask_img[mask_id]) { sc = sc+1; //printf("cu: %d; cv: %d; mask_id: %d\n" , cu, cv, mask_id); } } } scores[score_id] = sc; if (sc > 0 && ui == 0 && vi == 0) { //printf("sc: %d wh:%d ui:%d vi:%d \n", sc, wh, ui, vi); } } __global__ void loop_2d_bbox (int* scores, int* uvl) { //float mul = 1.3*1.3*1.3*1.3*1.3; float mul = 3*3*3*3*3; float final_c_best = 0.; //printf("start! \n"); for (int idx = 0; idx < 4; idx++) { //mul /= 3.5; mul /= 3; int icbest = 0; int csu = 0, csv = 0, cwh = 0; for (int i = 0; i < 60; i++) { for (int j = 0; j < 80; j++) { if (scores[idx*4800+i*80+j] > icbest) { icbest = scores[idx*4800+i*80+j]; csu = i*8; csv = j*8; cwh = idx; //printf("icbest: %d su: %d, sv: %d, wh: %d\n", icbest, csu, csv, cwh); } } } if (float(icbest)*mul > final_c_best) { final_c_best = float(icbest)*mul; uvl[0] = csu; uvl[1] = csv; uvl[2] = cwh; //printf("su: %d, sv: %d, wh: %d\n", csu, csv, cwh); } } } void rgb_window(bool* mask_img, int* scores, int* uvl) { //dim3 grid(4, 1, 1); //dim3 block(1, 60, 80); dim3 grid(4, 60, 80); dim3 block(1, 1, 1); hipLaunchKernelGGL(( findwindow), dim3(grid), dim3(block), 0, 0, mask_img, scores); dim3 loop_grid(1, 1, 1); dim3 loop_block(1, 1, 1); hipLaunchKernelGGL(( loop_2d_bbox), dim3(loop_grid), dim3(loop_block), 0, 0, scores, uvl); hipDeviceSynchronize(); } __global__ void para_find_loc (float* pts, int ptnum, int* scores, float* xyz_limits) { int d_ix = blockIdx.x * blockDim.x + threadIdx.x; int d_iy = blockIdx.y * blockDim.y + threadIdx.y; int d_iz = blockIdx.z * blockDim.z + threadIdx.z; //printf("d_ix: %d d_iy: %d d_iz: %d\n", d_ix, d_iy, d_iz); float start_x = xyz_limits[0]; float start_y = xyz_limits[2]; float start_z = xyz_limits[4]; //printf("start_x: %.0f start_y: %.0f start_z: %.0f\n", start_x, start_y, start_z); float end_x = xyz_limits[1]; float end_y = xyz_limits[3]; float end_z = xyz_limits[5]; float cx = start_x + d_ix*10; float cy = start_y + d_iy*10; float cz = start_z + d_iz*10; if (cx > end_x || cy > end_y || cz > end_z) { //printf("cx: %.0f cy: %.0f cz: %.0f end_x: %.0f end_y: %.0f end_z: %.0f ", cx, cy, cz, end_x, end_y, end_z); scores[d_ix*100*400+d_iy*400+d_iz] = 0; return; } //printf("cx: %.0f cy: %.0f cz: %.0f end_x: %.0f end_y: %.0f end_z: %.0f \n", cx, cy, cz, end_x, end_y, end_z); int cnt = 0; for(int i = 0; i < ptnum; i++) { float tx = pts[i*3]; float ty = pts[i*3+1]; float tz = pts[i*3+2]; if (tz > cz) continue; float d2c = sqrt((tx-cx)*(tx-cx) + (ty-cy)*(ty-cy) + (tz-cz)*(tz-cz)); //printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); /* if (d2c < 1000) { printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); } */ //printf("tx: %.0f ty: %.0f tz: %.0f cx: %.0f cy: %.0f cz: %.0f 
\n", tx, ty, tz, cx, cy, cz); //printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); if (d2c >= 50 && d2c <= 53 ) { cnt += 1; } } scores[d_ix*100*400+d_iy*400+d_iz] = cnt; } __global__ void find_best_score (int* scores, float* xyz_limits, float* device_pred_xyz) { int c_best = 0; device_pred_xyz[0] = -10000; device_pred_xyz[1] = -10000; device_pred_xyz[2] = -10000; int ixmax = int((xyz_limits[1] - xyz_limits[0])/10); if (ixmax > 100) ixmax = 100; int iymax = int((xyz_limits[3] - xyz_limits[2])/10); if (iymax > 100) iymax = 100; int izmax = int((xyz_limits[5] - xyz_limits[4])/10); //if (izmax > 400) izmax = 400; if (izmax > 100) izmax = 100; printf("ixmax : %d; iymax : %d; izmax : %d\n", ixmax, iymax, izmax); for (int ix = 0; ix < ixmax; ix++) { for (int iy = 0; iy < iymax; iy++) { for (int iz = 0; iz < izmax; iz++) { //c_best = c_best > scores[ix*100*400+iy*400+iz] ? c_best : scores[ix*100*400+iy*400+iz]; if (c_best < scores[ix*100*400+iy*400+iz]) { c_best = scores[ix*100*400+iy*400+iz]; device_pred_xyz[0] = xyz_limits[0] + 10*ix; device_pred_xyz[1] = xyz_limits[2] + 10*iy; device_pred_xyz[2] = xyz_limits[4] + 10*iz; //printf("Score: %d x: %.0f y: %.0f z:%.0f \n", c_best, device_pred_xyz[0], device_pred_xyz[1], device_pred_xyz[2]); } } } } } void find_loc(float* pts, int ptnum, int* scores, float* xyz_limits, float* device_pred_xyz) { //dim3 grid(10, 100, 1); //dim3 block(10, 1, 400); dim3 grid(100, 100, 2); dim3 block(1, 1, 50); std::clock_t start, end; start = std::clock(); hipLaunchKernelGGL(( para_find_loc), dim3(grid), dim3(block), 0, 0, pts, ptnum, scores, xyz_limits); end = std::clock(); printf("para_find_loc: %.3f ms\n", 1000. * (end - start)/CLOCKS_PER_SEC); start = std::clock(); hipLaunchKernelGGL(( find_best_score), dim3(1), dim3(1), 0, 0, scores, xyz_limits, device_pred_xyz); end = std::clock(); printf("find_best_score: %.3f ms\n", 1000. 
* (end - start)/CLOCKS_PER_SEC); hipDeviceSynchronize(); } __global__ void para_find_loc_fine (float* pts, int ptnum, int* scores, float* xyz_limits) { int d_ix = blockIdx.x * blockDim.x + threadIdx.x; int d_iy = blockIdx.y * blockDim.y + threadIdx.y; int d_iz = blockIdx.z * blockDim.z + threadIdx.z; //printf("d_ix: %d d_iy: %d d_iz: %d\n", d_ix, d_iy, d_iz); float start_x = xyz_limits[0]; float start_y = xyz_limits[2]; float start_z = xyz_limits[4]; //printf("start_x: %.0f start_y: %.0f start_z: %.0f\n", start_x, start_y, start_z); float end_x = xyz_limits[1]; float end_y = xyz_limits[3]; float end_z = xyz_limits[5]; /* float cx = start_x + d_ix*10; float cy = start_y + d_iy*10; float cz = start_z + d_iz*10; */ float cx = start_x + d_ix*5; float cy = start_y + d_iy*5; float cz = start_z + d_iz*5; if (cx > end_x || cy > end_y || cz > end_z) { //printf("cx: %.0f cy: %.0f cz: %.0f end_x: %.0f end_y: %.0f end_z: %.0f ", cx, cy, cz, end_x, end_y, end_z); scores[d_ix*100*400+d_iy*400+d_iz] = 0; return; } //printf("cx: %.0f cy: %.0f cz: %.0f end_x: %.0f end_y: %.0f end_z: %.0f \n", cx, cy, cz, end_x, end_y, end_z); int cnt = 0; for(int i = 0; i < ptnum; i++) { float tx = pts[i*3]; float ty = pts[i*3+1]; float tz = pts[i*3+2]; if (tz > cz) continue; float d2c = sqrt((tx-cx)*(tx-cx) + (ty-cy)*(ty-cy) + (tz-cz)*(tz-cz)); //printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); /* if (d2c < 1000) { printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); } */ //printf("tx: %.0f ty: %.0f tz: %.0f cx: %.0f cy: %.0f cz: %.0f \n", tx, ty, tz, cx, cy, cz); //printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); if (d2c >= 51 && d2c <= 54 ) { cnt += 1; } } scores[d_ix*100*400+d_iy*400+d_iz] = cnt; } __global__ void find_best_score_fine (int* scores, float* xyz_limits, float* device_pred_xyz) { int c_best = 0; device_pred_xyz[0] = -10000; device_pred_xyz[1] = -10000; device_pred_xyz[2] = -10000; int ixmax = int((xyz_limits[1] - xyz_limits[0])/2); if (ixmax > 100) ixmax = 100; int iymax = int((xyz_limits[3] - xyz_limits[2])/2); if (iymax > 100) iymax = 100; int izmax = int((xyz_limits[5] - xyz_limits[4])/2); if (izmax > 400) izmax = 400; printf("ixmax : %d; iymax : %d; izmax : %d\n", ixmax, iymax, izmax); for (int ix = 0; ix < ixmax; ix++) { for (int iy = 0; iy < iymax; iy++) { for (int iz = 0; iz < izmax; iz++) { //c_best = c_best > scores[ix*100*400+iy*400+iz] ? c_best : scores[ix*100*400+iy*400+iz]; if (c_best < scores[ix*100*400+iy*400+iz]) { c_best = scores[ix*100*400+iy*400+iz]; device_pred_xyz[0] = xyz_limits[0] + 5*ix; device_pred_xyz[1] = xyz_limits[2] + 5*iy; device_pred_xyz[2] = xyz_limits[4] + 5*iz; //printf("Score: %d x: %.0f y: %.0f z:%.0f \n", c_best, device_pred_xyz[0], device_pred_xyz[1], device_pred_xyz[2]); } } } } } void find_loc_fine(float* pts, int ptnum, int* scores, float* xyz_limits, float* device_pred_xyz) { //dim3 grid(10, 100, 1); //dim3 block(10, 1, 400); dim3 grid(100, 10, 8); dim3 block(1, 10, 50); hipLaunchKernelGGL(( para_find_loc_fine), dim3(grid), dim3(block), 0, 0, pts, ptnum, scores, xyz_limits); hipLaunchKernelGGL(( find_best_score), dim3(1), dim3(1), 0, 0, scores, xyz_limits, device_pred_xyz); hipDeviceSynchronize(); }
b2de17d62e4ef04f1f660d3c62bafe00146a7045.cu
#include <stdio.h> #include <cmath> #include <math.h> #include <ctime> __global__ void findwindow (bool* mask_img, int* scores) { int wh_p = blockIdx.x * blockDim.x + threadIdx.x; // threadIdx.x; int ui = blockIdx.y * blockDim.y + threadIdx.y; // 60 int vi = blockIdx.z * blockDim.z + threadIdx.z; // 80 int wh = 16; int cwhp = wh_p; while(cwhp > 0) { wh*=2; cwhp--; } if (ui == 0 && vi == 0) { //printf("wh: %d; wh_p: %d \n", wh, wh_p); } int score_id = wh_p * 60 * 80 + ui * 80 + vi; int start_u = ui*8; //60 480 int start_v = vi*8; //80 640 int sc = 0; if (start_u + wh >= 480 || start_v + wh >= 640) { scores[score_id] = 0; return; } for (int cu = ui*8; cu < ui*8+wh; cu++) { for (int cv = vi*8; cv < vi*8+wh; cv++) { int mask_id = cu * 640 + cv; if (mask_img[mask_id]) { sc = sc+1; //printf("cu: %d; cv: %d; mask_id: %d\n" , cu, cv, mask_id); } } } scores[score_id] = sc; if (sc > 0 && ui == 0 && vi == 0) { //printf("sc: %d wh: %d ui: %d vi: %d \n", sc, wh, ui, vi); } } __global__ void loop_2d_bbox (int* scores, int* uvl) { //float mul = 1.3*1.3*1.3*1.3*1.3; float mul = 3*3*3*3*3; float final_c_best = 0.; //printf("start! \n"); for (int idx = 0; idx < 4; idx++) { //mul /= 3.5; mul /= 3; int icbest = 0; int csu = 0, csv = 0, cwh = 0; for (int i = 0; i < 60; i++) { for (int j = 0; j < 80; j++) { if (scores[idx*4800+i*80+j] > icbest) { icbest = scores[idx*4800+i*80+j]; csu = i*8; csv = j*8; cwh = idx; //printf("icbest: %d su: %d, sv: %d, wh: %d\n", icbest, csu, csv, cwh); } } } if (float(icbest)*mul > final_c_best) { final_c_best = float(icbest)*mul; uvl[0] = csu; uvl[1] = csv; uvl[2] = cwh; //printf("su: %d, sv: %d, wh: %d\n", csu, csv, cwh); } } } void rgb_window(bool* mask_img, int* scores, int* uvl) { //dim3 grid(4, 1, 1); //dim3 block(1, 60, 80); dim3 grid(4, 60, 80); dim3 block(1, 1, 1); findwindow<<<grid, block>>>(mask_img, scores); dim3 loop_grid(1, 1, 1); dim3 loop_block(1, 1, 1); loop_2d_bbox<<<loop_grid, loop_block>>>(scores, uvl); cudaDeviceSynchronize(); } __global__ void para_find_loc (float* pts, int ptnum, int* scores, float* xyz_limits) { int d_ix = blockIdx.x * blockDim.x + threadIdx.x; int d_iy = blockIdx.y * blockDim.y + threadIdx.y; int d_iz = blockIdx.z * blockDim.z + threadIdx.z; //printf("d_ix: %d d_iy: %d d_iz: %d\n", d_ix, d_iy, d_iz); float start_x = xyz_limits[0]; float start_y = xyz_limits[2]; float start_z = xyz_limits[4]; //printf("start_x: %.0f start_y: %.0f start_z: %.0f\n", start_x, start_y, start_z); float end_x = xyz_limits[1]; float end_y = xyz_limits[3]; float end_z = xyz_limits[5]; float cx = start_x + d_ix*10; float cy = start_y + d_iy*10; float cz = start_z + d_iz*10; if (cx > end_x || cy > end_y || cz > end_z) { //printf("cx: %.0f cy: %.0f cz: %.0f end_x: %.0f end_y: %.0f end_z: %.0f ", cx, cy, cz, end_x, end_y, end_z); scores[d_ix*100*400+d_iy*400+d_iz] = 0; return; } //printf("cx: %.0f cy: %.0f cz: %.0f end_x: %.0f end_y: %.0f end_z: %.0f \n", cx, cy, cz, end_x, end_y, end_z); int cnt = 0; for(int i = 0; i < ptnum; i++) { float tx = pts[i*3]; float ty = pts[i*3+1]; float tz = pts[i*3+2]; if (tz > cz) continue; float d2c = sqrt((tx-cx)*(tx-cx) + (ty-cy)*(ty-cy) + (tz-cz)*(tz-cz)); //printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); /* if (d2c < 1000) { printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); } */ //printf("tx: %.0f ty: %.0f tz: %.0f cx: %.0f cy: %.0f cz: %.0f \n", tx, ty, tz, cx, cy, cz); //printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); if (d2c >= 50 && d2c <= 53 ) { cnt += 1; } } 
scores[d_ix*100*400+d_iy*400+d_iz] = cnt; } __global__ void find_best_score (int* scores, float* xyz_limits, float* device_pred_xyz) { int c_best = 0; device_pred_xyz[0] = -10000; device_pred_xyz[1] = -10000; device_pred_xyz[2] = -10000; int ixmax = int((xyz_limits[1] - xyz_limits[0])/10); if (ixmax > 100) ixmax = 100; int iymax = int((xyz_limits[3] - xyz_limits[2])/10); if (iymax > 100) iymax = 100; int izmax = int((xyz_limits[5] - xyz_limits[4])/10); //if (izmax > 400) izmax = 400; if (izmax > 100) izmax = 100; printf("ixmax : %d; iymax : %d; izmax : %d\n", ixmax, iymax, izmax); for (int ix = 0; ix < ixmax; ix++) { for (int iy = 0; iy < iymax; iy++) { for (int iz = 0; iz < izmax; iz++) { //c_best = c_best > scores[ix*100*400+iy*400+iz] ? c_best : scores[ix*100*400+iy*400+iz]; if (c_best < scores[ix*100*400+iy*400+iz]) { c_best = scores[ix*100*400+iy*400+iz]; device_pred_xyz[0] = xyz_limits[0] + 10*ix; device_pred_xyz[1] = xyz_limits[2] + 10*iy; device_pred_xyz[2] = xyz_limits[4] + 10*iz; //printf("Score: %d x: %.0f y: %.0f z:%.0f \n", c_best, device_pred_xyz[0], device_pred_xyz[1], device_pred_xyz[2]); } } } } } void find_loc(float* pts, int ptnum, int* scores, float* xyz_limits, float* device_pred_xyz) { //dim3 grid(10, 100, 1); //dim3 block(10, 1, 400); dim3 grid(100, 100, 2); dim3 block(1, 1, 50); std::clock_t start, end; start = std::clock(); para_find_loc<<<grid, block>>>(pts, ptnum, scores, xyz_limits); end = std::clock(); printf("para_find_loc: %.3f ms\n", 1000. * (end - start)/CLOCKS_PER_SEC); start = std::clock(); find_best_score<<<1, 1>>>(scores, xyz_limits, device_pred_xyz); end = std::clock(); printf("find_best_score: %.3f ms\n", 1000. * (end - start)/CLOCKS_PER_SEC); cudaDeviceSynchronize(); } __global__ void para_find_loc_fine (float* pts, int ptnum, int* scores, float* xyz_limits) { int d_ix = blockIdx.x * blockDim.x + threadIdx.x; int d_iy = blockIdx.y * blockDim.y + threadIdx.y; int d_iz = blockIdx.z * blockDim.z + threadIdx.z; //printf("d_ix: %d d_iy: %d d_iz: %d\n", d_ix, d_iy, d_iz); float start_x = xyz_limits[0]; float start_y = xyz_limits[2]; float start_z = xyz_limits[4]; //printf("start_x: %.0f start_y: %.0f start_z: %.0f\n", start_x, start_y, start_z); float end_x = xyz_limits[1]; float end_y = xyz_limits[3]; float end_z = xyz_limits[5]; /* float cx = start_x + d_ix*10; float cy = start_y + d_iy*10; float cz = start_z + d_iz*10; */ float cx = start_x + d_ix*5; float cy = start_y + d_iy*5; float cz = start_z + d_iz*5; if (cx > end_x || cy > end_y || cz > end_z) { //printf("cx: %.0f cy: %.0f cz: %.0f end_x: %.0f end_y: %.0f end_z: %.0f ", cx, cy, cz, end_x, end_y, end_z); scores[d_ix*100*400+d_iy*400+d_iz] = 0; return; } //printf("cx: %.0f cy: %.0f cz: %.0f end_x: %.0f end_y: %.0f end_z: %.0f \n", cx, cy, cz, end_x, end_y, end_z); int cnt = 0; for(int i = 0; i < ptnum; i++) { float tx = pts[i*3]; float ty = pts[i*3+1]; float tz = pts[i*3+2]; if (tz > cz) continue; float d2c = sqrt((tx-cx)*(tx-cx) + (ty-cy)*(ty-cy) + (tz-cz)*(tz-cz)); //printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); /* if (d2c < 1000) { printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); } */ //printf("tx: %.0f ty: %.0f tz: %.0f cx: %.0f cy: %.0f cz: %.0f \n", tx, ty, tz, cx, cy, cz); //printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); if (d2c >= 51 && d2c <= 54 ) { cnt += 1; } } scores[d_ix*100*400+d_iy*400+d_iz] = cnt; } __global__ void find_best_score_fine (int* scores, float* xyz_limits, float* device_pred_xyz) { int c_best = 0; 
device_pred_xyz[0] = -10000; device_pred_xyz[1] = -10000; device_pred_xyz[2] = -10000; int ixmax = int((xyz_limits[1] - xyz_limits[0])/2); if (ixmax > 100) ixmax = 100; int iymax = int((xyz_limits[3] - xyz_limits[2])/2); if (iymax > 100) iymax = 100; int izmax = int((xyz_limits[5] - xyz_limits[4])/2); if (izmax > 400) izmax = 400; printf("ixmax : %d; iymax : %d; izmax : %d\n", ixmax, iymax, izmax); for (int ix = 0; ix < ixmax; ix++) { for (int iy = 0; iy < iymax; iy++) { for (int iz = 0; iz < izmax; iz++) { //c_best = c_best > scores[ix*100*400+iy*400+iz] ? c_best : scores[ix*100*400+iy*400+iz]; if (c_best < scores[ix*100*400+iy*400+iz]) { c_best = scores[ix*100*400+iy*400+iz]; device_pred_xyz[0] = xyz_limits[0] + 5*ix; device_pred_xyz[1] = xyz_limits[2] + 5*iy; device_pred_xyz[2] = xyz_limits[4] + 5*iz; //printf("Score: %d x: %.0f y: %.0f z:%.0f \n", c_best, device_pred_xyz[0], device_pred_xyz[1], device_pred_xyz[2]); } } } } } void find_loc_fine(float* pts, int ptnum, int* scores, float* xyz_limits, float* device_pred_xyz) { //dim3 grid(10, 100, 1); //dim3 block(10, 1, 400); dim3 grid(100, 10, 8); dim3 block(1, 10, 50); para_find_loc_fine<<<grid, block>>>(pts, ptnum, scores, xyz_limits); find_best_score<<<1, 1>>>(scores, xyz_limits, device_pred_xyz); cudaDeviceSynchronize(); }
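// Note on the timing in find_loc() above (both the .hip and .cu versions): kernel launches are
// asynchronous, so reading std::clock() right after the launch measures only launch overhead,
// and the single device synchronization at the very end means the printed per-kernel times are
// not meaningful. A minimal corrected sketch with the same signature, using CUDA events; the
// name find_loc_timed is illustrative only, and the sketch assumes it is compiled in the same
// translation unit as para_find_loc and find_best_score.
#include <cstdio>
#include <cuda_runtime.h>

void find_loc_timed(float* pts, int ptnum, int* scores, float* xyz_limits, float* device_pred_xyz) {
    dim3 grid(100, 100, 2), block(1, 1, 50);
    cudaEvent_t t0, t1, t2;
    cudaEventCreate(&t0); cudaEventCreate(&t1); cudaEventCreate(&t2);

    cudaEventRecord(t0);
    para_find_loc<<<grid, block>>>(pts, ptnum, scores, xyz_limits);
    cudaEventRecord(t1);
    find_best_score<<<1, 1>>>(scores, xyz_limits, device_pred_xyz);
    cudaEventRecord(t2);
    cudaEventSynchronize(t2);                 // wait for both kernels to finish

    float ms_loc = 0.f, ms_best = 0.f;
    cudaEventElapsedTime(&ms_loc, t0, t1);    // GPU time of para_find_loc
    cudaEventElapsedTime(&ms_best, t1, t2);   // GPU time of find_best_score
    printf("para_find_loc: %.3f ms\nfind_best_score: %.3f ms\n", ms_loc, ms_best);

    cudaEventDestroy(t0); cudaEventDestroy(t1); cudaEventDestroy(t2);
}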
857949bc32c8af323a0c4fa9adfe058e503c5c55.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <gputiger/boltzmann.hpp> __device__ float sigma8_integrand::operator()(float x) const { const float R = 8 / littleh; const float c0 = float(9) / (2.f * float(M_PI) * float(M_PI)) / pow(R, 6); float k = EXP(x); cos_state U; einstein_boltzmann_init(&U, uni, k, 1.f, uni->amin); einstein_boltzmann(&U, uni, k, uni->amin, 1.f); float oc = opts.omega_c; float ob = opts.omega_b; float P = POW((oc*U[deltaci] + ob*U[deltabi])/(oc+ob), 2); return c0 * P * POW((SIN(k*R) - k * R *COS(k*R)), 2) * pow(k, -3); } __device__ void einstein_boltzmann_init(cos_state* uptr, const zero_order_universe* uni_ptr, float k, float normalization, float a) { cos_state& U = *uptr; const zero_order_universe& uni = *uni_ptr; const nvstd::function<float(float)>& Hubble = uni.hubble; float Oc, Ob, Ogam, Onu, Or; uni.compute_radiation_fractions(Ogam, Onu, a); uni.compute_matter_fractions(Oc, Ob, a); Or = Ogam + Onu; float hubble = Hubble(a); float eps = k / (a * hubble); float C = (float) 1.0 * POW(eps, (float ) -1.5) * normalization; float Rnu = Onu / Or; U[taui] = (float) 1.0 / (a * hubble); U[deltanui] = U[deltagami] = -(float) 2.0 / (float) 3.0 * C * eps * eps; U[deltabi] = U[deltaci] = (float) 3.0 / (float) 4.0 * U[deltagami]; U[thetabi] = U[thetagami] = -C / (float) 18.0 * eps * eps * eps; U[thetanui] = ((float) 23 + (float) 4 * Rnu) / ((float) 15 + (float) 4 * Rnu) * U[thetagami]; U[N2i] = (float) 0.5 * ((float) 4.0 * C) / ((float) 3.0 * ((float) 15 + (float) 4 * Rnu)) * eps * eps; U[hdoti] = (float) (float) 2.0 * C * eps * eps; U[G0i] = U[G1i] = U[G2i] = U[F2i] = (float) 0.0; for (int l = 3; l < LMAX; l++) { U[FLi + l] = (float) 0.0; U[NLi + l] = (float) 0.0; U[GLi + l] = (float) 0.0; } U[etai] = ((float) 0.5 * U[hdoti] - ((float) 1.5 * (Oc * U[deltaci] + Ob * U[deltabi]) + (float) 1.5 * (Ogam * U[deltagami] + Onu * U[deltanui]))) / (eps * eps); } __device__ void einstein_boltzmann(cos_state* uptr, const zero_order_universe *uni_ptr, float k, float amin, float amax) { const auto &uni = *uni_ptr; if (amin < uni.amin && amax > uni.amax) { printf("out of range error in einstein_boltzmann\n"); } cos_state& U = *uptr; cos_state U0; const nvstd::function<float(float)>& Hubble = uni.hubble; float loga = LOG(amin); float logamax = LOG(amax); float omega_m = opts.omega_b + opts.omega_c; float omega_r = opts.omega_gam + opts.omega_nu; while (loga < logamax) { float Oc, Ob, Ogam, Onu, Or; float a = EXP(loga); float hubble = Hubble(a); float eps = k / (a * hubble); uni.compute_radiation_fractions(Ogam, Onu, a); uni.compute_matter_fractions(Oc, Ob, a); Or = Ogam + Onu; float cs2 = uni.cs2(a); float lambda_i = 0.0; lambda_i = max(lambda_i, SQRT( ((float) LMAX + (float) 1.0) / ((float) LMAX + (float) 3.0)) * eps); lambda_i = max(lambda_i, SQRT( (float) 3.0 * POW(eps, 4) + (float) 8.0 * eps * eps * Or) / SQRT((float ) 5) / eps); float lambda_r = (eps + SQRT(eps * eps + (float) 4.0 * cs2 * POW(eps, (float) 4))) / ((float) 2.0 * eps); float dloga_i = (float) 2.0 * (float) 1.73 / lambda_i; float dloga_r = (float) 2.0 * (float) 2.51 / lambda_r; float dloga = min(min((float) 5e-2, min((float) 0.9 * dloga_i, (float) 0.9 * dloga_r)), logamax - loga); float loga0 = loga; const auto compute_explicit = [&](int step) { U0 = U; cos_state dudt; constexpr float beta[3] = {1, 0.25, (2.0 / 3.0)}; constexpr float tm[3] = {0, 1, 0.5}; for (int i = 0; i < 3; i++) { loga = loga0 + (float) 0.5 * (tm[i] + step) * dloga; a = EXP(loga); hubble = Hubble(a); eps = 
k / (a * hubble); uni.compute_radiation_fractions(Ogam,Onu,a); uni.compute_matter_fractions(Oc,Ob,a); Or = Ogam + Onu; cs2 = uni.cs2(a); dudt[taui] = (float) 1.0 / (a * hubble); dudt[etai] = ((float) 1.5 * ((Ob * U[thetabi]) + ((float) 4.0 / (float) 3.0) * (Ogam * U[thetagami] + Onu * U[thetanui])) / eps); float factor = ((a * omega_m) + (float) 4 * a * a * a * a * ((float) 1 - omega_m - omega_r)) / ((float) 2 * a * omega_m + (float) 2 * omega_r + (float) 2 * a * a * a * a * ((float) 1 - omega_m - omega_r)); dudt[hdoti] = (-factor * U[hdoti] - ((float) 3.0 * (Oc * U[deltaci] + Ob * U[deltabi]) + (float) 6.0 * (Ogam * U[deltagami] + Onu * U[deltanui]))); dudt[deltaci] = -(float) 0.5 * U[hdoti]; dudt[deltabi] = -eps * U[thetabi] - (float) 0.5 * U[hdoti]; dudt[deltagami] = -(float) 4.0 / (float) 3.0 * eps * U[thetagami] - ((float) 2.0 / (float) 3.0) * U[hdoti]; dudt[deltanui] = -(float) 4.0 / (float) 3.0 * eps * U[thetanui] - ((float) 2.0 / (float) 3.0) * U[hdoti]; dudt[thetabi] = -U[thetabi] + cs2 * eps * U[deltabi]; dudt[thetagami] = eps * ((float) 0.25 * U[deltagami] - (float) 0.5 * U[F2i]); dudt[thetanui] = eps * ((float) 0.25 * U[deltanui] - (float) 0.5 * U[N2i]); dudt[F2i] = ((float) 8.0 / (float) 15.0) * eps * U[thetagami] + ((float) 4.0 / (float) 15.0) * U[hdoti] + ((float) 8.0 / (float) 5.0) * dudt[etai] - ((float) 3.0 / (float) 5.0) * eps * U[FLi + 3]; dudt[N2i] = ((float) 8.0 / (float) 15.0) * eps * U[thetanui] + ((float) 4.0 / (float) 15.0) * U[hdoti] + ((float) 8.0 / (float) 5.0) * dudt[etai] - ((float) 3.0 / (float) 5.0) * eps * U[NLi + 3]; dudt[GLi + 0] = -eps * U[GLi + 1]; dudt[GLi + 1] = eps / (float) (3) * (U[GLi + 0] - (float) 2 * U[GLi + 2]); dudt[GLi + 2] = eps / (float) (5) * ((float) 2 * U[GLi + 1] - (float) 3 * U[GLi + 3]); for (int l = 3; l < LMAX - 1; l++) { dudt[FLi + l] = eps / (float) (2 * l + 1) * ((float) l * U[FLi - 1 + l] - (float) (l + 1) * U[FLi + 1 + l]); dudt[NLi + l] = eps / (float) (2 * l + 1) * ((float) l * U[NLi - 1 + l] - (float) (l + 1) * U[NLi + 1 + l]); dudt[GLi + l] = eps / (float) (2 * l + 1) * ((float) l * U[GLi - 1 + l] - (float) (l + 1) * U[GLi + 1 + l]); } dudt[FLi + LMAX - 1] = (eps * U[FLi + LMAX - 2]) / (float) (2 * LMAX - 1); dudt[NLi + LMAX - 1] = (eps * U[NLi + LMAX - 2]) / (float) (2 * LMAX - 1); dudt[GLi + LMAX - 1] = (eps * U[GLi + LMAX - 2]) / (float) (2 * LMAX - 1); for (int f = 0; f < NFIELD; f++) { U[f] = ((float) 1 - beta[i]) * U0[f] + beta[i] * (U[f] + dudt[f] * dloga * (float) 0.5); } } }; auto compute_implicit_dudt = [&](float loga, float dloga) { a = EXP(loga); float thetab = U[thetabi]; float thetagam = U[thetagami]; float F2 = U[F2i]; float G0 = U[G0i]; float G1 = U[G1i]; float G2 = U[G2i]; float thetab0 = thetab; float thetagam0 = thetagam; float F20 = F2; float G00 = G0; float G10 = G1; float G20 = G2; float sigma = uni.sigma_T(a); thetab = -((-(float) 3 * Ob * thetab0 - (float) 3 * dloga * Ob * sigma * thetab0 - (float) 4 * dloga * Ogam * sigma * thetagam0) / ((float) 3 * Ob + (float) 3 * dloga * Ob * sigma + (float) 4 * dloga * Ogam * sigma)); thetagam = -((-(float) 3 * dloga * Ob * sigma * thetab0 - (float) 3 * Ob * thetagam0 - (float) 4 * dloga * Ogam * sigma * thetagam0) / ((float) 3 * Ob + (float) 3 * dloga * (float) Ob * sigma + (float) 4 * dloga * Ogam * sigma)); F2 = -((-(float) 10 * F20 - (float) 4 * dloga * F20 * sigma - dloga * G00 * sigma - dloga * G20 * sigma) / (((float) 1 + dloga * sigma) * ((float) 10 + (float) 3 * dloga * sigma))); G0 = -((-(float) 10 * G00 - (float) 5 * dloga * F20 * sigma - (float) 8 
* dloga * G00 * sigma - (float) 5 * dloga * G20 * sigma) / (((float) 1 + dloga * sigma) * ((float) 10 + (float) 3 * dloga * sigma))); G1 = G10 / ((float) 1 + dloga * sigma); G2 = -((-(float) 10 * G20 - dloga * F20 * sigma - dloga * G00 * sigma - (float) 4 * dloga * G20 * sigma) / (((float) 1 + dloga * sigma) * ((float) 10 + (float) 3 * dloga * sigma))); array<float, NFIELD> dudt; for (int f = 0; f < NFIELD; f++) { dudt[f] = (float) 0.0; } dudt[thetabi] = (thetab - thetab0) / dloga; dudt[thetagami] = (thetagam - thetagam0) / dloga; dudt[F2i] = (F2 - F20) / dloga; dudt[G0i] = (G0 - G00) / dloga; dudt[G1i] = (G1 - G10) / dloga; dudt[G2i] = (G2 - G20) / dloga; for (int l = 3; l < LMAX - 1; l++) { dudt[GLi + l] = U[GLi + l] * ((float) 1 / ((float) 1 + dloga * sigma) - (float) 1) / dloga; dudt[FLi + l] = U[FLi + l] * ((float) 1 / ((float) 1 + dloga * sigma) - (float) 1) / dloga; } dudt[GLi + LMAX - 1] = U[GLi + LMAX - 1] * ((float) 1 / ((float) 1 + (sigma + (float) LMAX / (U[taui] * a * hubble) / ((float) 2 * (float) LMAX - (float) 1))) - (float) 1) / dloga; dudt[FLi + LMAX - 1] = U[FLi + LMAX - 1] * ((float) 1 / ((float) 1 + (sigma + (float) LMAX / (U[taui] * a * hubble) / ((float) 2 * (float) LMAX - (float) 1))) - (float) 1) / dloga; return dudt; }; compute_explicit(0); float gamma = (float) 1.0 - (float) 1.0 / SQRT((float ) 2); auto dudt1 = compute_implicit_dudt(loga + gamma * dloga, gamma * dloga); for (int f = 0; f < NFIELD; f++) { U[f] += dudt1[f] * ((float) 1.0 - (float) 2.0 * gamma) * dloga; } auto dudt2 = compute_implicit_dudt(loga + ((float) 1.0 - gamma) * dloga, gamma * dloga); for (int f = 0; f < NFIELD; f++) { U[f] += (dudt1[f] * ((float) -0.5 + (float) 2.0 * gamma) + dudt2[f] * (float) 0.5) * dloga; } compute_explicit(1); loga = loga0 + dloga; } } __device__ void einstein_boltzmann_init_set(cos_state* U, zero_order_universe* uni, float kmin, float kmax, int N, float amin, float normalization) { float logkmin = LOG(kmin); float logkmax = LOG(kmax); float dlogk = (logkmax - logkmin) / (N - 1); for (int i = threadIdx.x; i < N; i += blockDim.x) { float k = EXP(logkmin + (float ) i * dlogk); einstein_boltzmann_init(U + i, uni, k, normalization, uni->amin); } __syncthreads(); } __device__ void einstein_boltzmann_interpolation_function(interp_functor<float>* den_k_func, interp_functor<float>* vel_k_func, cos_state* U, zero_order_universe* uni, float kmin, float kmax, int N, float astart, float astop) { int thread = threadIdx.x; int block_size = blockDim.x; __shared__ vector<float>* dptr; __shared__ vector<float>* vptr; float dlogk = 1.0e-2; float logkmin = LOG(kmin) - dlogk; float logkmax = LOG(kmax) + dlogk; dlogk = (logkmax - logkmin) / (float) (N - 1); if (thread == 0) { dptr = new vector<float>(N); vptr = new vector<float>(N); } __syncthreads(); auto& den_k = *dptr; auto& vel_k = *vptr; float oc = opts.omega_c; float ob = opts.omega_b; float om = oc + ob; oc /= om; ob /= om; float H = uni->hubble(astop); for (int i = thread; i < N; i += block_size) { float k = EXP(logkmin + (float ) i * dlogk); float eps = k / (astop * H); einstein_boltzmann(U + i, uni, k, astart, astop); den_k[i] = POW(ob*U[i][deltabi]+oc*U[i][deltaci], 2); vel_k[i] = POW((ob*(eps*U[i][thetabi]+(float)0.5*U[i][hdoti]) + oc*((float) 0.5 * U[i][hdoti]))/k*H, 2.f); } __syncthreads(); if (thread == 0) { build_interpolation_function(den_k_func, den_k, EXP(logkmin), EXP(logkmax)); build_interpolation_function(vel_k_func, vel_k, EXP(logkmin), EXP(logkmax)); } __syncthreads(); if (thread == 0) { delete dptr; delete vptr; } }
857949bc32c8af323a0c4fa9adfe058e503c5c55.cu
#include <gputiger/boltzmann.hpp> __device__ float sigma8_integrand::operator()(float x) const { const float R = 8 / littleh; const float c0 = float(9) / (2.f * float(M_PI) * float(M_PI)) / pow(R, 6); float k = EXP(x); cos_state U; einstein_boltzmann_init(&U, uni, k, 1.f, uni->amin); einstein_boltzmann(&U, uni, k, uni->amin, 1.f); float oc = opts.omega_c; float ob = opts.omega_b; float P = POW((oc*U[deltaci] + ob*U[deltabi])/(oc+ob), 2); return c0 * P * POW((SIN(k*R) - k * R *COS(k*R)), 2) * pow(k, -3); } __device__ void einstein_boltzmann_init(cos_state* uptr, const zero_order_universe* uni_ptr, float k, float normalization, float a) { cos_state& U = *uptr; const zero_order_universe& uni = *uni_ptr; const nvstd::function<float(float)>& Hubble = uni.hubble; float Oc, Ob, Ogam, Onu, Or; uni.compute_radiation_fractions(Ogam, Onu, a); uni.compute_matter_fractions(Oc, Ob, a); Or = Ogam + Onu; float hubble = Hubble(a); float eps = k / (a * hubble); float C = (float) 1.0 * POW(eps, (float ) -1.5) * normalization; float Rnu = Onu / Or; U[taui] = (float) 1.0 / (a * hubble); U[deltanui] = U[deltagami] = -(float) 2.0 / (float) 3.0 * C * eps * eps; U[deltabi] = U[deltaci] = (float) 3.0 / (float) 4.0 * U[deltagami]; U[thetabi] = U[thetagami] = -C / (float) 18.0 * eps * eps * eps; U[thetanui] = ((float) 23 + (float) 4 * Rnu) / ((float) 15 + (float) 4 * Rnu) * U[thetagami]; U[N2i] = (float) 0.5 * ((float) 4.0 * C) / ((float) 3.0 * ((float) 15 + (float) 4 * Rnu)) * eps * eps; U[hdoti] = (float) (float) 2.0 * C * eps * eps; U[G0i] = U[G1i] = U[G2i] = U[F2i] = (float) 0.0; for (int l = 3; l < LMAX; l++) { U[FLi + l] = (float) 0.0; U[NLi + l] = (float) 0.0; U[GLi + l] = (float) 0.0; } U[etai] = ((float) 0.5 * U[hdoti] - ((float) 1.5 * (Oc * U[deltaci] + Ob * U[deltabi]) + (float) 1.5 * (Ogam * U[deltagami] + Onu * U[deltanui]))) / (eps * eps); } __device__ void einstein_boltzmann(cos_state* uptr, const zero_order_universe *uni_ptr, float k, float amin, float amax) { const auto &uni = *uni_ptr; if (amin < uni.amin && amax > uni.amax) { printf("out of range error in einstein_boltzmann\n"); } cos_state& U = *uptr; cos_state U0; const nvstd::function<float(float)>& Hubble = uni.hubble; float loga = LOG(amin); float logamax = LOG(amax); float omega_m = opts.omega_b + opts.omega_c; float omega_r = opts.omega_gam + opts.omega_nu; while (loga < logamax) { float Oc, Ob, Ogam, Onu, Or; float a = EXP(loga); float hubble = Hubble(a); float eps = k / (a * hubble); uni.compute_radiation_fractions(Ogam, Onu, a); uni.compute_matter_fractions(Oc, Ob, a); Or = Ogam + Onu; float cs2 = uni.cs2(a); float lambda_i = 0.0; lambda_i = max(lambda_i, SQRT( ((float) LMAX + (float) 1.0) / ((float) LMAX + (float) 3.0)) * eps); lambda_i = max(lambda_i, SQRT( (float) 3.0 * POW(eps, 4) + (float) 8.0 * eps * eps * Or) / SQRT((float ) 5) / eps); float lambda_r = (eps + SQRT(eps * eps + (float) 4.0 * cs2 * POW(eps, (float) 4))) / ((float) 2.0 * eps); float dloga_i = (float) 2.0 * (float) 1.73 / lambda_i; float dloga_r = (float) 2.0 * (float) 2.51 / lambda_r; float dloga = min(min((float) 5e-2, min((float) 0.9 * dloga_i, (float) 0.9 * dloga_r)), logamax - loga); float loga0 = loga; const auto compute_explicit = [&](int step) { U0 = U; cos_state dudt; constexpr float beta[3] = {1, 0.25, (2.0 / 3.0)}; constexpr float tm[3] = {0, 1, 0.5}; for (int i = 0; i < 3; i++) { loga = loga0 + (float) 0.5 * (tm[i] + step) * dloga; a = EXP(loga); hubble = Hubble(a); eps = k / (a * hubble); uni.compute_radiation_fractions(Ogam,Onu,a); 
uni.compute_matter_fractions(Oc,Ob,a); Or = Ogam + Onu; cs2 = uni.cs2(a); dudt[taui] = (float) 1.0 / (a * hubble); dudt[etai] = ((float) 1.5 * ((Ob * U[thetabi]) + ((float) 4.0 / (float) 3.0) * (Ogam * U[thetagami] + Onu * U[thetanui])) / eps); float factor = ((a * omega_m) + (float) 4 * a * a * a * a * ((float) 1 - omega_m - omega_r)) / ((float) 2 * a * omega_m + (float) 2 * omega_r + (float) 2 * a * a * a * a * ((float) 1 - omega_m - omega_r)); dudt[hdoti] = (-factor * U[hdoti] - ((float) 3.0 * (Oc * U[deltaci] + Ob * U[deltabi]) + (float) 6.0 * (Ogam * U[deltagami] + Onu * U[deltanui]))); dudt[deltaci] = -(float) 0.5 * U[hdoti]; dudt[deltabi] = -eps * U[thetabi] - (float) 0.5 * U[hdoti]; dudt[deltagami] = -(float) 4.0 / (float) 3.0 * eps * U[thetagami] - ((float) 2.0 / (float) 3.0) * U[hdoti]; dudt[deltanui] = -(float) 4.0 / (float) 3.0 * eps * U[thetanui] - ((float) 2.0 / (float) 3.0) * U[hdoti]; dudt[thetabi] = -U[thetabi] + cs2 * eps * U[deltabi]; dudt[thetagami] = eps * ((float) 0.25 * U[deltagami] - (float) 0.5 * U[F2i]); dudt[thetanui] = eps * ((float) 0.25 * U[deltanui] - (float) 0.5 * U[N2i]); dudt[F2i] = ((float) 8.0 / (float) 15.0) * eps * U[thetagami] + ((float) 4.0 / (float) 15.0) * U[hdoti] + ((float) 8.0 / (float) 5.0) * dudt[etai] - ((float) 3.0 / (float) 5.0) * eps * U[FLi + 3]; dudt[N2i] = ((float) 8.0 / (float) 15.0) * eps * U[thetanui] + ((float) 4.0 / (float) 15.0) * U[hdoti] + ((float) 8.0 / (float) 5.0) * dudt[etai] - ((float) 3.0 / (float) 5.0) * eps * U[NLi + 3]; dudt[GLi + 0] = -eps * U[GLi + 1]; dudt[GLi + 1] = eps / (float) (3) * (U[GLi + 0] - (float) 2 * U[GLi + 2]); dudt[GLi + 2] = eps / (float) (5) * ((float) 2 * U[GLi + 1] - (float) 3 * U[GLi + 3]); for (int l = 3; l < LMAX - 1; l++) { dudt[FLi + l] = eps / (float) (2 * l + 1) * ((float) l * U[FLi - 1 + l] - (float) (l + 1) * U[FLi + 1 + l]); dudt[NLi + l] = eps / (float) (2 * l + 1) * ((float) l * U[NLi - 1 + l] - (float) (l + 1) * U[NLi + 1 + l]); dudt[GLi + l] = eps / (float) (2 * l + 1) * ((float) l * U[GLi - 1 + l] - (float) (l + 1) * U[GLi + 1 + l]); } dudt[FLi + LMAX - 1] = (eps * U[FLi + LMAX - 2]) / (float) (2 * LMAX - 1); dudt[NLi + LMAX - 1] = (eps * U[NLi + LMAX - 2]) / (float) (2 * LMAX - 1); dudt[GLi + LMAX - 1] = (eps * U[GLi + LMAX - 2]) / (float) (2 * LMAX - 1); for (int f = 0; f < NFIELD; f++) { U[f] = ((float) 1 - beta[i]) * U0[f] + beta[i] * (U[f] + dudt[f] * dloga * (float) 0.5); } } }; auto compute_implicit_dudt = [&](float loga, float dloga) { a = EXP(loga); float thetab = U[thetabi]; float thetagam = U[thetagami]; float F2 = U[F2i]; float G0 = U[G0i]; float G1 = U[G1i]; float G2 = U[G2i]; float thetab0 = thetab; float thetagam0 = thetagam; float F20 = F2; float G00 = G0; float G10 = G1; float G20 = G2; float sigma = uni.sigma_T(a); thetab = -((-(float) 3 * Ob * thetab0 - (float) 3 * dloga * Ob * sigma * thetab0 - (float) 4 * dloga * Ogam * sigma * thetagam0) / ((float) 3 * Ob + (float) 3 * dloga * Ob * sigma + (float) 4 * dloga * Ogam * sigma)); thetagam = -((-(float) 3 * dloga * Ob * sigma * thetab0 - (float) 3 * Ob * thetagam0 - (float) 4 * dloga * Ogam * sigma * thetagam0) / ((float) 3 * Ob + (float) 3 * dloga * (float) Ob * sigma + (float) 4 * dloga * Ogam * sigma)); F2 = -((-(float) 10 * F20 - (float) 4 * dloga * F20 * sigma - dloga * G00 * sigma - dloga * G20 * sigma) / (((float) 1 + dloga * sigma) * ((float) 10 + (float) 3 * dloga * sigma))); G0 = -((-(float) 10 * G00 - (float) 5 * dloga * F20 * sigma - (float) 8 * dloga * G00 * sigma - (float) 5 * dloga * G20 * sigma) / 
(((float) 1 + dloga * sigma) * ((float) 10 + (float) 3 * dloga * sigma))); G1 = G10 / ((float) 1 + dloga * sigma); G2 = -((-(float) 10 * G20 - dloga * F20 * sigma - dloga * G00 * sigma - (float) 4 * dloga * G20 * sigma) / (((float) 1 + dloga * sigma) * ((float) 10 + (float) 3 * dloga * sigma))); array<float, NFIELD> dudt; for (int f = 0; f < NFIELD; f++) { dudt[f] = (float) 0.0; } dudt[thetabi] = (thetab - thetab0) / dloga; dudt[thetagami] = (thetagam - thetagam0) / dloga; dudt[F2i] = (F2 - F20) / dloga; dudt[G0i] = (G0 - G00) / dloga; dudt[G1i] = (G1 - G10) / dloga; dudt[G2i] = (G2 - G20) / dloga; for (int l = 3; l < LMAX - 1; l++) { dudt[GLi + l] = U[GLi + l] * ((float) 1 / ((float) 1 + dloga * sigma) - (float) 1) / dloga; dudt[FLi + l] = U[FLi + l] * ((float) 1 / ((float) 1 + dloga * sigma) - (float) 1) / dloga; } dudt[GLi + LMAX - 1] = U[GLi + LMAX - 1] * ((float) 1 / ((float) 1 + (sigma + (float) LMAX / (U[taui] * a * hubble) / ((float) 2 * (float) LMAX - (float) 1))) - (float) 1) / dloga; dudt[FLi + LMAX - 1] = U[FLi + LMAX - 1] * ((float) 1 / ((float) 1 + (sigma + (float) LMAX / (U[taui] * a * hubble) / ((float) 2 * (float) LMAX - (float) 1))) - (float) 1) / dloga; return dudt; }; compute_explicit(0); float gamma = (float) 1.0 - (float) 1.0 / SQRT((float ) 2); auto dudt1 = compute_implicit_dudt(loga + gamma * dloga, gamma * dloga); for (int f = 0; f < NFIELD; f++) { U[f] += dudt1[f] * ((float) 1.0 - (float) 2.0 * gamma) * dloga; } auto dudt2 = compute_implicit_dudt(loga + ((float) 1.0 - gamma) * dloga, gamma * dloga); for (int f = 0; f < NFIELD; f++) { U[f] += (dudt1[f] * ((float) -0.5 + (float) 2.0 * gamma) + dudt2[f] * (float) 0.5) * dloga; } compute_explicit(1); loga = loga0 + dloga; } } __device__ void einstein_boltzmann_init_set(cos_state* U, zero_order_universe* uni, float kmin, float kmax, int N, float amin, float normalization) { float logkmin = LOG(kmin); float logkmax = LOG(kmax); float dlogk = (logkmax - logkmin) / (N - 1); for (int i = threadIdx.x; i < N; i += blockDim.x) { float k = EXP(logkmin + (float ) i * dlogk); einstein_boltzmann_init(U + i, uni, k, normalization, uni->amin); } __syncthreads(); } __device__ void einstein_boltzmann_interpolation_function(interp_functor<float>* den_k_func, interp_functor<float>* vel_k_func, cos_state* U, zero_order_universe* uni, float kmin, float kmax, int N, float astart, float astop) { int thread = threadIdx.x; int block_size = blockDim.x; __shared__ vector<float>* dptr; __shared__ vector<float>* vptr; float dlogk = 1.0e-2; float logkmin = LOG(kmin) - dlogk; float logkmax = LOG(kmax) + dlogk; dlogk = (logkmax - logkmin) / (float) (N - 1); if (thread == 0) { dptr = new vector<float>(N); vptr = new vector<float>(N); } __syncthreads(); auto& den_k = *dptr; auto& vel_k = *vptr; float oc = opts.omega_c; float ob = opts.omega_b; float om = oc + ob; oc /= om; ob /= om; float H = uni->hubble(astop); for (int i = thread; i < N; i += block_size) { float k = EXP(logkmin + (float ) i * dlogk); float eps = k / (astop * H); einstein_boltzmann(U + i, uni, k, astart, astop); den_k[i] = POW(ob*U[i][deltabi]+oc*U[i][deltaci], 2); vel_k[i] = POW((ob*(eps*U[i][thetabi]+(float)0.5*U[i][hdoti]) + oc*((float) 0.5 * U[i][hdoti]))/k*H, 2.f); } __syncthreads(); if (thread == 0) { build_interpolation_function(den_k_func, den_k, EXP(logkmin), EXP(logkmax)); build_interpolation_function(vel_k_func, vel_k, EXP(logkmin), EXP(logkmax)); } __syncthreads(); if (thread == 0) { delete dptr; delete vptr; } }
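Note: the einstein_boltzmann_init_set and einstein_boltzmann_interpolation_function routines above spread the N wavenumbers over the threads of a single block with a stride of blockDim.x and then synchronize. Below is a minimal sketch of that block-stride pattern; the process() device function and the launch configuration are hypothetical stand-ins for the per-mode work and are not taken from the file above.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical per-element work standing in for the per-mode initialization.
__device__ void process(float* data, int i) { data[i] *= 2.0f; }

// Block-stride loop: each thread of the block handles indices
// i, i + blockDim.x, i + 2*blockDim.x, ... as in the routines above.
__global__ void block_stride_example(float* data, int N) {
  for (int i = threadIdx.x; i < N; i += blockDim.x) {
    process(data, i);
  }
  __syncthreads();  // every element is finished before any thread proceeds
}

int main() {
  const int N = 1000;
  float host[N];
  for (int i = 0; i < N; ++i) host[i] = 1.0f;

  float* dev = nullptr;
  cudaMalloc(&dev, N * sizeof(float));
  cudaMemcpy(dev, host, N * sizeof(float), cudaMemcpyHostToDevice);
  block_stride_example<<<1, 128>>>(dev, N);  // single block; the loop covers all N elements
  cudaMemcpy(host, dev, N * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(dev);

  std::printf("host[0] = %.1f, host[%d] = %.1f\n", host[0], N - 1, host[N - 1]);  // both 2.0
  return 0;
}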
dedf06648a8245d8b877f06e4b82e5f170906588.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdio.h> #include <cassert> #include <hipcub/hipcub.hpp> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/inference/tensorrt/plugin/qkv_to_context_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_utils.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/phi/kernels/funcs/blas/blas.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // Dynamic Plugin below. #if IS_TRT_VERSION_GE(6000) template <typename T> __global__ void transpose(T *src, T *dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head) { int batch_id = blockIdx.x / (head_num * seq_len); int seq_id = blockIdx.x % seq_len; int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len; dst[batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head + head_id * size_per_head + threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x]; } inline int round_up(int seq_len, int multiple = 32) { PADDLE_ENFORCE_GT( multiple, 0, platform::errors::InvalidArgument( "multiple should be a positive numberbut it's (%d)", multiple)); return ((seq_len + multiple - 1) / multiple) * multiple; } template <typename T> __global__ void reset_qk_bias(T *input, int real_seq_len, int seq_len) { if (threadIdx.x < seq_len) { int id = threadIdx.x + blockIdx.x * seq_len; input[id] = threadIdx.x >= real_seq_len ? 
(T)-1e20f : (T)0.0f; } } template <typename T> __global__ void transpose_qkv_padding( const T *src, // (Batch, real_seq_len, 3 , head_num * size_per_head) T *dst, // (3 * batch * head_num * seq_len * size_per_head) const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int real_seq_len) { // const dim3 grid(seq_len, batch, 3); // const dim3 block(head_size, head_num, 1); int qkv_id = blockIdx.z; int batch_id = blockIdx.y; int seq_id = blockIdx.x; int head_id = threadIdx.y; const int dst_offset = qkv_id * batch_size * head_num * seq_len * size_per_head + batch_id * head_num * seq_len * size_per_head + head_id * seq_len * size_per_head + seq_id * size_per_head; const int src_offset = batch_id * real_seq_len * 3 * head_num * size_per_head + seq_id * 3 * head_num * size_per_head + qkv_id * head_num * size_per_head + head_id * size_per_head; if (seq_id < real_seq_len) { dst[threadIdx.x + dst_offset] = src[threadIdx.x + src_offset]; } else if (seq_id < seq_len) { dst[threadIdx.x + dst_offset] = 0; } } template <typename T> __global__ void transpose_qkv_unpadding(const T *src, T *dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int real_seq_len) { int batch_id = blockIdx.x / (head_num * real_seq_len); int seq_id = blockIdx.x % real_seq_len; int head_id = blockIdx.x % (head_num * real_seq_len) / real_seq_len; dst[batch_id * head_num * real_seq_len * size_per_head + seq_id * head_num * size_per_head + head_id * size_per_head + threadIdx.x] = src[batch_id * head_num * seq_len * size_per_head + head_id * seq_len * size_per_head + seq_id * size_per_head + threadIdx.x]; } template <typename T> __global__ void TransposeQkvKernel(const int H, const T *input, T *output) { // Input: BxSx3xNxH // Bias: 3xSxB // Output: 3xBxNxSxH int n = threadIdx.y; int s = blockIdx.x; int b = blockIdx.y; int m = blockIdx.z; const int N = blockDim.y; const int S = gridDim.x; const int B = gridDim.y; const int NH = N * H; const int NHS = NH * S; const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3; const int out_offset = s * H + n * S * H + b * NHS + m * NHS * B; const int i = threadIdx.x; output[out_offset + i] = input[in_offset + i]; } inline void TransposeQKV(const int batch, const int seq_len, const int head_size, const int head_num, const float *input, float *output, hipStream_t stream) { int scratch_size = batch * head_num * seq_len * seq_len; const dim3 grid(seq_len, batch, 3); if (head_size % 4 == 0 && scratch_size % 4 == 0) { const int h = head_size / 4; const float4 *input4 = reinterpret_cast<const float4 *>(input); float4 *output4 = reinterpret_cast<float4 *>(output); const dim3 block(h, head_num, 1); // limit h * head_num to max block size(1024). PADDLE_ENFORCE_LE(h * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024 * 4)); hipLaunchKernelGGL(( TransposeQkvKernel<float4>), dim3(grid), dim3(block), 0, stream, h, input4, output4); } else if (head_size % 2 == 0 && scratch_size % 2 == 0) { const int h = head_size / 2; const float2 *input2 = reinterpret_cast<const float2 *>(input); float2 *output2 = reinterpret_cast<float2 *>(output); const dim3 block(h, head_num, 1); // limit h * head_num to max block size(1024). 
PADDLE_ENFORCE_LE(h * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024 * 2)); hipLaunchKernelGGL(( TransposeQkvKernel<float2>), dim3(grid), dim3(block), 0, stream, h, input2, output2); } else { const dim3 block(head_size, head_num, 1); // limit head_size * head_num to max block size(1024). PADDLE_ENFORCE_LE(head_size * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024)); hipLaunchKernelGGL(( TransposeQkvKernel<float>) , dim3(grid), dim3(block), 0, stream, head_size, input, output); } } inline void TransposeQKV(const int batch, const int seq_len, const int head_size, const int head_num, const half *input, half *output, hipStream_t stream) { int scratch_size = batch * head_num * seq_len * seq_len; const dim3 grid(seq_len, batch, 3); if (head_size % 8 == 0 && scratch_size % 8 == 0) { int h = head_size / 8; const int4 *input4 = reinterpret_cast<const int4 *>(input); int4 *output4 = reinterpret_cast<int4 *>(output); dim3 block(h, head_num, 1); // limit h * head_num to max block size(1024). PADDLE_ENFORCE_LE(h * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024 * 8)); hipLaunchKernelGGL(( TransposeQkvKernel<int4>), dim3(grid), dim3(block), 0, stream, h, input4, output4); } else if (head_size % 2 == 0 && scratch_size % 2 == 0) { const int h = head_size / 2; const half2 *input2 = reinterpret_cast<const half2 *>(input); half2 *output2 = reinterpret_cast<half2 *>(output); const dim3 block(h, head_num, 1); // limit h * head_num to max block size(1024). PADDLE_ENFORCE_LE(h * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024 * 2)); hipLaunchKernelGGL(( TransposeQkvKernel<half2>), dim3(grid), dim3(block), 0, stream, h, input2, output2); } else { const dim3 block(head_size, head_num, 1); // limit head_size * head_num to max block size(1024). 
PADDLE_ENFORCE_LE(head_size * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024)); hipLaunchKernelGGL(( TransposeQkvKernel<half>) , dim3(grid), dim3(block), 0, stream, head_size, input, output); } } int QkvToContextPluginDynamic::initialize() TRT_NOEXCEPT { return 0; } nvinfer1::DimsExprs QkvToContextPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { // input[0], (B, S, 3 * N * H, 1, 1) // input[1], (B, head_num, seq_len, seq_len) // output, (B, seq_len, hidden) PADDLE_ENFORCE_EQ(output_index, 0, platform::errors::InvalidArgument( "There is only one output of the EmbEltwiseLayernorm, " "so the index should be zero," "but it's (%d)", output_index)); PADDLE_ENFORCE_EQ( nb_inputs, 2, platform::errors::InvalidArgument( "The Input of the EmbEltwiseLayernorm should be 3, but we found " "it has (%d) inputs", nb_inputs)); nvinfer1::DimsExprs ret; ret.nbDims = 3; ret.d[0] = inputs[0].d[0]; ret.d[1] = inputs[0].d[1]; ret.d[2] = expr_builder.constant(head_size_ * head_number_); return ret; } void QkvToContextPluginDynamic::configurePlugin( const nvinfer1::DynamicPluginTensorDesc *in, int nb_inputs, const nvinfer1::DynamicPluginTensorDesc *out, int nb_outputs) TRT_NOEXCEPT { auto input_dims = in[0].desc.dims; int batch = input_dims.d[0]; int real_seq_len = input_dims.d[1]; int seq_len = round_up(real_seq_len, 8); if (batch != -1 && real_seq_len != -1) { int device_id = 0; hipGetDevice(&device_id); auto *device_ctx = static_cast<phi::GPUContext *>( platform::DeviceContextPool::Instance().Get( platform::CUDAPlace(device_id))); const phi::GPUContext &dev_ctx = *device_ctx; auto stream = dev_ctx.stream(); tensor_.Resize({batch, seq_len, seq_len, head_number_}); int blocks = batch * head_number_ * seq_len; if (in[0].desc.type == nvinfer1::DataType::kHALF) { mask_half_ = reinterpret_cast<half *>( tensor_.mutable_data<int16_t>(platform::CUDAPlace(device_id))); hipLaunchKernelGGL(( reset_qk_bias), dim3(blocks), dim3(1024), 0, stream, mask_half_, real_seq_len, seq_len); } else if (in[0].desc.type == nvinfer1::DataType::kFLOAT) { fake_qk_bias_ = reinterpret_cast<float *>( tensor_.mutable_data<int32_t>(platform::CUDAPlace(device_id))); int64_t size = sizeof(int32_t) * batch * seq_len * seq_len * head_number_; #ifdef PADDLE_WITH_HIP PADDLE_ENFORCE_GPU_SUCCESS( hipMemsetAsync(fake_qk_bias_, 0, size, dev_ctx.stream())); #else PADDLE_ENFORCE_GPU_SUCCESS( hipMemsetAsync(fake_qk_bias_, 0, size, dev_ctx.stream())); #endif } else { PADDLE_THROW(platform::errors::Fatal( "The QKV TRT Plugin's input type should be float or half.")); } } } bool QkvToContextPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { #ifdef TRT_PLUGIN_FP16_AVALIABLE return (in.type == nvinfer1::DataType::kFLOAT || in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::TensorFormat::kLINEAR); #else return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == 
nvinfer1::TensorFormat::kLINEAR); #endif } else { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; if (pos == 1) { return in.type == prev.type && in.format == prev.format; } // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType QkvToContextPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ( index, 0, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only has one input, so the " "index value should be 0, but get %d.", index)); return input_types[0]; } template <typename T> __global__ void apply_scale(T *data, T scale, int n) { #if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { data[tid] = data[tid] * scale; } #endif } template <typename T> __global__ void broadcast(const T *src, T *dst, const int seq_len, const int head_num) { int batch_id = blockIdx.x / (head_num * seq_len); int dst_offset = blockIdx.x * seq_len; if (threadIdx.x < seq_len) { dst[threadIdx.x + dst_offset] = src[threadIdx.x + batch_id * seq_len]; } } int QkvToContextPluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT { auto input_dims = input_desc[0].dims; int input_num = ProductDim(input_dims); // input[0], (B, S, 3 * N * H, 1, 1) int batch = input_dims.d[0]; int seq_len = input_dims.d[1]; phi::DenseTensor multihead_temp_tensor; int scratch_size = batch * head_number_ * seq_len * seq_len * 1; int device_id; hipGetDevice(&device_id); multihead_temp_tensor.Resize({scratch_size + input_num}); auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. QkvToContext-->fp32"; auto *multihead_temp_data = multihead_temp_tensor.mutable_data<float>( platform::CUDAPlace(device_id)); auto *qkptr = multihead_temp_data; auto *tptr = multihead_temp_data + scratch_size; const float *input0_data = static_cast<const float *>(inputs[0]); // fit to [batch, head_num, length, length] + [batch, 1, 1, length] phi::DenseTensor temp_qk_bias_tensor; float *qk_bias = const_cast<float *>(static_cast<const float *>(inputs[1])); if (ProductDim(input_desc[1].dims) == (batch * seq_len)) { temp_qk_bias_tensor.Resize({batch, head_number_, seq_len, seq_len}); auto *temp_qk_bias = temp_qk_bias_tensor.mutable_data<float>( platform::CUDAPlace(device_id)); int grid = batch * head_number_ * seq_len; int block = round_up(seq_len); hipLaunchKernelGGL(( broadcast), dim3(grid), dim3(block), 0, stream, static_cast<const float *>(inputs[1]), temp_qk_bias, seq_len, head_number_); qk_bias = temp_qk_bias; } // fake qk_bias if (ProductDim(input_desc[1].dims) == ProductDim(input_desc[0].dims)) { qk_bias = fake_qk_bias_; } const float *input1_data = static_cast<const float *>(qk_bias); // BxSx3xNxH => tptr: 3xBxNxSxH. 
TransposeQKV( batch, seq_len, head_size_, head_number_, input0_data, tptr, stream); auto *device_ctx = static_cast<phi::GPUContext *>( platform::DeviceContextPool::Instance().Get( platform::CUDAPlace(device_id))); const phi::GPUContext &dev_ctx = *device_ctx; operators::math::MultiHeadGPUComputeFunctor<float> multihead_compute_func; multihead_compute_func(dev_ctx, batch, seq_len, head_number_, head_size_, qkptr, input1_data, tptr, scale_, static_cast<float>(0.0)); int grid = batch * head_number_ * seq_len; int block = head_size_; float *output = static_cast<float *>(outputs[0]); hipLaunchKernelGGL(( transpose<float>), dim3(grid), dim3(block), 0, stream, tptr, output, batch, seq_len, head_number_, head_size_); } else if (input_type == nvinfer1::DataType::kHALF) { #ifdef TRT_PLUGIN_FP16_AVALIABLE VLOG(1) << "TRT Plugin DataType selected. QkvToContext-->fp16"; int real_seq_len = seq_len; int need_padding = false; // fake qk_bias if (ProductDim(input_desc[1].dims) == ProductDim(input_desc[0].dims)) { seq_len = round_up(real_seq_len, 8); scratch_size = batch * head_number_ * seq_len * seq_len * 1; input_num = batch * seq_len * 3 * head_number_ * head_size_; multihead_temp_tensor.Resize({scratch_size + input_num}); need_padding = (real_seq_len != seq_len) ? true : false; } auto *multihead_temp_data = multihead_temp_tensor.mutable_data<int16_t>( // NOLINT platform::CUDAPlace(device_id)); half *qkptr = reinterpret_cast<half *>(multihead_temp_data); half *tptr = qkptr + scratch_size; const half *input0_data = static_cast<const half *>(inputs[0]); // fit to [batch, head_num, length, length] + [batch, 1, 1, length] phi::DenseTensor temp_qk_bias_tensor; half *qk_bias = const_cast<half *>(static_cast<const half *>(inputs[1])); if (ProductDim(input_desc[1].dims) == (batch * seq_len)) { temp_qk_bias_tensor.Resize({batch, head_number_, seq_len, seq_len}); auto *temp_qk_bias = reinterpret_cast<half *>(temp_qk_bias_tensor.mutable_data<int16_t>( platform::CUDAPlace(device_id))); int grid = batch * head_number_ * seq_len; int block = round_up(seq_len); hipLaunchKernelGGL(( broadcast), dim3(grid), dim3(block), 0, stream, static_cast<const half *>(inputs[1]), temp_qk_bias, seq_len, head_number_); qk_bias = temp_qk_bias; } // padding: mask_half_ = [0,0,...-1e20f,-1e20f] // no_padding: mask_half_ = [0,.....0,.........,0] if (ProductDim(input_desc[1].dims) == ProductDim(input_desc[0].dims)) { qk_bias = mask_half_; } const half *input1_data = static_cast<const half *>(qk_bias); // BxSx3xNxH => tptr: 3xBxNxSxH. 
if (need_padding) { dim3 grid_p(seq_len, batch, 3); dim3 block_p(head_size_, head_number_, 1); hipLaunchKernelGGL(( transpose_qkv_padding), dim3(grid_p), dim3(block_p), 0, stream, input0_data, tptr, batch, seq_len, head_number_, head_size_, real_seq_len); } else { TransposeQKV( batch, seq_len, head_size_, head_number_, input0_data, tptr, stream); } auto *device_ctx = static_cast<phi::GPUContext *>( platform::DeviceContextPool::Instance().Get( platform::CUDAPlace(device_id))); int n_q = seq_len * head_number_ * head_size_ * batch; constexpr int threads = 128; int blocks = (n_q + threads - 1) / threads; hipLaunchKernelGGL(( apply_scale), dim3(blocks), dim3(threads), 0, stream, tptr, static_cast<half>(scale_), n_q); const phi::GPUContext &dev_ctx = *device_ctx; operators::math::MultiHeadGPUComputeFunctor<half> multihead_compute_func; multihead_compute_func(dev_ctx, batch, seq_len, head_number_, head_size_, qkptr, input1_data, tptr, half(1.), half(0.0)); int grid = batch * head_number_ * seq_len; int block = head_size_; half *output = static_cast<half *>(outputs[0]); if (need_padding) { int grid_u = batch * head_number_ * real_seq_len; int block_u = head_size_; hipLaunchKernelGGL(( transpose_qkv_unpadding<half>), dim3(grid_u), dim3(block_u), 0, stream, tptr, output, batch, seq_len, head_number_, head_size_, real_seq_len); } else { hipLaunchKernelGGL(( transpose<half>), dim3(grid), dim3(block), 0, stream, tptr, output, batch, seq_len, head_number_, head_size_); } #else PADDLE_THROW(platform::errors::Fatal( "The Ernie(Bert) TensorRT Plugin should be " "complied with CUDA version >= 10.0 when running with fp16. " "Please recomplie it or try to use fp32 by set " "config.SetTRTDynamicShapeInfo(min_input_shape, " "max_input_shape, opt_input_shape, true")); #endif } else { PADDLE_THROW(platform::errors::Fatal( "The QKV TRT Plugin's input type should be float or half.")); } return hipGetLastError() != hipSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
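Note: in the fp16 path of the hipified plugin above, reset_qk_bias fills the attention-bias buffer with 0 for valid positions and -1e20 for padded ones (compare the comment "padding: mask_half_ = [0,0,...-1e20f,-1e20f]"), so the softmax inside the multi-head computation effectively ignores the padding. A host-side sketch of the same rule follows; fill_qk_bias is a hypothetical helper used only for illustration and is not part of the plugin.

#include <cstdio>

// Hypothetical host-side analogue of reset_qk_bias: positions at or beyond the
// real sequence length receive a large negative bias, valid positions receive 0.
void fill_qk_bias(float* bias, int real_seq_len, int seq_len) {
  for (int i = 0; i < seq_len; ++i) {
    bias[i] = (i >= real_seq_len) ? -1e20f : 0.0f;
  }
}

int main() {
  float bias[8];
  fill_qk_bias(bias, 6, 8);        // illustrative lengths: 6 real tokens padded to 8
  for (int i = 0; i < 8; ++i) {
    std::printf("%g ", bias[i]);   // prints: 0 0 0 0 0 0 -1e+20 -1e+20
  }
  std::printf("\n");
  return 0;
}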
dedf06648a8245d8b877f06e4b82e5f170906588.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdio.h> #include <cassert> #include <cub/cub.cuh> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/inference/tensorrt/plugin/qkv_to_context_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_utils.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/phi/kernels/funcs/blas/blas.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // Dynamic Plugin below. #if IS_TRT_VERSION_GE(6000) template <typename T> __global__ void transpose(T *src, T *dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head) { int batch_id = blockIdx.x / (head_num * seq_len); int seq_id = blockIdx.x % seq_len; int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len; dst[batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head + head_id * size_per_head + threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x]; } inline int round_up(int seq_len, int multiple = 32) { PADDLE_ENFORCE_GT( multiple, 0, platform::errors::InvalidArgument( "multiple should be a positive number,but it's (%d)", multiple)); return ((seq_len + multiple - 1) / multiple) * multiple; } template <typename T> __global__ void reset_qk_bias(T *input, int real_seq_len, int seq_len) { if (threadIdx.x < seq_len) { int id = threadIdx.x + blockIdx.x * seq_len; input[id] = threadIdx.x >= real_seq_len ? 
(T)-1e20f : (T)0.0f; } } template <typename T> __global__ void transpose_qkv_padding( const T *src, // (Batch, real_seq_len, 3 , head_num * size_per_head) T *dst, // (3 * batch * head_num * seq_len * size_per_head) const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int real_seq_len) { // const dim3 grid(seq_len, batch, 3); // const dim3 block(head_size, head_num, 1); int qkv_id = blockIdx.z; int batch_id = blockIdx.y; int seq_id = blockIdx.x; int head_id = threadIdx.y; const int dst_offset = qkv_id * batch_size * head_num * seq_len * size_per_head + batch_id * head_num * seq_len * size_per_head + head_id * seq_len * size_per_head + seq_id * size_per_head; const int src_offset = batch_id * real_seq_len * 3 * head_num * size_per_head + seq_id * 3 * head_num * size_per_head + qkv_id * head_num * size_per_head + head_id * size_per_head; if (seq_id < real_seq_len) { dst[threadIdx.x + dst_offset] = src[threadIdx.x + src_offset]; } else if (seq_id < seq_len) { dst[threadIdx.x + dst_offset] = 0; } } template <typename T> __global__ void transpose_qkv_unpadding(const T *src, T *dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int real_seq_len) { int batch_id = blockIdx.x / (head_num * real_seq_len); int seq_id = blockIdx.x % real_seq_len; int head_id = blockIdx.x % (head_num * real_seq_len) / real_seq_len; dst[batch_id * head_num * real_seq_len * size_per_head + seq_id * head_num * size_per_head + head_id * size_per_head + threadIdx.x] = src[batch_id * head_num * seq_len * size_per_head + head_id * seq_len * size_per_head + seq_id * size_per_head + threadIdx.x]; } template <typename T> __global__ void TransposeQkvKernel(const int H, const T *input, T *output) { // Input: BxSx3xNxH // Bias: 3xSxB // Output: 3xBxNxSxH int n = threadIdx.y; int s = blockIdx.x; int b = blockIdx.y; int m = blockIdx.z; const int N = blockDim.y; const int S = gridDim.x; const int B = gridDim.y; const int NH = N * H; const int NHS = NH * S; const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3; const int out_offset = s * H + n * S * H + b * NHS + m * NHS * B; const int i = threadIdx.x; output[out_offset + i] = input[in_offset + i]; } inline void TransposeQKV(const int batch, const int seq_len, const int head_size, const int head_num, const float *input, float *output, cudaStream_t stream) { int scratch_size = batch * head_num * seq_len * seq_len; const dim3 grid(seq_len, batch, 3); if (head_size % 4 == 0 && scratch_size % 4 == 0) { const int h = head_size / 4; const float4 *input4 = reinterpret_cast<const float4 *>(input); float4 *output4 = reinterpret_cast<float4 *>(output); const dim3 block(h, head_num, 1); // limit h * head_num to max block size(1024). PADDLE_ENFORCE_LE(h * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024 * 4)); TransposeQkvKernel<float4><<<grid, block, 0, stream>>>(h, input4, output4); } else if (head_size % 2 == 0 && scratch_size % 2 == 0) { const int h = head_size / 2; const float2 *input2 = reinterpret_cast<const float2 *>(input); float2 *output2 = reinterpret_cast<float2 *>(output); const dim3 block(h, head_num, 1); // limit h * head_num to max block size(1024). 
PADDLE_ENFORCE_LE(h * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024 * 2)); TransposeQkvKernel<float2><<<grid, block, 0, stream>>>(h, input2, output2); } else { const dim3 block(head_size, head_num, 1); // limit head_size * head_num to max block size(1024). PADDLE_ENFORCE_LE(head_size * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024)); TransposeQkvKernel<float> <<<grid, block, 0, stream>>>(head_size, input, output); } } inline void TransposeQKV(const int batch, const int seq_len, const int head_size, const int head_num, const half *input, half *output, cudaStream_t stream) { int scratch_size = batch * head_num * seq_len * seq_len; const dim3 grid(seq_len, batch, 3); if (head_size % 8 == 0 && scratch_size % 8 == 0) { int h = head_size / 8; const int4 *input4 = reinterpret_cast<const int4 *>(input); int4 *output4 = reinterpret_cast<int4 *>(output); dim3 block(h, head_num, 1); // limit h * head_num to max block size(1024). PADDLE_ENFORCE_LE(h * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024 * 8)); TransposeQkvKernel<int4><<<grid, block, 0, stream>>>(h, input4, output4); } else if (head_size % 2 == 0 && scratch_size % 2 == 0) { const int h = head_size / 2; const half2 *input2 = reinterpret_cast<const half2 *>(input); half2 *output2 = reinterpret_cast<half2 *>(output); const dim3 block(h, head_num, 1); // limit h * head_num to max block size(1024). PADDLE_ENFORCE_LE(h * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024 * 2)); TransposeQkvKernel<half2><<<grid, block, 0, stream>>>(h, input2, output2); } else { const dim3 block(head_size, head_num, 1); // limit head_size * head_num to max block size(1024). 
PADDLE_ENFORCE_LE(head_size * head_num, 1024, platform::errors::InvalidArgument( "head_num (%d) * head_size (%d) should <= %d", head_num, head_size, 1024)); TransposeQkvKernel<half> <<<grid, block, 0, stream>>>(head_size, input, output); } } int QkvToContextPluginDynamic::initialize() TRT_NOEXCEPT { return 0; } nvinfer1::DimsExprs QkvToContextPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { // input[0], (B, S, 3 * N * H, 1, 1) // input[1], (B, head_num, seq_len, seq_len) // output, (B, seq_len, hidden) PADDLE_ENFORCE_EQ(output_index, 0, platform::errors::InvalidArgument( "There is only one output of the EmbEltwiseLayernorm, " "so the index should be zero," "but it's (%d)", output_index)); PADDLE_ENFORCE_EQ( nb_inputs, 2, platform::errors::InvalidArgument( "The Input of the EmbEltwiseLayernorm should be 3, but we found " "it has (%d) inputs", nb_inputs)); nvinfer1::DimsExprs ret; ret.nbDims = 3; ret.d[0] = inputs[0].d[0]; ret.d[1] = inputs[0].d[1]; ret.d[2] = expr_builder.constant(head_size_ * head_number_); return ret; } void QkvToContextPluginDynamic::configurePlugin( const nvinfer1::DynamicPluginTensorDesc *in, int nb_inputs, const nvinfer1::DynamicPluginTensorDesc *out, int nb_outputs) TRT_NOEXCEPT { auto input_dims = in[0].desc.dims; int batch = input_dims.d[0]; int real_seq_len = input_dims.d[1]; int seq_len = round_up(real_seq_len, 8); if (batch != -1 && real_seq_len != -1) { int device_id = 0; cudaGetDevice(&device_id); auto *device_ctx = static_cast<phi::GPUContext *>( platform::DeviceContextPool::Instance().Get( platform::CUDAPlace(device_id))); const phi::GPUContext &dev_ctx = *device_ctx; auto stream = dev_ctx.stream(); tensor_.Resize({batch, seq_len, seq_len, head_number_}); int blocks = batch * head_number_ * seq_len; if (in[0].desc.type == nvinfer1::DataType::kHALF) { mask_half_ = reinterpret_cast<half *>( tensor_.mutable_data<int16_t>(platform::CUDAPlace(device_id))); reset_qk_bias<<<blocks, 1024, 0, stream>>>( mask_half_, real_seq_len, seq_len); } else if (in[0].desc.type == nvinfer1::DataType::kFLOAT) { fake_qk_bias_ = reinterpret_cast<float *>( tensor_.mutable_data<int32_t>(platform::CUDAPlace(device_id))); int64_t size = sizeof(int32_t) * batch * seq_len * seq_len * head_number_; #ifdef PADDLE_WITH_HIP PADDLE_ENFORCE_GPU_SUCCESS( hipMemsetAsync(fake_qk_bias_, 0, size, dev_ctx.stream())); #else PADDLE_ENFORCE_GPU_SUCCESS( cudaMemsetAsync(fake_qk_bias_, 0, size, dev_ctx.stream())); #endif } else { PADDLE_THROW(platform::errors::Fatal( "The QKV TRT Plugin's input type should be float or half.")); } } } bool QkvToContextPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { #ifdef TRT_PLUGIN_FP16_AVALIABLE return (in.type == nvinfer1::DataType::kFLOAT || in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::TensorFormat::kLINEAR); #else return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); #endif } else { return (in.type == 
nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; if (pos == 1) { return in.type == prev.type && in.format == prev.format; } // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType QkvToContextPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ( index, 0, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only has one input, so the " "index value should be 0, but get %d.", index)); return input_types[0]; } template <typename T> __global__ void apply_scale(T *data, T scale, int n) { #if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { data[tid] = data[tid] * scale; } #endif } template <typename T> __global__ void broadcast(const T *src, T *dst, const int seq_len, const int head_num) { int batch_id = blockIdx.x / (head_num * seq_len); int dst_offset = blockIdx.x * seq_len; if (threadIdx.x < seq_len) { dst[threadIdx.x + dst_offset] = src[threadIdx.x + batch_id * seq_len]; } } int QkvToContextPluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT { auto input_dims = input_desc[0].dims; int input_num = ProductDim(input_dims); // input[0], (B, S, 3 * N * H, 1, 1) int batch = input_dims.d[0]; int seq_len = input_dims.d[1]; phi::DenseTensor multihead_temp_tensor; int scratch_size = batch * head_number_ * seq_len * seq_len * 1; int device_id; cudaGetDevice(&device_id); multihead_temp_tensor.Resize({scratch_size + input_num}); auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. QkvToContext-->fp32"; auto *multihead_temp_data = multihead_temp_tensor.mutable_data<float>( platform::CUDAPlace(device_id)); auto *qkptr = multihead_temp_data; auto *tptr = multihead_temp_data + scratch_size; const float *input0_data = static_cast<const float *>(inputs[0]); // fit to [batch, head_num, length, length] + [batch, 1, 1, length] phi::DenseTensor temp_qk_bias_tensor; float *qk_bias = const_cast<float *>(static_cast<const float *>(inputs[1])); if (ProductDim(input_desc[1].dims) == (batch * seq_len)) { temp_qk_bias_tensor.Resize({batch, head_number_, seq_len, seq_len}); auto *temp_qk_bias = temp_qk_bias_tensor.mutable_data<float>( platform::CUDAPlace(device_id)); int grid = batch * head_number_ * seq_len; int block = round_up(seq_len); broadcast<<<grid, block, 0, stream>>>( static_cast<const float *>(inputs[1]), temp_qk_bias, seq_len, head_number_); qk_bias = temp_qk_bias; } // fake qk_bias if (ProductDim(input_desc[1].dims) == ProductDim(input_desc[0].dims)) { qk_bias = fake_qk_bias_; } const float *input1_data = static_cast<const float *>(qk_bias); // BxSx3xNxH => tptr: 3xBxNxSxH. 
TransposeQKV( batch, seq_len, head_size_, head_number_, input0_data, tptr, stream); auto *device_ctx = static_cast<phi::GPUContext *>( platform::DeviceContextPool::Instance().Get( platform::CUDAPlace(device_id))); const phi::GPUContext &dev_ctx = *device_ctx; operators::math::MultiHeadGPUComputeFunctor<float> multihead_compute_func; multihead_compute_func(dev_ctx, batch, seq_len, head_number_, head_size_, qkptr, input1_data, tptr, scale_, static_cast<float>(0.0)); int grid = batch * head_number_ * seq_len; int block = head_size_; float *output = static_cast<float *>(outputs[0]); transpose<float><<<grid, block, 0, stream>>>( tptr, output, batch, seq_len, head_number_, head_size_); } else if (input_type == nvinfer1::DataType::kHALF) { #ifdef TRT_PLUGIN_FP16_AVALIABLE VLOG(1) << "TRT Plugin DataType selected. QkvToContext-->fp16"; int real_seq_len = seq_len; int need_padding = false; // fake qk_bias if (ProductDim(input_desc[1].dims) == ProductDim(input_desc[0].dims)) { seq_len = round_up(real_seq_len, 8); scratch_size = batch * head_number_ * seq_len * seq_len * 1; input_num = batch * seq_len * 3 * head_number_ * head_size_; multihead_temp_tensor.Resize({scratch_size + input_num}); need_padding = (real_seq_len != seq_len) ? true : false; } auto *multihead_temp_data = multihead_temp_tensor.mutable_data<int16_t>( // NOLINT platform::CUDAPlace(device_id)); half *qkptr = reinterpret_cast<half *>(multihead_temp_data); half *tptr = qkptr + scratch_size; const half *input0_data = static_cast<const half *>(inputs[0]); // fit to [batch, head_num, length, length] + [batch, 1, 1, length] phi::DenseTensor temp_qk_bias_tensor; half *qk_bias = const_cast<half *>(static_cast<const half *>(inputs[1])); if (ProductDim(input_desc[1].dims) == (batch * seq_len)) { temp_qk_bias_tensor.Resize({batch, head_number_, seq_len, seq_len}); auto *temp_qk_bias = reinterpret_cast<half *>(temp_qk_bias_tensor.mutable_data<int16_t>( platform::CUDAPlace(device_id))); int grid = batch * head_number_ * seq_len; int block = round_up(seq_len); broadcast<<<grid, block, 0, stream>>>( static_cast<const half *>(inputs[1]), temp_qk_bias, seq_len, head_number_); qk_bias = temp_qk_bias; } // padding: mask_half_ = [0,0,...-1e20f,-1e20f] // no_padding: mask_half_ = [0,.....0,.........,0] if (ProductDim(input_desc[1].dims) == ProductDim(input_desc[0].dims)) { qk_bias = mask_half_; } const half *input1_data = static_cast<const half *>(qk_bias); // BxSx3xNxH => tptr: 3xBxNxSxH. 
if (need_padding) { dim3 grid_p(seq_len, batch, 3); dim3 block_p(head_size_, head_number_, 1); transpose_qkv_padding<<<grid_p, block_p, 0, stream>>>(input0_data, tptr, batch, seq_len, head_number_, head_size_, real_seq_len); } else { TransposeQKV( batch, seq_len, head_size_, head_number_, input0_data, tptr, stream); } auto *device_ctx = static_cast<phi::GPUContext *>( platform::DeviceContextPool::Instance().Get( platform::CUDAPlace(device_id))); int n_q = seq_len * head_number_ * head_size_ * batch; constexpr int threads = 128; int blocks = (n_q + threads - 1) / threads; apply_scale<<<blocks, threads, 0, stream>>>( tptr, static_cast<half>(scale_), n_q); const phi::GPUContext &dev_ctx = *device_ctx; operators::math::MultiHeadGPUComputeFunctor<half> multihead_compute_func; multihead_compute_func(dev_ctx, batch, seq_len, head_number_, head_size_, qkptr, input1_data, tptr, half(1.), half(0.0)); int grid = batch * head_number_ * seq_len; int block = head_size_; half *output = static_cast<half *>(outputs[0]); if (need_padding) { int grid_u = batch * head_number_ * real_seq_len; int block_u = head_size_; transpose_qkv_unpadding<half><<<grid_u, block_u, 0, stream>>>( tptr, output, batch, seq_len, head_number_, head_size_, real_seq_len); } else { transpose<half><<<grid, block, 0, stream>>>( tptr, output, batch, seq_len, head_number_, head_size_); } #else PADDLE_THROW(platform::errors::Fatal( "The Ernie(Bert) TensorRT Plugin should be " "complied with CUDA version >= 10.0 when running with fp16. " "Please recomplie it or try to use fp32 by set " "config.SetTRTDynamicShapeInfo(min_input_shape, " "max_input_shape, opt_input_shape, true")); #endif } else { PADDLE_THROW(platform::errors::Fatal( "The QKV TRT Plugin's input type should be float or half.")); } return cudaGetLastError() != cudaSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
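Note: the fp16 branch of enqueue in both versions above pads seq_len to a multiple of 8 with round_up and only runs the transpose_qkv_padding / transpose_qkv_unpadding kernels when the length actually changed. A minimal host-side sketch of that decision, reusing the plugin's rounding rule without the PADDLE_ENFORCE check; the lengths below are illustrative, not from the plugin.

#include <cstdio>

// Same rounding rule as the plugin's round_up(): smallest multiple of
// `multiple` that is >= seq_len.
inline int round_up(int seq_len, int multiple = 32) {
  return ((seq_len + multiple - 1) / multiple) * multiple;
}

int main() {
  int real_seq_len = 70;                    // illustrative runtime sequence length
  int seq_len = round_up(real_seq_len, 8);  // fp16 path pads to a multiple of 8
  bool need_padding = (real_seq_len != seq_len);
  std::printf("real=%d padded=%d need_padding=%d\n",
              real_seq_len, seq_len, need_padding);  // real=70 padded=72 need_padding=1
  return 0;
}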
57a8c979ffadbff0edfbb09de0282bccf4fe33b3.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>

#include <hip/hip_runtime_api.h>

#define BASE_TYPE float

__global__ void init(BASE_TYPE *a, const int N)
{
    int id = N * (blockDim.y * blockIdx.y + threadIdx.y) +
             blockDim.x * blockIdx.x + threadIdx.x;
    a[id] = id;
}

BASE_TYPE* gen_array(const int N)
{
    BASE_TYPE *a = new BASE_TYPE[N * N];
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            a[i * N + j] = i * N + j;
    }
    return a;
}

void print_array(BASE_TYPE *a, const int N)
{
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            printf("%3.0f ", a[i * N + j]);
        printf("\n");
    }
    printf("\n");
}

void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
    hipError_t err;
    err = hipMalloc((void **)dev, size);
    if (err != hipSuccess)
        throw err;

    if (host != NULL)
    {
        err = hipMemcpy(*dev, host, size, hipMemcpyHostToDevice);
        if (err != hipSuccess)
            throw err;
    }
}

void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int N)
{
    *grid = dim3(1);
    *block = dim3(N, N, 1);
    printf("Block %d %d %d\n", block->x, block->y, block->z);
    printf("Grid %d %d %d\n", grid->x, grid->y, grid->z);
}

int main()
{
    const int N = 10;
    const size_t size = N * N * sizeof(BASE_TYPE);
    hipError_t err;
    dim3 threadsPerBlock, blocksPerGrid;

    cuda_init_grid_and_block(&blocksPerGrid, &threadsPerBlock, N);

    BASE_TYPE *host_a = gen_array(N);
    BASE_TYPE *dev_a;

    if (host_a == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    print_array(host_a, N);

    try
    {
        cuda_init_array(&dev_a, NULL, size);
    }
    catch (hipError_t err)
    {
        fprintf(stderr, "Failed to allocate device (error code: %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    hipLaunchKernelGGL(( init), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_a, N);

    err = hipMemcpy(host_a, dev_a, size, hipMemcpyDeviceToHost);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device (error code: %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    print_array(host_a, N);

    hipFree(dev_a);
    delete[] host_a;

    return 0;
}
57a8c979ffadbff0edfbb09de0282bccf4fe33b3.cu
#include <stdio.h>
#include <stdlib.h>

#include <cuda_runtime_api.h>

#define BASE_TYPE float

__global__ void init(BASE_TYPE *a, const int N)
{
    int id = N * (blockDim.y * blockIdx.y + threadIdx.y) +
             blockDim.x * blockIdx.x + threadIdx.x;
    a[id] = id;
}

BASE_TYPE* gen_array(const int N)
{
    BASE_TYPE *a = new BASE_TYPE[N * N];
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            a[i * N + j] = i * N + j;
    }
    return a;
}

void print_array(BASE_TYPE *a, const int N)
{
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            printf("%3.0f ", a[i * N + j]);
        printf("\n");
    }
    printf("\n");
}

void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
    cudaError_t err;
    err = cudaMalloc((void **)dev, size);
    if (err != cudaSuccess)
        throw err;

    if (host != NULL)
    {
        err = cudaMemcpy(*dev, host, size, cudaMemcpyHostToDevice);
        if (err != cudaSuccess)
            throw err;
    }
}

void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int N)
{
    *grid = dim3(1);
    *block = dim3(N, N, 1);
    printf("Block %d %d %d\n", block->x, block->y, block->z);
    printf("Grid %d %d %d\n", grid->x, grid->y, grid->z);
}

int main()
{
    const int N = 10;
    const size_t size = N * N * sizeof(BASE_TYPE);
    cudaError_t err;
    dim3 threadsPerBlock, blocksPerGrid;

    cuda_init_grid_and_block(&blocksPerGrid, &threadsPerBlock, N);

    BASE_TYPE *host_a = gen_array(N);
    BASE_TYPE *dev_a;

    if (host_a == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    print_array(host_a, N);

    try
    {
        cuda_init_array(&dev_a, NULL, size);
    }
    catch (cudaError_t err)
    {
        fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    init<<<blocksPerGrid, threadsPerBlock>>>(dev_a, N);

    err = cudaMemcpy(host_a, dev_a, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    print_array(host_a, N);

    cudaFree(dev_a);
    delete[] host_a;

    return 0;
}
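Note: this pair shows the hipify mapping at its smallest. The CUDA runtime header and every cuda* call are replaced one-for-one (cudaMalloc→hipMalloc, cudaMemcpy→hipMemcpy, cudaGetErrorString→hipGetErrorString, cudaFree→hipFree), and the triple-chevron launch init<<<blocksPerGrid, threadsPerBlock>>>(dev_a, N) becomes the hipLaunchKernelGGL macro form. Below is a minimal HIP sketch of the same allocate / launch / copy-back / free sequence, reduced to its error-checked core; the scale_by_two kernel and the sizes are illustrative, not taken from the files above.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale_by_two(float* a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) a[i] *= 2.0f;
}

int main() {
  const int n = 100;
  float host[n];
  for (int i = 0; i < n; ++i) host[i] = float(i);

  float* dev = nullptr;
  hipError_t err = hipMalloc(&dev, n * sizeof(float));
  if (err != hipSuccess) {
    std::fprintf(stderr, "hipMalloc failed: %s\n", hipGetErrorString(err));
    return 1;
  }
  hipMemcpy(dev, host, n * sizeof(float), hipMemcpyHostToDevice);

  // Equivalent of scale_by_two<<<1, 128>>>(dev, n) after hipify.
  hipLaunchKernelGGL(scale_by_two, dim3(1), dim3(128), 0, 0, dev, n);

  hipMemcpy(host, dev, n * sizeof(float), hipMemcpyDeviceToHost);
  hipFree(dev);
  std::printf("host[3] = %.1f\n", host[3]);  // expect 6.0
  return 0;
}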
595c37d5a5f59a62546f8bec21d528cf5cc2ee71.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or bpied warranties, including, but not limited to, the bpied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/utility.hpp" #include "opencv2/gpu/device/reduce.hpp" #include "opencv2/gpu/device/limits.hpp" #include "opencv2/gpu/device/vec_distance.hpp" #include "opencv2/gpu/device/datamov_utils.hpp" namespace cv { namespace gpu { namespace device { namespace bf_match { /////////////////////////////////////////////////////////////////////////////// // Reduction template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, float* s_distance, int* s_trainIdx) { s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, threadIdx.x, less<float>()); } template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, int& bestImgIdx, float* s_distance, int* s_trainIdx, int* s_imgIdx) { s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_imgIdx += threadIdx.y * BLOCK_SIZE; reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, smem_tuple(s_trainIdx, s_imgIdx), thrust::tie(bestTrainIdx, bestImgIdx), threadIdx.x, less<float>()); } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled Cached template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U> __device__ void loadQueryToSmem(int queryIdx, const PtrStepSz<T>& query, U* s_query) { #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolledCached(int queryIdx, const PtrStepSz<T>& query,volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance, int& bestTrainIdx, int& bestImgIdx) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < train.cols) { T val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx)) { bestImgIdx = imgIdx; bestDistance = distVal; bestTrainIdx = trainIdx; } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, 
MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int* bestTrainIdx, int* bestImgIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; int myBestImgIdx = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestImgIdx[queryIdx] = myBestImgIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? 
MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolled(int queryIdx, const PtrStepSz<T>& query,volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance, int& bestTrainIdx, int& bestImgIdx) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx)) { bestImgIdx = imgIdx; bestDistance = distVal; bestTrainIdx = trainIdx; } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if 
(stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int* bestTrainIdx, int* bestImgIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; int myBestImgIdx = -1; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestImgIdx[queryIdx] = myBestImgIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __device__ void loop(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance, int& bestTrainIdx, int& bestImgIdx) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, 
trainIdx)) { bestImgIdx = imgIdx; bestDistance = distVal; bestTrainIdx = trainIdx; } } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int* bestTrainIdx, int* bestImgIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; int myBestImgIdx = -1; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestImgIdx[queryIdx] = myBestImgIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, 
distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match dispatcher template <typename Dist, typename T, typename Mask> void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream) { if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, train, mask, trainIdx, distance, stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, train, mask, trainIdx, distance, stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, train, mask, trainIdx, distance, stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, train, mask, trainIdx, distance, stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, train, mask, trainIdx, distance, stream); }*/ else { match<16, Dist>(query, train, mask, trainIdx, distance, stream); } } template <typename Dist, typename T, typename Mask> void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream) { if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); }*/ else { match<16, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); } } /////////////////////////////////////////////////////////////////////////////// // Match caller template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream) { if (mask.data) { matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask), trainIdx, distance, stream); } else { matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(), trainIdx, distance, stream); } } template void matchL1_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchL1_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchL1_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchL1_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchL1_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const 
PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchL1_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream) { if (mask.data) { matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask), trainIdx, distance, stream); } else { matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(), trainIdx, distance, stream); } } //template void matchL2_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchL2_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchL2_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchL2_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchL2_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchL2_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream) { if (mask.data) { matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask), trainIdx, distance, stream); } else { matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(), trainIdx, distance, stream); } } template void matchHamming_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchHamming_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchHamming_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchHamming_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchHamming_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream); template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& 
trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream) { if (masks.data) { matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); } else { matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } } template void matchL1_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchL1_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchL1_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchL1_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchL1_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchL1_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream) { if (masks.data) { matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); } else { matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } } //template void matchL2_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchL2_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchL2_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchL2_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchL2_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t 
stream); template void matchL2_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& maskCollection, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream) { if (masks.data) { matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); } else { matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } } template void matchHamming_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchHamming_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchHamming_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); //template void matchHamming_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); template void matchHamming_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream); } // namespace bf_match }}} // namespace cv { namespace gpu { namespace device { #endif /* CUDA_DISABLER */
595c37d5a5f59a62546f8bec21d528cf5cc2ee71.cu
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/utility.hpp" #include "opencv2/gpu/device/reduce.hpp" #include "opencv2/gpu/device/limits.hpp" #include "opencv2/gpu/device/vec_distance.hpp" #include "opencv2/gpu/device/datamov_utils.hpp" namespace cv { namespace gpu { namespace device { namespace bf_match { /////////////////////////////////////////////////////////////////////////////// // Reduction template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, float* s_distance, int* s_trainIdx) { s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, threadIdx.x, less<float>()); } template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, int& bestImgIdx, float* s_distance, int* s_trainIdx, int* s_imgIdx) { s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_imgIdx += threadIdx.y * BLOCK_SIZE; reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, smem_tuple(s_trainIdx, s_imgIdx), thrust::tie(bestTrainIdx, bestImgIdx), threadIdx.x, less<float>()); } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled Cached template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U> __device__ void loadQueryToSmem(int queryIdx, const PtrStepSz<T>& query, U* s_query) { #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolledCached(int queryIdx, const PtrStepSz<T>& query,volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance, int& bestTrainIdx, int& bestImgIdx) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < train.cols) { T val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx)) { bestImgIdx = imgIdx; bestDistance = distVal; bestTrainIdx = trainIdx; } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, 
MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int* bestTrainIdx, int* bestImgIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; int myBestImgIdx = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestImgIdx[queryIdx] = myBestImgIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? 
MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolled(int queryIdx, const PtrStepSz<T>& query,volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance, int& bestTrainIdx, int& bestImgIdx) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx)) { bestImgIdx = imgIdx; bestDistance = distVal; bestTrainIdx = trainIdx; } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } 
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int* bestTrainIdx, int* bestImgIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; int myBestImgIdx = -1; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestImgIdx[queryIdx] = myBestImgIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __device__ void loop(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance, int& bestTrainIdx, int& bestImgIdx) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx)) { bestImgIdx = imgIdx; bestDistance = distVal; bestTrainIdx = trainIdx; } } } 
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int* bestTrainIdx, int* bestImgIdx, float* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; float myBestDistance = numeric_limits<float>::max(); int myBestTrainIdx = -1; int myBestImgIdx = -1; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = myBestTrainIdx; bestImgIdx[queryIdx] = myBestImgIdx; bestDistance[queryIdx] = myBestDistance; } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } 
/////////////////////////////////////////////////////////////////////////////// // Match dispatcher template <typename Dist, typename T, typename Mask> void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream) { if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, train, mask, trainIdx, distance, stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, train, mask, trainIdx, distance, stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, train, mask, trainIdx, distance, stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, train, mask, trainIdx, distance, stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, train, mask, trainIdx, distance, stream); }*/ else { match<16, Dist>(query, train, mask, trainIdx, distance, stream); } } template <typename Dist, typename T, typename Mask> void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream) { if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); }*/ else { match<16, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream); } } /////////////////////////////////////////////////////////////////////////////// // Match caller template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream) { if (mask.data) { matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask), trainIdx, distance, stream); } else { matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(), trainIdx, distance, stream); } } template void matchL1_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchL1_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchL1_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchL1_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchL1_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchL1_gpu<float 
>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream) { if (mask.data) { matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask), trainIdx, distance, stream); } else { matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(), trainIdx, distance, stream); } } //template void matchL2_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchL2_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchL2_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchL2_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchL2_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchL2_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream) { if (mask.data) { matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask), trainIdx, distance, stream); } else { matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(), trainIdx, distance, stream); } } template void matchHamming_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchHamming_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchHamming_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchHamming_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchHamming_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream); template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& 
imgIdx, const PtrStepSzf& distance, cudaStream_t stream) { if (masks.data) { matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); } else { matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } } template void matchL1_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchL1_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchL1_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchL1_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchL1_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchL1_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream) { if (masks.data) { matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); } else { matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } } //template void matchL2_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchL2_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchL2_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchL2_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchL2_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchL2_gpu<float >(const PtrStepSzb& query, const 
PtrStepSzb& trains, const PtrStepSz<PtrStepb>& maskCollection, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream) { if (masks.data) { matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); } else { matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } } template void matchHamming_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchHamming_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchHamming_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); //template void matchHamming_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); template void matchHamming_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream); } // namespace bf_match }}} // namespace cv { namespace gpu { namespace device { #endif /* CUDA_DISABLER */
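// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the bf_match pair above. It strips
// the shared-memory tiling used by the loop()/loopUnrolled() helpers down to
// a standalone CUDA kernel that accumulates L2 distances between query rows
// and train rows. The flat row-major descriptor layout, the fixed 16x16 tile
// and the per-thread candidate output are assumptions made for this example;
// the real matchers add masking, multi-image search and the key/value
// reduction (findBestMatch) on top of this pattern. Intended launch: one
// 16x16 block per 16 query rows, e.g.
//     tiledL2DistanceSketch<<<(queryRows + 15) / 16, dim3(16, 16)>>>(...);
// ---------------------------------------------------------------------------
#define SKETCH_TILE 16

__global__ void tiledL2DistanceSketch(const float* query, int queryRows,
                                      const float* train, int trainRows,
                                      int cols, float* candDist, int* candIdx)
{
    __shared__ float s_query[SKETCH_TILE][SKETCH_TILE];
    __shared__ float s_train[SKETCH_TILE][SKETCH_TILE];

    const int queryIdx = blockIdx.x * SKETCH_TILE + threadIdx.y;

    float bestDist = 3.402823466e+38f;  // FLT_MAX
    int   bestIdx  = -1;

    for (int t = 0; t < (trainRows + SKETCH_TILE - 1) / SKETCH_TILE; ++t)
    {
        float dist = 0.f;

        for (int i = 0; i < (cols + SKETCH_TILE - 1) / SKETCH_TILE; ++i)
        {
            const int loadX = i * SKETCH_TILE + threadIdx.x;

            // Cooperative loads: a query tile and a transposed train tile,
            // clamped to the last row just like the original helpers.
            s_query[threadIdx.y][threadIdx.x] = 0.f;
            s_train[threadIdx.x][threadIdx.y] = 0.f;
            if (loadX < cols)
            {
                const int qRow = min(queryIdx, queryRows - 1);
                const int tRow = min(t * SKETCH_TILE + threadIdx.y, trainRows - 1);
                s_query[threadIdx.y][threadIdx.x] = query[qRow * cols + loadX];
                s_train[threadIdx.x][threadIdx.y] = train[tRow * cols + loadX];
            }
            __syncthreads();

            // Each thread pairs its query row (threadIdx.y) with one train
            // row of the current tile (threadIdx.x).
            for (int j = 0; j < SKETCH_TILE; ++j)
            {
                const float d = s_query[threadIdx.y][j] - s_train[j][threadIdx.x];
                dist += d * d;
            }
            __syncthreads();
        }

        const int trainIdx = t * SKETCH_TILE + threadIdx.x;
        if (queryIdx < queryRows && trainIdx < trainRows && dist < bestDist)
        {
            bestDist = dist;
            bestIdx  = trainIdx;
        }
    }

    // The real kernels finish with a shared-memory key/value reduction over
    // threadIdx.x; here each thread just emits its own candidate, so the
    // output buffers hold SKETCH_TILE candidates per query row.
    if (queryIdx < queryRows)
    {
        candDist[queryIdx * SKETCH_TILE + threadIdx.x] = bestDist;
        candIdx [queryIdx * SKETCH_TILE + threadIdx.x] = bestIdx;
    }
}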
b7072b29c1feee68eacd2dce1f3cd1dd40e880aa.hip
// !!! This is a file automatically generated by hipify!!!
#include "matrix.hh"

Matrix::Matrix(size_t x_dim, size_t y_dim, bool hostOnly) :
    shape(x_dim, y_dim), deviceData(nullptr), hostData(nullptr),
    deviceAllocated(false), hostAllocated(false), hostOnly(hostOnly)
{ }

Matrix::Matrix(Shape shape, bool hostOnly) :
    Matrix(shape.x, shape.y, hostOnly)
{ }

void Matrix::allocateDeviceMemory() {
    if (!deviceAllocated) {
        hipError_t ok;
        float * deviceMemory = nullptr;
        ok = hipMalloc(&deviceMemory, shape.x * shape.y * sizeof(float));
        checkCudaErrors(ok);
        deviceData = std::shared_ptr<float>(deviceMemory,
            [&](float* ptr){ hipFree(ptr); });
        deviceAllocated = true;
    }
}

void Matrix::allocateHostMemory() {
    if (!hostAllocated) {
        hostData = std::shared_ptr<float>(new float[shape.x*shape.y],
            [&](float* ptr){ delete[] ptr; });
        hostAllocated = true;
    }
}

void Matrix::allocateMemory() {
    allocateHostMemory();
    if (!hostOnly) {
        allocateDeviceMemory();
    }
}

void Matrix::maybeAllocateMemory(Shape shape) {
    if (!deviceAllocated && !hostAllocated) {
        this->shape = shape;
        allocateMemory();
    }
}

void Matrix::copyHostToDevice() {
    if (deviceAllocated && hostAllocated) {
        hipError_t ok;
        ok = hipMemcpy(deviceData.get(), hostData.get(),
                       shape.x * shape.y * sizeof(float), hipMemcpyHostToDevice);
        checkCudaErrors(ok);
    } else {
        printf("Failed to copy from host to device... nothing initialized\n");
    }
}

void Matrix::copyDeviceToHost() {
    if (deviceAllocated && hostAllocated) {
        hipError_t ok;
        ok = hipMemcpy(hostData.get(), deviceData.get(),
                       shape.x * shape.y * sizeof(float), hipMemcpyDeviceToHost);
        checkCudaErrors(ok);
    } else {
        printf("Failed to copy from device to host... nothing initialized\n");
    }
}

float& Matrix::operator[](const int index) {
    return hostData.get()[index];
}

const float& Matrix::operator[](const int index) const {
    return hostData.get()[index];
}
b7072b29c1feee68eacd2dce1f3cd1dd40e880aa.cu
#include "matrix.hh"

Matrix::Matrix(size_t x_dim, size_t y_dim, bool hostOnly) :
    shape(x_dim, y_dim), deviceData(nullptr), hostData(nullptr),
    deviceAllocated(false), hostAllocated(false), hostOnly(hostOnly)
{ }

Matrix::Matrix(Shape shape, bool hostOnly) :
    Matrix(shape.x, shape.y, hostOnly)
{ }

void Matrix::allocateDeviceMemory() {
    if (!deviceAllocated) {
        cudaError_t ok;
        float * deviceMemory = nullptr;

        ok = cudaMalloc(&deviceMemory, shape.x * shape.y * sizeof(float));
        checkCudaErrors(ok);

        deviceData = std::shared_ptr<float> (deviceMemory, [&](float* ptr){ cudaFree(ptr); });
        deviceAllocated = true;
    }
}

void Matrix::allocateHostMemory() {
    if (!hostAllocated) {
        hostData = std::shared_ptr<float> (new float[shape.x*shape.y], [&](float* ptr){ delete[] ptr; });
        hostAllocated = true;
    }
}

void Matrix::allocateMemory() {
    allocateHostMemory();
    if (!hostOnly) {
        allocateDeviceMemory();
    }
}

void Matrix::maybeAllocateMemory(Shape shape) {
    if (!deviceAllocated && !hostAllocated) {
        this->shape = shape;
        allocateMemory();
    }
}

void Matrix::copyHostToDevice() {
    if (deviceAllocated && hostAllocated) {
        cudaError_t ok;
        ok = cudaMemcpy(deviceData.get(), hostData.get(), shape.x * shape.y * sizeof(float), cudaMemcpyHostToDevice);
        checkCudaErrors(ok);
    }
    else {
        printf("Failed to copy from host to device... nothing initialized\n");
    }
}

void Matrix::copyDeviceToHost() {
    if (deviceAllocated && hostAllocated) {
        cudaError_t ok;
        ok = cudaMemcpy( hostData.get(), deviceData.get(), shape.x * shape.y * sizeof(float), cudaMemcpyDeviceToHost );
        checkCudaErrors(ok);
    }
    else {
        printf("Failed to copy from device to host... nothing initialized\n");
    }
}

float& Matrix::operator[](const int index) {
    return hostData.get()[index];
}

const float& Matrix::operator[](const int index) const {
    return hostData.get()[index];
}
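// ---------------------------------------------------------------------------
// Illustrative host-side usage sketch (not part of the original file pair
// above). It only exercises members that the implementation above defines
// (allocateMemory, operator[], copyHostToDevice, copyDeviceToHost); the
// function name and fill pattern are made up for the example, and it assumes
// those members are publicly accessible as declared in matrix.hh.
#include "matrix.hh"

void matrixRoundTripSketch() {
    Matrix m(4, 4, /*hostOnly=*/false);   // 4x4 matrix with host + device buffers
    m.allocateMemory();

    for (int i = 0; i < 4 * 4; ++i)       // fill the host copy
        m[i] = static_cast<float>(i);

    m.copyHostToDevice();                 // push to the GPU
    // ... launch kernels that read or write the device buffer here ...
    m.copyDeviceToHost();                 // pull results back to the host
}
// ---------------------------------------------------------------------------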
21c325ec5a17a99c7a2337f21196cd44177a0c96.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Fast Accurate Fourier Transform (FAFT) was written by Oscar R. Cabrera L.
// Contributors: Renan Cabrera, Denys I. Bondar.
// Copyright (c) 2016
// All rights reserved.

#include "FAFTp_R2C_C2R.h"

// ax Split 1

__global__ void IFAFT128_C2R_ax1_dev( float *re, float *im, float2 *data65, float dx, float delta, int segment )
{
    int tid = threadIdx.x;
    size_t sector = blockIdx.y*gridDim.x + blockIdx.x;

    re += (sector*64) + tid;
    im += (sector*64) + tid + 32;
    if (tid == 0) data65 += sector;

    float2 y[16];

    load128_half_C2R_ax1( 8, y, re, im, data65, 16, tid );
    GENERAL_FAFT128( y, dx, delta, segment, tid );
    store128_half_C2R_ax1<8>( y, re, data65, 16, tid );
}

extern "C" int IFAFT128_1D_C2R( float *data, float2 *data65, float dx, float delta, int segment )
{
    int success = 1;

    dim3 grid_C2R(1, 1);

    hipLaunchKernelGGL(( IFAFT128_C2R_ax1_dev), dim3(grid_C2R), dim3(16) , 0, 0,  data, data, data65, dx, delta, segment );
    hipDeviceSynchronize();

    return success;
}
21c325ec5a17a99c7a2337f21196cd44177a0c96.cu
// Fast Accurate Fourier Transform (FAFT) was written by Oscar R. Cabrera L.
// Contributors: Renan Cabrera, Denys I. Bondar.
// Copyright (c) 2016
// All rights reserved.

#include "FAFTp_R2C_C2R.h"

// ax Split 1

__global__ void IFAFT128_C2R_ax1_dev( float *re, float *im, float2 *data65, float dx, float delta, int segment )
{
    int tid = threadIdx.x;
    size_t sector = blockIdx.y*gridDim.x + blockIdx.x;

    re += (sector*64) + tid;
    im += (sector*64) + tid + 32;
    if (tid == 0) data65 += sector;

    float2 y[16];

    load128_half_C2R_ax1( 8, y, re, im, data65, 16, tid );
    GENERAL_FAFT128( y, dx, delta, segment, tid );
    store128_half_C2R_ax1<8>( y, re, data65, 16, tid );
}

extern "C" int IFAFT128_1D_C2R( float *data, float2 *data65, float dx, float delta, int segment )
{
    int success = 1;

    dim3 grid_C2R(1, 1);

    IFAFT128_C2R_ax1_dev<<< grid_C2R, 16 >>>( data, data, data65, dx, delta, segment );
    cudaThreadSynchronize();

    return success;
}
4388dba03ab8d8c6f6931f0da1ab13878fed3529.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"

__global__ void broadcast_linear_bias(const float *input_data, float *output_data,
                                      size_t input_size, size_t output_size) {
    size_t id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= output_size)
        return;
    output_data[id] = input_data[id % input_size];
}

int DLGpuLinear(const DLArrayHandle matA, bool transposeA,
                const DLArrayHandle matB, bool transposeB,
                const DLArrayHandle bias, DLArrayHandle matC,
                DLStreamHandle stream_handle = NULL) {
    // cublas assume matrix is column major
    assert(matA->ndim == 2);
    assert(matB->ndim == 2);
    assert(bias->ndim == 1);
    assert(matC->ndim == 2);

    size_t input_size = bias->shape[0];
    size_t size = input_size * matC->shape[0];
    dim3 blocks;
    dim3 threads;
    if (size <= 1024) {
        threads.x = size;
        blocks.x = 1;
    } else {
        threads.x = 1024;
        blocks.x = (size + 1023) / 1024;
    }
    if (stream_handle) {
        hipStream_t *s = (hipStream_t *)(stream_handle->handle);
        hipLaunchKernelGGL(( broadcast_linear_bias), dim3(blocks), dim3(threads), 0, *s,
            (const float *)(bias->data), (float *)(matC->data), input_size, size);
    } else {
        hipLaunchKernelGGL(( broadcast_linear_bias), dim3(blocks), dim3(threads), 0, 0,
            (const float *)(bias->data), (float *)(matC->data), input_size, size);
    }

    int dev_id = (matA->ctx).device_id;
    cublas_init(dev_id, stream_handle);

    float one = 1.0f;
    int m = matC->shape[1];
    int n = matC->shape[0];
    int k = transposeA ? matA->shape[0] : matA->shape[1];
    hipblasSgemm(cublas_map[dev_id], transposeB ? HIPBLAS_OP_T : HIPBLAS_OP_N,
                 transposeA ? HIPBLAS_OP_T : HIPBLAS_OP_N, m, n, k, &one,
                 (const float *)matB->data, !transposeB ? m : k,
                 (const float *)matA->data, !transposeA ? k : n, &one,
                 (float *)matC->data, m);
    return 0;
}
4388dba03ab8d8c6f6931f0da1ab13878fed3529.cu
#include "gpu_runtime.h"

__global__ void broadcast_linear_bias(const float *input_data, float *output_data,
                                      size_t input_size, size_t output_size) {
    size_t id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= output_size)
        return;
    output_data[id] = input_data[id % input_size];
}

int DLGpuLinear(const DLArrayHandle matA, bool transposeA,
                const DLArrayHandle matB, bool transposeB,
                const DLArrayHandle bias, DLArrayHandle matC,
                DLStreamHandle stream_handle = NULL) {
    // cublas assume matrix is column major
    assert(matA->ndim == 2);
    assert(matB->ndim == 2);
    assert(bias->ndim == 1);
    assert(matC->ndim == 2);

    size_t input_size = bias->shape[0];
    size_t size = input_size * matC->shape[0];
    dim3 blocks;
    dim3 threads;
    if (size <= 1024) {
        threads.x = size;
        blocks.x = 1;
    } else {
        threads.x = 1024;
        blocks.x = (size + 1023) / 1024;
    }
    if (stream_handle) {
        cudaStream_t *s = (cudaStream_t *)(stream_handle->handle);
        broadcast_linear_bias<<<blocks, threads, 0, *s>>>(
            (const float *)(bias->data), (float *)(matC->data), input_size, size);
    } else {
        broadcast_linear_bias<<<blocks, threads>>>(
            (const float *)(bias->data), (float *)(matC->data), input_size, size);
    }

    int dev_id = (matA->ctx).device_id;
    cublas_init(dev_id, stream_handle);

    float one = 1.0f;
    int m = matC->shape[1];
    int n = matC->shape[0];
    int k = transposeA ? matA->shape[0] : matA->shape[1];
    cublasSgemm(cublas_map[dev_id], transposeB ? CUBLAS_OP_T : CUBLAS_OP_N,
                transposeA ? CUBLAS_OP_T : CUBLAS_OP_N, m, n, k, &one,
                (const float *)matB->data, !transposeB ? m : k,
                (const float *)matA->data, !transposeA ? k : n, &one,
                (float *)matC->data, m);
    return 0;
}
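// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file pair above) of the
// row-major / column-major trick the cublasSgemm call above relies on.
// cuBLAS assumes column-major storage, while the tensors handled by
// DLGpuLinear are row-major. Because C = A*B is equivalent to C^T = B^T * A^T,
// passing B first and A second with their row-major leading dimensions makes
// the column-major result cuBLAS writes coincide with the row-major C buffer.
// Names and sizes here are hypothetical; error checking is omitted.
#include <cublas_v2.h>

// C (M x N, row-major) = A (M x K, row-major) * B (K x N, row-major)
void rowMajorGemmSketch(cublasHandle_t handle,
                        const float *dA, const float *dB, float *dC,
                        int M, int N, int K) {
    const float alpha = 1.0f, beta = 0.0f;
    // Swap operand order and dimensions so cuBLAS computes C^T column-major,
    // which aliases the row-major C buffer directly.
    cublasSgemm(handle,
                CUBLAS_OP_N, CUBLAS_OP_N,
                N, M, K,
                &alpha,
                dB, N,    // "A" operand = B, leading dimension N
                dA, K,    // "B" operand = A, leading dimension K
                &beta,
                dC, N);   // result, leading dimension N
}
// ---------------------------------------------------------------------------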
8b43d0b037b9b8bee5b00a66546a2af7655e8373.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // MaskEventsIn is auto-generated from csv file struct MaskEventsIn{ int nev; uint *luminosityBlock; int *HLT_Ele32_WPTight_Gsf; int *HLT_IsoMu24; uint *nElectron; uint *nMuon; }; // EventsIn is auto-generated from csv file struct EventsIn{ int nev; int *HLT_Ele32_WPTight_Gsf; int *HLT_IsoMu24; uint *nElectron; uint *nMuon; float *Electron_pt; float *Electron_eta; float *Electron_phi; float *Electron_mass; int *Electron_cutBased; float *Electron_pfRelIso03_all; int *Electron_pdgId; float *Muon_pt; float *Muon_eta; float *Muon_phi; float *Muon_mass; int *Muon_isGlobal; int *Muon_isPFcand; int *Muon_tightId; float *Muon_pfRelIso03_all; int *Muon_pdgId; uint *cumsum_nElectron; uint *cumsum_nMuon; }; // EventsMid is auto-generated from csv file struct EventsMid{ int MAXNLEPTON; int *iPassElectron; uint *iPassMuon; }; // EventsOut is auto-generated from csv file struct EventsOut{ int *channel; int *nPassElectron; int *nPassMuon; float *lepton1Pt; float *lepton1Eta; float *lepton1Phi; float *lepton1M; float *lepton2Pt; float *lepton2Eta; float *lepton2Phi; float *lepton2M; float *dileptonPt; float *dileptonM; float *leptonsDeltaPhi; float *leptonsDeltaR; float *lepton1Pdgid; float *lepton2Pdgid; float *lepton1Reliso; float *lepton2Reliso; }; // some handy lorentz verctor and methords struct P4_PtEtaPhiM{ float pt; float eta; float phi; float m; }; __device__ P4_PtEtaPhiM lorentz_add( P4_PtEtaPhiM *p1, P4_PtEtaPhiM *p2){ float px1 = p1->pt*cos(p1->phi); float py1 = p1->pt*sin(p1->phi); float pz1 = p1->pt*sinh(p1->eta); float pe1 = sqrt(px1*px1 + py1*py1 + pz1*pz1 + p1->m*p1->m); float px2 = p2->pt*cos(p2->phi); float py2 = p2->pt*sin(p2->phi); float pz2 = p2->pt*sinh(p2->eta); float pe2 = sqrt(px2*px2 + py2*py2 + pz2*pz2 + p2->m*p2->m); float qx = px1+px2; float qy = py1+py2; float qz = pz1+pz2; float qe = pe1+pe2; float q_pt = sqrt(qx*qx + qy*qy); float q_eta = 0.0; // FIX ME float q_phi = 0.0; // FIX ME float q_m = sqrt(qe*qe - qx*qx - qy*qy - qz*qz); struct P4_PtEtaPhiM q = {q_pt, q_eta, q_phi, q_m}; return q; } // root function return phi in [-pi,pi] //https://root.cern.ch/doc/master/TVector2_8cxx_source.html#l00103 __device__ float phi_mpi_pi(float x){ while(x>M_PI) x -= 2*M_PI; while(x<-M_PI) x += 2*M_PI; return x; } ////////////// // mask // ////////////// __global__ void knl_mask(MaskEventsIn *evsI, bool *mask) { int iev = blockDim.x*blockIdx.x + threadIdx.x; if (iev < evsI->nev) { bool isPass = false; if ( (evsI->HLT_Ele32_WPTight_Gsf[iev] || evsI->HLT_IsoMu24[iev]) && (evsI->nElectron[iev]>=2 || evsI->nMuon[iev]>=2) ){ isPass = true; } mask[iev] = isPass; } } ////////////////// // obj-electron // ////////////////// __global__ void knl_objectSelection_electron(EventsIn *evsI, EventsMid *evsM, EventsOut *evsO) { int iev = blockDim.x*blockIdx.x + threadIdx.x; if (iev < evsI->nev) { const int cumsum_nObject = evsI->cumsum_nElectron[iev]; const int nObject = evsI->nElectron[iev]; int nPassObject = 0; // loop over all enectrons in the event for( int i = cumsum_nObject; i < cumsum_nObject + nObject; i++){ if (nPassObject >= evsM->MAXNLEPTON) break; if( evsI->Electron_pt[i] > 20 && abs(evsI->Electron_eta[i]) < 2.5 && evsI->Electron_cutBased[i] >= 3 ){ evsM->iPassElectron[iev*evsM->MAXNLEPTON + nPassObject] = i-cumsum_nObject; nPassObject++; } } // end of loop evsO->nPassElectron[iev] = nPassObject; } } ////////////////// // obj-muon // ////////////////// __global__ void knl_objectSelection_muon(EventsIn 
*evsI, EventsMid *evsM, EventsOut *evsO) { int iev = blockDim.x*blockIdx.x + threadIdx.x; if (iev < evsI->nev) { const int cumsum_nObject = evsI->cumsum_nMuon[iev]; const int nObject = evsI->nMuon[iev]; int nPassObject = 0; // loop over all enectrons in the event for( int i = cumsum_nObject; i < cumsum_nObject + nObject; i++){ if (nPassObject >= evsM->MAXNLEPTON) break; if( evsI->Muon_pt[i] > 10 && abs(evsI->Muon_eta[i]) < 2.4 && evsI->Muon_isGlobal[i] == 1 && evsI->Muon_isPFcand[i] == 1 && evsI->Muon_tightId[i] == 1 ){ evsM->iPassMuon[iev*evsM->MAXNLEPTON + nPassObject] = i-cumsum_nObject; nPassObject++; } } // end of loop evsO->nPassMuon[iev] = nPassObject; } } ////////////////// // event selection ////////////////// __global__ void knl_eventSelection(EventsIn *evsI, EventsMid *evsM, EventsOut *evsO) { int iev = blockDim.x*blockIdx.x + threadIdx.x; if (iev < evsI->nev) { int MAXNLEPTON = evsM->MAXNLEPTON; struct P4_PtEtaPhiM lep1, lep2, dilepton; evsO->channel[iev] = -1; if (evsI->HLT_Ele32_WPTight_Gsf[iev]==1 && evsO->nPassElectron[iev]>=2 && evsO->nPassMuon[iev]==0){ // get index int l1 = evsM->iPassElectron[iev*MAXNLEPTON+0] + evsI->cumsum_nElectron[iev]; int l2 = evsM->iPassElectron[iev*MAXNLEPTON+1] + evsI->cumsum_nElectron[iev]; // pt threshold if (evsI->Electron_pt[l1]<32 || evsI->Electron_pt[l2]<20) return; // opposite sign if (evsI->Electron_pdgId[l1] * evsI->Electron_pdgId[l2] > 0) return; // dilepton mass veto lep1 = {evsI->Electron_pt[l1], evsI->Electron_eta[l1], evsI->Electron_phi[l1], evsI->Electron_mass[l1]}; lep2 = {evsI->Electron_pt[l2], evsI->Electron_eta[l2], evsI->Electron_phi[l2], evsI->Electron_mass[l2]}; dilepton = lorentz_add(&lep1, &lep2); if(dilepton.m<60 || dilepton.m>130) return; // fillout evsO evsO->channel[iev] = 0; evsO->lepton1Pdgid[iev] = evsI->Electron_pdgId[l1]; evsO->lepton2Pdgid[iev] = evsI->Electron_pdgId[l2]; evsO->lepton1Reliso[iev] = evsI->Electron_pfRelIso03_all[l1]; evsO->lepton2Reliso[iev] = evsI->Electron_pfRelIso03_all[l2]; } else if (evsI->HLT_IsoMu24[iev]==1 && evsO->nPassElectron[iev]==0 && evsO->nPassMuon[iev]>=2){ // get index int l1 = evsM->iPassMuon[iev*MAXNLEPTON+0] + evsI->cumsum_nMuon[iev]; int l2 = evsM->iPassMuon[iev*MAXNLEPTON+1] + evsI->cumsum_nMuon[iev]; // pt threshold if (evsI->Muon_pt[l1]<27 || evsI->Muon_pt[l2]<10) return; // opposite sign if (evsI->Muon_pdgId[l1] * evsI->Muon_pdgId[l2] > 0) return; // dilepton mass veto lep1 = {evsI->Muon_pt[l1], evsI->Muon_eta[l1], evsI->Muon_phi[l1], evsI->Muon_mass[l1]}; lep2 = {evsI->Muon_pt[l2], evsI->Muon_eta[l2], evsI->Muon_phi[l2], evsI->Muon_mass[l2]}; dilepton = lorentz_add(&lep1, &lep2); if(dilepton.m<60 || dilepton.m>130) return; // fillout evsO evsO->channel[iev] = 1; evsO->lepton1Pdgid[iev] = evsI->Muon_pdgId[l1]; evsO->lepton2Pdgid[iev] = evsI->Muon_pdgId[l2]; evsO->lepton1Reliso[iev] = evsI->Muon_pfRelIso03_all[l1]; evsO->lepton2Reliso[iev] = evsI->Muon_pfRelIso03_all[l2]; } /////////////////// // fill leptons p4 /////////////////// if (evsO->channel[iev] != -1){ // lep1 p4 evsO->lepton1Pt[iev] = lep1.pt; evsO->lepton1Eta[iev] = lep1.eta; evsO->lepton1Phi[iev] = lep1.phi; evsO->lepton1M[iev] = lep1.m; // lep2 p4 evsO->lepton2Pt[iev] = lep2.pt; evsO->lepton2Eta[iev] = lep2.eta; evsO->lepton2Phi[iev] = lep2.phi; evsO->lepton2M[iev] = lep2.m; // dilepton p4 evsO->dileptonPt[iev] = dilepton.pt; evsO->dileptonM[iev] = dilepton.m; // lep1-lep2 delta float deltaPhi = phi_mpi_pi(lep1.phi-lep2.phi); evsO->leptonsDeltaR[iev] = sqrt((lep1.eta-lep2.eta)*(lep1.eta-lep2.eta) + 
deltaPhi*deltaPhi) ; evsO->leptonsDeltaPhi[iev] = deltaPhi; } } }
8b43d0b037b9b8bee5b00a66546a2af7655e8373.cu
// MaskEventsIn is auto-generated from csv file struct MaskEventsIn{ int nev; uint *luminosityBlock; int *HLT_Ele32_WPTight_Gsf; int *HLT_IsoMu24; uint *nElectron; uint *nMuon; }; // EventsIn is auto-generated from csv file struct EventsIn{ int nev; int *HLT_Ele32_WPTight_Gsf; int *HLT_IsoMu24; uint *nElectron; uint *nMuon; float *Electron_pt; float *Electron_eta; float *Electron_phi; float *Electron_mass; int *Electron_cutBased; float *Electron_pfRelIso03_all; int *Electron_pdgId; float *Muon_pt; float *Muon_eta; float *Muon_phi; float *Muon_mass; int *Muon_isGlobal; int *Muon_isPFcand; int *Muon_tightId; float *Muon_pfRelIso03_all; int *Muon_pdgId; uint *cumsum_nElectron; uint *cumsum_nMuon; }; // EventsMid is auto-generated from csv file struct EventsMid{ int MAXNLEPTON; int *iPassElectron; uint *iPassMuon; }; // EventsOut is auto-generated from csv file struct EventsOut{ int *channel; int *nPassElectron; int *nPassMuon; float *lepton1Pt; float *lepton1Eta; float *lepton1Phi; float *lepton1M; float *lepton2Pt; float *lepton2Eta; float *lepton2Phi; float *lepton2M; float *dileptonPt; float *dileptonM; float *leptonsDeltaPhi; float *leptonsDeltaR; float *lepton1Pdgid; float *lepton2Pdgid; float *lepton1Reliso; float *lepton2Reliso; }; // some handy lorentz verctor and methords struct P4_PtEtaPhiM{ float pt; float eta; float phi; float m; }; __device__ P4_PtEtaPhiM lorentz_add( P4_PtEtaPhiM *p1, P4_PtEtaPhiM *p2){ float px1 = p1->pt*cos(p1->phi); float py1 = p1->pt*sin(p1->phi); float pz1 = p1->pt*sinh(p1->eta); float pe1 = sqrt(px1*px1 + py1*py1 + pz1*pz1 + p1->m*p1->m); float px2 = p2->pt*cos(p2->phi); float py2 = p2->pt*sin(p2->phi); float pz2 = p2->pt*sinh(p2->eta); float pe2 = sqrt(px2*px2 + py2*py2 + pz2*pz2 + p2->m*p2->m); float qx = px1+px2; float qy = py1+py2; float qz = pz1+pz2; float qe = pe1+pe2; float q_pt = sqrt(qx*qx + qy*qy); float q_eta = 0.0; // FIX ME float q_phi = 0.0; // FIX ME float q_m = sqrt(qe*qe - qx*qx - qy*qy - qz*qz); struct P4_PtEtaPhiM q = {q_pt, q_eta, q_phi, q_m}; return q; } // root function return phi in [-pi,pi] //https://root.cern.ch/doc/master/TVector2_8cxx_source.html#l00103 __device__ float phi_mpi_pi(float x){ while(x>M_PI) x -= 2*M_PI; while(x<-M_PI) x += 2*M_PI; return x; } ////////////// // mask // ////////////// __global__ void knl_mask(MaskEventsIn *evsI, bool *mask) { int iev = blockDim.x*blockIdx.x + threadIdx.x; if (iev < evsI->nev) { bool isPass = false; if ( (evsI->HLT_Ele32_WPTight_Gsf[iev] || evsI->HLT_IsoMu24[iev]) && (evsI->nElectron[iev]>=2 || evsI->nMuon[iev]>=2) ){ isPass = true; } mask[iev] = isPass; } } ////////////////// // obj-electron // ////////////////// __global__ void knl_objectSelection_electron(EventsIn *evsI, EventsMid *evsM, EventsOut *evsO) { int iev = blockDim.x*blockIdx.x + threadIdx.x; if (iev < evsI->nev) { const int cumsum_nObject = evsI->cumsum_nElectron[iev]; const int nObject = evsI->nElectron[iev]; int nPassObject = 0; // loop over all enectrons in the event for( int i = cumsum_nObject; i < cumsum_nObject + nObject; i++){ if (nPassObject >= evsM->MAXNLEPTON) break; if( evsI->Electron_pt[i] > 20 && abs(evsI->Electron_eta[i]) < 2.5 && evsI->Electron_cutBased[i] >= 3 ){ evsM->iPassElectron[iev*evsM->MAXNLEPTON + nPassObject] = i-cumsum_nObject; nPassObject++; } } // end of loop evsO->nPassElectron[iev] = nPassObject; } } ////////////////// // obj-muon // ////////////////// __global__ void knl_objectSelection_muon(EventsIn *evsI, EventsMid *evsM, EventsOut *evsO) { int iev = blockDim.x*blockIdx.x + threadIdx.x; if 
(iev < evsI->nev) { const int cumsum_nObject = evsI->cumsum_nMuon[iev]; const int nObject = evsI->nMuon[iev]; int nPassObject = 0; // loop over all enectrons in the event for( int i = cumsum_nObject; i < cumsum_nObject + nObject; i++){ if (nPassObject >= evsM->MAXNLEPTON) break; if( evsI->Muon_pt[i] > 10 && abs(evsI->Muon_eta[i]) < 2.4 && evsI->Muon_isGlobal[i] == 1 && evsI->Muon_isPFcand[i] == 1 && evsI->Muon_tightId[i] == 1 ){ evsM->iPassMuon[iev*evsM->MAXNLEPTON + nPassObject] = i-cumsum_nObject; nPassObject++; } } // end of loop evsO->nPassMuon[iev] = nPassObject; } } ////////////////// // event selection ////////////////// __global__ void knl_eventSelection(EventsIn *evsI, EventsMid *evsM, EventsOut *evsO) { int iev = blockDim.x*blockIdx.x + threadIdx.x; if (iev < evsI->nev) { int MAXNLEPTON = evsM->MAXNLEPTON; struct P4_PtEtaPhiM lep1, lep2, dilepton; evsO->channel[iev] = -1; if (evsI->HLT_Ele32_WPTight_Gsf[iev]==1 && evsO->nPassElectron[iev]>=2 && evsO->nPassMuon[iev]==0){ // get index int l1 = evsM->iPassElectron[iev*MAXNLEPTON+0] + evsI->cumsum_nElectron[iev]; int l2 = evsM->iPassElectron[iev*MAXNLEPTON+1] + evsI->cumsum_nElectron[iev]; // pt threshold if (evsI->Electron_pt[l1]<32 || evsI->Electron_pt[l2]<20) return; // opposite sign if (evsI->Electron_pdgId[l1] * evsI->Electron_pdgId[l2] > 0) return; // dilepton mass veto lep1 = {evsI->Electron_pt[l1], evsI->Electron_eta[l1], evsI->Electron_phi[l1], evsI->Electron_mass[l1]}; lep2 = {evsI->Electron_pt[l2], evsI->Electron_eta[l2], evsI->Electron_phi[l2], evsI->Electron_mass[l2]}; dilepton = lorentz_add(&lep1, &lep2); if(dilepton.m<60 || dilepton.m>130) return; // fillout evsO evsO->channel[iev] = 0; evsO->lepton1Pdgid[iev] = evsI->Electron_pdgId[l1]; evsO->lepton2Pdgid[iev] = evsI->Electron_pdgId[l2]; evsO->lepton1Reliso[iev] = evsI->Electron_pfRelIso03_all[l1]; evsO->lepton2Reliso[iev] = evsI->Electron_pfRelIso03_all[l2]; } else if (evsI->HLT_IsoMu24[iev]==1 && evsO->nPassElectron[iev]==0 && evsO->nPassMuon[iev]>=2){ // get index int l1 = evsM->iPassMuon[iev*MAXNLEPTON+0] + evsI->cumsum_nMuon[iev]; int l2 = evsM->iPassMuon[iev*MAXNLEPTON+1] + evsI->cumsum_nMuon[iev]; // pt threshold if (evsI->Muon_pt[l1]<27 || evsI->Muon_pt[l2]<10) return; // opposite sign if (evsI->Muon_pdgId[l1] * evsI->Muon_pdgId[l2] > 0) return; // dilepton mass veto lep1 = {evsI->Muon_pt[l1], evsI->Muon_eta[l1], evsI->Muon_phi[l1], evsI->Muon_mass[l1]}; lep2 = {evsI->Muon_pt[l2], evsI->Muon_eta[l2], evsI->Muon_phi[l2], evsI->Muon_mass[l2]}; dilepton = lorentz_add(&lep1, &lep2); if(dilepton.m<60 || dilepton.m>130) return; // fillout evsO evsO->channel[iev] = 1; evsO->lepton1Pdgid[iev] = evsI->Muon_pdgId[l1]; evsO->lepton2Pdgid[iev] = evsI->Muon_pdgId[l2]; evsO->lepton1Reliso[iev] = evsI->Muon_pfRelIso03_all[l1]; evsO->lepton2Reliso[iev] = evsI->Muon_pfRelIso03_all[l2]; } /////////////////// // fill leptons p4 /////////////////// if (evsO->channel[iev] != -1){ // lep1 p4 evsO->lepton1Pt[iev] = lep1.pt; evsO->lepton1Eta[iev] = lep1.eta; evsO->lepton1Phi[iev] = lep1.phi; evsO->lepton1M[iev] = lep1.m; // lep2 p4 evsO->lepton2Pt[iev] = lep2.pt; evsO->lepton2Eta[iev] = lep2.eta; evsO->lepton2Phi[iev] = lep2.phi; evsO->lepton2M[iev] = lep2.m; // dilepton p4 evsO->dileptonPt[iev] = dilepton.pt; evsO->dileptonM[iev] = dilepton.m; // lep1-lep2 delta float deltaPhi = phi_mpi_pi(lep1.phi-lep2.phi); evsO->leptonsDeltaR[iev] = sqrt((lep1.eta-lep2.eta)*(lep1.eta-lep2.eta) + deltaPhi*deltaPhi) ; evsO->leptonsDeltaPhi[iev] = deltaPhi; } } }
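// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file pair above). The
// lorentz_add() in both files leaves q_eta and q_phi as 0.0 with "FIX ME"
// comments. One conventional way to recover them from the summed momentum
// components is shown below; this is a hedged completion, not necessarily the
// authors' intended fix, and it reuses the P4_PtEtaPhiM struct defined above.
// The pseudorapidity formula assumes q_pt > 0.
__device__ P4_PtEtaPhiM lorentz_add_sketch(P4_PtEtaPhiM *p1, P4_PtEtaPhiM *p2) {
    float px1 = p1->pt*cosf(p1->phi), py1 = p1->pt*sinf(p1->phi), pz1 = p1->pt*sinhf(p1->eta);
    float pe1 = sqrtf(px1*px1 + py1*py1 + pz1*pz1 + p1->m*p1->m);
    float px2 = p2->pt*cosf(p2->phi), py2 = p2->pt*sinf(p2->phi), pz2 = p2->pt*sinhf(p2->eta);
    float pe2 = sqrtf(px2*px2 + py2*py2 + pz2*pz2 + p2->m*p2->m);

    float qx = px1 + px2, qy = py1 + py2, qz = pz1 + pz2, qe = pe1 + pe2;

    float q_pt  = sqrtf(qx*qx + qy*qy);
    float q_phi = atan2f(qy, qx);        // azimuth from the summed px, py
    float q_eta = asinhf(qz / q_pt);     // pseudorapidity eta = asinh(pz/pt)
    float q_m   = sqrtf(qe*qe - qx*qx - qy*qy - qz*qz);

    struct P4_PtEtaPhiM q = {q_pt, q_eta, q_phi, q_m};
    return q;
}
// ---------------------------------------------------------------------------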
6c05c08bbd3daf022e91d1552465795f4fa0bdda.hip
// !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <hip/hip_runtime.h>
#include "cluster_cuda.cuh"

// This assumes address stores the average of n elements atomically updates
// address to store the average of n + 1 elements (the n elements as well as
// val). This might be useful for updating cluster centers.
// modified from http://stackoverflow.com/a/17401122
__device__ float atomicUpdateAverage(float* address, int n, float val) {
  int* address_as_i = (int*) address;
  int old = *address_as_i;
  int assumed;
  do {
    assumed = old;
    float next_val = (n * __int_as_float(assumed) + val) / (n + 1);
    old = ::atomicCAS(address_as_i, assumed, __float_as_int(next_val));
  } while (assumed != old);
  return __int_as_float(old);
}

// computes the distance squared between vectors a and b where vectors have
// length size and stride stride.
__device__ float squared_distance(float *a, float *b, int stride, int size) {
  float dist = 0.0;
  for (int i=0; i < size; i++) {
    float diff = a[stride * i] - b[stride * i];
    dist += diff * diff;
  }
  return dist;
}

/*
 * Notationally, all matrices are column majors, so if I say that matrix Z is
 * of size m * n, then the stride in the m axis is 1. For purposes of
 * optimization (particularly coalesced accesses), you can change the format of
 * any array.
 *
 * clusters is a REVIEW_DIM * k array containing the location of each of the k
 * cluster centers.
 *
 * cluster_counts is a k element array containing how many data points are in
 * each cluster.
 *
 * k is the number of clusters.
 *
 * data is a REVIEW_DIM * batch_size array containing the batch of reviews to
 * cluster. Note that each review is contiguous (so elements 0 through 49 are
 * review 0, ...)
 *
 * output is a batch_size array that contains the index of the cluster to which
 * each review is the closest to.
 *
 * batch_size is the number of reviews this kernel must handle.
 */
__global__ void sloppyClusterKernel(float *clusters, int *cluster_counts, int k,
                                    float *data, int *output, int batch_size) {
  // For each batch of reviews
  unsigned int review = blockIdx.x * blockDim.x + threadIdx.x;

  // For each review
  while (review < batch_size){
    /* Checking the distance to all cluster centers */
    // Initialize the minimum distance cluster
    float mindist = squared_distance(&data[review * REVIEW_DIM], &clusters[0], 1, REVIEW_DIM);
    float mincompare;
    int minclust = 0;

    // For each cluster center
    for (int i = 1; i < k; i++){
      mincompare = squared_distance(&data[review * REVIEW_DIM], &clusters[i * REVIEW_DIM], 1, REVIEW_DIM);
      if (mincompare < mindist){
        mindist = mincompare;
        minclust = i;
      }
    }

    /* Update output array to indicate which cluster this review
     * belongs to. */
    output[review] = minclust;

    /* Increment the cluster population count to account for the
     * updated cluster */
    atomicAdd(&cluster_counts[minclust], 1);

    /* Update the cluster center */
    // For each cluster center
    for (int i = 0; i < k; i++){
      // For each element of the review
      for (int j = 0; j < REVIEW_DIM; j++){
        clusters[j + (i * REVIEW_DIM)] = atomicUpdateAverage(&clusters[j + (i * REVIEW_DIM)],
                                                             REVIEW_DIM,
                                                             data[j + (review * REVIEW_DIM)]);
      }
    }

    review += blockDim.x * gridDim.x;
  }
}

void cudaCluster(float *clusters, int *cluster_counts, int k,
                 float *data, int *output, int batch_size,
                 hipStream_t stream) {
  int block_size = (batch_size < 1024) ? batch_size : 1024;

  // grid_size = CEIL(batch_size / block_size)
  int grid_size = (batch_size + block_size - 1) / block_size;
  int shmem_bytes = 0;

  // launch grid_size blocks of block_size threads (grid dimension comes first)
  hipLaunchKernelGGL(( sloppyClusterKernel), dim3(grid_size), dim3(block_size), shmem_bytes, stream,
    clusters, cluster_counts, k, data, output, batch_size);
}
6c05c08bbd3daf022e91d1552465795f4fa0bdda.cu
#include <cassert>
#include <cuda_runtime.h>
#include "cluster_cuda.cuh"

// This assumes address stores the average of n elements atomically updates
// address to store the average of n + 1 elements (the n elements as well as
// val). This might be useful for updating cluster centers.
// modified from http://stackoverflow.com/a/17401122
__device__ float atomicUpdateAverage(float* address, int n, float val) {
  int* address_as_i = (int*) address;
  int old = *address_as_i;
  int assumed;
  do {
    assumed = old;
    float next_val = (n * __int_as_float(assumed) + val) / (n + 1);
    old = ::atomicCAS(address_as_i, assumed, __float_as_int(next_val));
  } while (assumed != old);
  return __int_as_float(old);
}

// computes the distance squared between vectors a and b where vectors have
// length size and stride stride.
__device__ float squared_distance(float *a, float *b, int stride, int size) {
  float dist = 0.0;
  for (int i=0; i < size; i++) {
    float diff = a[stride * i] - b[stride * i];
    dist += diff * diff;
  }
  return dist;
}

/*
 * Notationally, all matrices are column majors, so if I say that matrix Z is
 * of size m * n, then the stride in the m axis is 1. For purposes of
 * optimization (particularly coalesced accesses), you can change the format of
 * any array.
 *
 * clusters is a REVIEW_DIM * k array containing the location of each of the k
 * cluster centers.
 *
 * cluster_counts is a k element array containing how many data points are in
 * each cluster.
 *
 * k is the number of clusters.
 *
 * data is a REVIEW_DIM * batch_size array containing the batch of reviews to
 * cluster. Note that each review is contiguous (so elements 0 through 49 are
 * review 0, ...)
 *
 * output is a batch_size array that contains the index of the cluster to which
 * each review is the closest to.
 *
 * batch_size is the number of reviews this kernel must handle.
 */
__global__ void sloppyClusterKernel(float *clusters, int *cluster_counts, int k,
                                    float *data, int *output, int batch_size) {
  // For each batch of reviews
  unsigned int review = blockIdx.x * blockDim.x + threadIdx.x;

  // For each review
  while (review < batch_size){
    /* Checking the distance to all cluster centers */
    // Initialize the minimum distance cluster
    float mindist = squared_distance(&data[review * REVIEW_DIM], &clusters[0], 1, REVIEW_DIM);
    float mincompare;
    int minclust = 0;

    // For each cluster center
    for (int i = 1; i < k; i++){
      mincompare = squared_distance(&data[review * REVIEW_DIM], &clusters[i * REVIEW_DIM], 1, REVIEW_DIM);
      if (mincompare < mindist){
        mindist = mincompare;
        minclust = i;
      }
    }

    /* Update output array to indicate which cluster this review
     * belongs to. */
    output[review] = minclust;

    /* Increment the cluster population count to account for the
     * updated cluster */
    atomicAdd(&cluster_counts[minclust], 1);

    /* Update the cluster center */
    // For each cluster center
    for (int i = 0; i < k; i++){
      // For each element of the review
      for (int j = 0; j < REVIEW_DIM; j++){
        clusters[j + (i * REVIEW_DIM)] = atomicUpdateAverage(&clusters[j + (i * REVIEW_DIM)],
                                                             REVIEW_DIM,
                                                             data[j + (review * REVIEW_DIM)]);
      }
    }

    review += blockDim.x * gridDim.x;
  }
}

void cudaCluster(float *clusters, int *cluster_counts, int k,
                 float *data, int *output, int batch_size,
                 cudaStream_t stream) {
  int block_size = (batch_size < 1024) ? batch_size : 1024;

  // grid_size = CEIL(batch_size / block_size)
  int grid_size = (batch_size + block_size - 1) / block_size;
  int shmem_bytes = 0;

  // launch grid_size blocks of block_size threads (grid dimension comes first)
  sloppyClusterKernel<<<grid_size, block_size, shmem_bytes, stream>>>(
    clusters, cluster_counts, k, data, output, batch_size);
}
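// ---------------------------------------------------------------------------
// Illustrative host-side driver (not part of the original file pair above).
// A minimal sketch of how cudaCluster() might be invoked for one batch. It
// assumes REVIEW_DIM and the cudaCluster prototype come from cluster_cuda.cuh
// (the comments above suggest REVIEW_DIM is 50), that the cluster centers and
// counts were initialized elsewhere, and it omits error checking.
#include <cuda_runtime.h>
#include "cluster_cuda.cuh"

void clusterBatchSketch(const float *h_batch, int batch_size, int k,
                        float *d_clusters, int *d_cluster_counts) {
    float *d_data;
    int *d_output;
    cudaMalloc(&d_data, batch_size * REVIEW_DIM * sizeof(float));
    cudaMalloc(&d_output, batch_size * sizeof(int));

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // Stage the batch on the device and run one sloppy clustering pass on it.
    cudaMemcpyAsync(d_data, h_batch, batch_size * REVIEW_DIM * sizeof(float),
                    cudaMemcpyHostToDevice, stream);
    cudaCluster(d_clusters, d_cluster_counts, k, d_data, d_output, batch_size, stream);
    cudaStreamSynchronize(stream);

    cudaStreamDestroy(stream);
    cudaFree(d_data);
    cudaFree(d_output);
}
// ---------------------------------------------------------------------------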
08bf54c298805dbe862ac1805124b963bd0d99b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THH/THHAtomics.cuh> #include <THH/THHGeneral.h> #include <THH/THHNumerics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { namespace { __device__ inline int start_index(int a, int b, int c) { return (int)::floor((float)(a * c) / b); } __device__ inline int end_index(int a, int b, int c) { return (int)::ceil((float)((a + 1) * c) / b); } // 4d tensor B x D x H x W /* * Description: * this function adaptively maxpools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output, 4D argmax x and y */ template <typename T> __global__ void adaptivemaxpool(T *input, T *output, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; // select input/output plane output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; indices = indices + o_plane*osizeH*osizeW; // For all output pixels... for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the mean of the input image... 
T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; int argmax = istartH * isizeW + istartW; T max = at::numeric_limits<T>::lower_bound(); // -Infinity int ih, iw; for(ih = 0; ih < kH; ih++) { for(iw = 0; iw < kW; iw++) { T val = ptr_input[iw*istrideW]; if ((val > max) || THCNumerics<T>::isnan(val)) { max = val; argmax = (ih+istartH)*isizeW + iw+istartW; } } ptr_input += istrideH; // next input line } // Update output and argmax *ptr_output = max; *ptr_ind = argmax; } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename T> __global__ void adaptivemaxgradinput(T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; //int k = blockIdx.x % sizeD; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); gradInput[argmax] += z; } } } /* * Description: * this function computes the gradInput from weight and gradOutput * when kH != dH or kW != dW (uses atomic add) */ template <typename T> __global__ void atomicadaptivemaxgradinput( T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); // atomic add since different threads could update same variable gpuAtomicAdd(&(gradInput[argmax]), z); } } } void adaptive_max_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { TensorArg grad_input_arg{ gradInput, "gradInput", 1 }; TensorArg grad_output_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("adaptive_max_pool2d_out_cuda", {grad_input_arg, grad_output_arg, input_arg, indices_arg}); bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests Tensor gradOutput = gradOutput_.contiguous(); if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t osizeH = gradOutput.size(1); int64_t osizeW = gradOutput.size(2); //bool atomic = (isizeH%osizeH != 0) || 
(isizeW%osizeW != 0); gradInput.resize_as_(input); gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } } ); } else { int64_t sizeB = input.size(0); int64_t sizeD = input.size(1); int64_t isizeH = input.size(2); int64_t isizeW = input.size(3); int64_t osizeH = gradOutput.size(2); int64_t osizeW = gradOutput.size(3); gradInput.resize_as_(input); gradInput.zero_(); //bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeB*sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( adaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } } ); } } } // namespace // 4d tensor B x D x H x W TORCH_IMPL_FUNC(adaptive_max_pool2d_out_cuda) (const Tensor& input, IntArrayRef output_size, const Tensor& output, const Tensor& indices) { TensorArg output_arg{output, "output", 1}; TensorArg indices_arg{indices, "indices", 2}; TensorArg input_arg{input, "input", 3}; checkAllSameGPU( "adaptive_max_pool2d_cuda", {output_arg, indices_arg, input_arg}); int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t istrideD = input.stride(0); int64_t istrideH = input.stride(1); int64_t istrideW = input.stride(2); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_cuda", [&] { scalar_t* input_data = input.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel hipLaunchKernelGGL(( adaptivemaxpool), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } else { Tensor input_ = input.contiguous(); int64_t sizeB = input_.size(0); int64_t sizeD = input_.size(1); int64_t isizeH = input_.size(2); int64_t isizeW = input_.size(3); int64_t istrideD = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input_.scalar_type(), "adaptive_max_pool2d_cuda", [&] { scalar_t* input_data = input_.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel hipLaunchKernelGGL(( adaptivemaxpool), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } } Tensor& adaptive_max_pool2d_backward_out_cuda(const Tensor& gradOutput_, const Tensor& input, const Tensor& indices, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_max_pool2d_backward_out_cuda"); adaptive_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, indices); return gradInput; } Tensor adaptive_max_pool2d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_max_pool2d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); adaptive_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, indices); return gradInput; } } // at::native } // at
08bf54c298805dbe862ac1805124b963bd0d99b5.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THC/THCAtomics.cuh> #include <THC/THCGeneral.h> #include <THC/THCNumerics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { namespace { __device__ inline int start_index(int a, int b, int c) { return (int)std::floor((float)(a * c) / b); } __device__ inline int end_index(int a, int b, int c) { return (int)std::ceil((float)((a + 1) * c) / b); } // 4d tensor B x D x H x W /* * Description: * this function adaptively maxpools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output, 4D argmax x and y */ template <typename T> __global__ void adaptivemaxpool(T *input, T *output, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; // select input/output plane output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; indices = indices + o_plane*osizeH*osizeW; // For all output pixels... for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the mean of the input image... 
T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; int argmax = istartH * isizeW + istartW; T max = at::numeric_limits<T>::lower_bound(); // -Infinity int ih, iw; for(ih = 0; ih < kH; ih++) { for(iw = 0; iw < kW; iw++) { T val = ptr_input[iw*istrideW]; if ((val > max) || THCNumerics<T>::isnan(val)) { max = val; argmax = (ih+istartH)*isizeW + iw+istartW; } } ptr_input += istrideH; // next input line } // Update output and argmax *ptr_output = max; *ptr_ind = argmax; } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename T> __global__ void adaptivemaxgradinput(T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; //int k = blockIdx.x % sizeD; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); gradInput[argmax] += z; } } } /* * Description: * this function computes the gradInput from weight and gradOutput * when kH != dH or kW != dW (uses atomic add) */ template <typename T> __global__ void atomicadaptivemaxgradinput( T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); // atomic add since different threads could update same variable gpuAtomicAdd(&(gradInput[argmax]), z); } } } void adaptive_max_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { TensorArg grad_input_arg{ gradInput, "gradInput", 1 }; TensorArg grad_output_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("adaptive_max_pool2d_out_cuda", {grad_input_arg, grad_output_arg, input_arg, indices_arg}); bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests Tensor gradOutput = gradOutput_.contiguous(); if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t osizeH = gradOutput.size(1); int64_t osizeW = gradOutput.size(2); //bool atomic = (isizeH%osizeH != 0) || 
(isizeW%osizeW != 0); gradInput.resize_as_(input); gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptivemaxgradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel atomicadaptivemaxgradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } ); } else { int64_t sizeB = input.size(0); int64_t sizeD = input.size(1); int64_t isizeH = input.size(2); int64_t isizeW = input.size(3); int64_t osizeH = gradOutput.size(2); int64_t osizeW = gradOutput.size(3); gradInput.resize_as_(input); gradInput.zero_(); //bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeB*sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptivemaxgradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel, accumulate gradients atomically adaptivemaxgradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } ); } } } // namespace // 4d tensor B x D x H x W TORCH_IMPL_FUNC(adaptive_max_pool2d_out_cuda) (const Tensor& input, IntArrayRef output_size, const Tensor& output, const Tensor& indices) { TensorArg output_arg{output, "output", 1}; TensorArg indices_arg{indices, "indices", 2}; TensorArg input_arg{input, "input", 3}; checkAllSameGPU( "adaptive_max_pool2d_cuda", {output_arg, indices_arg, input_arg}); int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t istrideD = input.stride(0); int64_t istrideH = input.stride(1); int64_t istrideW = input.stride(2); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_cuda", [&] { scalar_t* input_data = input.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel adaptivemaxpool<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } else { Tensor input_ = input.contiguous(); int64_t sizeB = input_.size(0); int64_t sizeD = input_.size(1); int64_t isizeH = input_.size(2); int64_t isizeW = input_.size(3); int64_t istrideD = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input_.scalar_type(), "adaptive_max_pool2d_cuda", [&] { scalar_t* input_data = input_.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel adaptivemaxpool<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } Tensor& adaptive_max_pool2d_backward_out_cuda(const Tensor& gradOutput_, const Tensor& input, const Tensor& indices, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_max_pool2d_backward_out_cuda"); adaptive_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, indices); return gradInput; } Tensor adaptive_max_pool2d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_max_pool2d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); adaptive_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, indices); return gradInput; } } // at::native } // at
d8a551b4f2681862518461bde57e0d2902da672b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file test_warp_tools.cu * \brief CUDA kernels for testing warp-level primitives. */ #include "test_warp_tools.cuh" #include "hoomd/WarpTools.cuh" #ifdef __HIP_PLATFORM_HCC__ #define BLOCK_SIZE 64 #define MAX_TPP 64 #else #define BLOCK_SIZE 32 #define MAX_TPP 32 #endif //! Performs an iterative warp reduction on a data set using \a tpp threads per row. /*! * \param d_data Data to scan as a N x width matrix. * \param d_reduce Output of the reduction at each step. * \param d_sum Total sum for each row of data. * \param N Number of rows in data. * \param width Number of entries to scan. * \param reduce_idx Indexer for saving intermediate results of reduction. * \tparam tpp Number of threads to use per row in \a d_data . * * The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and \a width entries * per row. This sub-warp group then iterates through the data in the row, performing a reduction at each iteration. * The result of the reduction is saved into \a d_reduce for each iteration. The total sum is also accumulated * into \a d_sum. * * This test kernel is more complicated than the basic tests that CUB runs for WarpReduce. The reason for this is to * emulate a use-case in HOOMD, namely the force accumulation using multiple threads per particle. */ template<int tpp> __global__ void warp_reduce_kernel(const int* d_data, int* d_reduce, int* d_sum, const unsigned int N, const unsigned int width, const Index2D reduce_idx) { // thread id in the global grid const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // row of data that this thread operates on const unsigned int idx = tid / tpp; // index of thread within the sub warp const unsigned int cta_idx = threadIdx.x % tpp; if (idx >= N) return; int sum(0), cntr(0); unsigned int offset = cta_idx; bool done = false; while (!done) { // load in data int thread_data; if (offset < width) { thread_data = d_data[idx * width + offset]; } else { thread_data = 0; done = true; } offset += tpp; // only scan if sub warp still has work to do done = hoomd::detail::WarpScan<bool,tpp>().Broadcast(done, 0); if (!done) { // scan the thread data int sum_iter = hoomd::detail::WarpReduce<int,tpp>().Sum(thread_data); // save reduce result for this iteration if (cta_idx == 0) d_reduce[reduce_idx(idx,cntr)] = sum_iter; // accumulate total sum sum += sum_iter; ++cntr; } } // thread 0 writes out accumulated sum if (cta_idx == 0) { d_sum[idx] = sum; } } // Dispatch for warp reduction based on requested threads per particle. /*! * \param params Reduction parameters. * \tparam tpp Number of threads to try to launch. * * This recursive template compiles the kernel for all valid threads per particle (powers of 2 from 1 to 64), and only * executes the kernel for the number of threads that is equal to the value specified in \a params. */ template<int tpp> void warp_reduce_launcher(const reduce_params& params) { if (tpp == params.tpp) { dim3 grid((params.N*tpp+BLOCK_SIZE-1)/BLOCK_SIZE); hipLaunchKernelGGL((warp_reduce_kernel<tpp>), dim3(grid), dim3(BLOCK_SIZE), 0, 0, params.data, params.reduce, params.sum, params.N, params.width, params.reduce_idx); } else { warp_reduce_launcher<tpp/2>(params); } } //! Terminates the recursive template. 
template<> void warp_reduce_launcher<0>(const reduce_params& params) { } /*! * \params Scan parameters. * * The scan results are first memset to zero. */ void warp_reduce(const reduce_params& params) { hipMemset(params.reduce, 0, params.reduce_idx.getNumElements() * sizeof(int)); hipMemset(params.sum, 0, params.N * sizeof(int)); warp_reduce_launcher<MAX_TPP>(params); } //! Performs an iterative warp scan on a data set using \a tpp threads per row. /*! * \param d_data Data to scan as a N x width matrix. * \param d_scan Output of the scan at each step of sum. * \param d_sum Total sum for each row of data. * \param N Number of rows in data. * \param width Number of entries to scan. * \param scan_idx Indexer for saving intermediate results of scan. * \tparam tpp Number of threads to use per row in \a d_data . * * The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and \a width entries * per row. This sub-warp group then iterates through the data in the row, performing an exclusive sum at each iteration. * The result of the scan is saved into \a d_scan for each thread along with the aggregate at each iteration. The total * sum is also accumulated into \a d_sum. * * This test kernel is more complicated than the basic tests that CUB runs for WarpScan. The reason for this is to * emulate a use-case in HOOMD, namely the neighbor list generation using multiple threads per particle. */ template<int tpp> __global__ void warp_scan_kernel(const int* d_data, int* d_scan, int* d_sum, const unsigned int N, const unsigned int width, const Index3D scan_idx) { // thread id in the global grid const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // row of data that this thread operates on const unsigned int idx = tid / tpp; // index of thread within the sub warp const unsigned int cta_idx = threadIdx.x % tpp; if (idx >= N) return; int sum(0), cntr(0); unsigned int offset = cta_idx; bool done = false; while (!done) { // load in data int thread_data; if (offset < width) { thread_data = d_data[idx * width + offset]; } else { thread_data = 0; done = true; } offset += tpp; // only scan if sub warp still has work to do done = hoomd::detail::WarpScan<bool,tpp>().Broadcast(done, 0); if (!done) { // scan the thread data int sum_iter(0); hoomd::detail::WarpScan<int,tpp>().ExclusiveSum(thread_data, thread_data, sum_iter); // save scan result for this iteration d_scan[scan_idx(idx,cta_idx,cntr)] = thread_data; if (cta_idx == 0) d_scan[scan_idx(idx,tpp,cntr)] = sum_iter; // accumulate total sum sum += sum_iter; ++cntr; } } // thread 0 writes out accumulated sum if (cta_idx == 0) { d_sum[idx] = sum; } } // Dispatch for warp scan based on requested threads per particle. /*! * \param params Scan parameters. * \tparam tpp Number of threads to try to launch. * * This recursive template compiles the kernel for all valid threads per particle (powers of 2 from 1 to 64) and only * executes the kernel for the number of threads that is equal to the value specified in \a params. */ template<int tpp> void warp_scan_launcher(const scan_params& params) { if (tpp == params.tpp) { dim3 grid((params.N*tpp+BLOCK_SIZE-1)/BLOCK_SIZE); hipLaunchKernelGGL((warp_scan_kernel<tpp>), dim3(grid), dim3(BLOCK_SIZE), 0, 0, params.data, params.scan, params.sum, params.N, params.width, params.scan_idx); } else { warp_scan_launcher<tpp/2>(params); } } //! Terminates the recursive template. template<> void warp_scan_launcher<0>(const scan_params& params) { } /*! * \params Scan parameters. 
* * The scan results are first memset to zero. */ void warp_scan(const scan_params& params) { hipMemset(params.scan, 0, params.scan_idx.getNumElements() * sizeof(int)); hipMemset(params.sum, 0, params.N * sizeof(int)); warp_scan_launcher<MAX_TPP>(params); }
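Both test kernels above walk a row with a stride of tpp, one lane starting at its cta_idx. The host-only sketch below is not part of the test_warp_tools sources; strided_row_sum is an ad hoc name. It only checks the indexing property the kernels rely on: for any power-of-two tpp, the lanes together visit every entry of the row exactly once, so summing the lane contributions reproduces the plain row sum.

#include <cassert>
#include <cstddef>
#include <numeric>
#include <vector>

// one "lane" per cta_idx, each visiting offsets cta_idx, cta_idx + tpp, ...
int strided_row_sum(const std::vector<int>& row, int tpp)
{
    int total = 0;
    for (int cta_idx = 0; cta_idx < tpp; ++cta_idx)
    {
        int lane_sum = 0;
        for (std::size_t offset = cta_idx; offset < row.size(); offset += tpp)
            lane_sum += row[offset];
        total += lane_sum;   // combining lane sums gives the same total the kernel accumulates
    }
    return total;
}

int main()
{
    std::vector<int> row(37);                 // width deliberately not a multiple of tpp
    std::iota(row.begin(), row.end(), 1);
    for (int tpp = 1; tpp <= 64; tpp *= 2)    // same powers of two the launchers dispatch over
        assert(strided_row_sum(row, tpp) == std::accumulate(row.begin(), row.end(), 0));
    return 0;
}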
d8a551b4f2681862518461bde57e0d2902da672b.cu
// Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file test_warp_tools.cu * \brief CUDA kernels for testing warp-level primitives. */ #include "test_warp_tools.cuh" #include "hoomd/WarpTools.cuh" #ifdef __HIP_PLATFORM_HCC__ #define BLOCK_SIZE 64 #define MAX_TPP 64 #else #define BLOCK_SIZE 32 #define MAX_TPP 32 #endif //! Performs an iterative warp reduction on a data set using \a tpp threads per row. /*! * \param d_data Data to scan as a N x width matrix. * \param d_reduce Output of the reduction at each step. * \param d_sum Total sum for each row of data. * \param N Number of rows in data. * \param width Number of entries to scan. * \param reduce_idx Indexer for saving intermediate results of reduction. * \tparam tpp Number of threads to use per row in \a d_data . * * The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and \a width entries * per row. This sub-warp group then iterates through the data in the row, performing a reduction at each iteration. * The result of the reduction is saved into \a d_reduce for each iteration. The total sum is also accumulated * into \a d_sum. * * This test kernel is more complicated than the basic tests that CUB runs for WarpReduce. The reason for this is to * emulate a use-case in HOOMD, namely the force accumulation using multiple threads per particle. */ template<int tpp> __global__ void warp_reduce_kernel(const int* d_data, int* d_reduce, int* d_sum, const unsigned int N, const unsigned int width, const Index2D reduce_idx) { // thread id in the global grid const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // row of data that this thread operates on const unsigned int idx = tid / tpp; // index of thread within the sub warp const unsigned int cta_idx = threadIdx.x % tpp; if (idx >= N) return; int sum(0), cntr(0); unsigned int offset = cta_idx; bool done = false; while (!done) { // load in data int thread_data; if (offset < width) { thread_data = d_data[idx * width + offset]; } else { thread_data = 0; done = true; } offset += tpp; // only scan if sub warp still has work to do done = hoomd::detail::WarpScan<bool,tpp>().Broadcast(done, 0); if (!done) { // scan the thread data int sum_iter = hoomd::detail::WarpReduce<int,tpp>().Sum(thread_data); // save reduce result for this iteration if (cta_idx == 0) d_reduce[reduce_idx(idx,cntr)] = sum_iter; // accumulate total sum sum += sum_iter; ++cntr; } } // thread 0 writes out accumulated sum if (cta_idx == 0) { d_sum[idx] = sum; } } // Dispatch for warp reduction based on requested threads per particle. /*! * \param params Reduction parameters. * \tparam tpp Number of threads to try to launch. * * This recursive template compiles the kernel for all valid threads per particle (powers of 2 from 1 to 64), and only * executes the kernel for the number of threads that is equal to the value specified in \a params. */ template<int tpp> void warp_reduce_launcher(const reduce_params& params) { if (tpp == params.tpp) { dim3 grid((params.N*tpp+BLOCK_SIZE-1)/BLOCK_SIZE); hipLaunchKernelGGL((warp_reduce_kernel<tpp>), dim3(grid), dim3(BLOCK_SIZE), 0, 0, params.data, params.reduce, params.sum, params.N, params.width, params.reduce_idx); } else { warp_reduce_launcher<tpp/2>(params); } } //! Terminates the recursive template. template<> void warp_reduce_launcher<0>(const reduce_params& params) { } /*! * \params Scan parameters. 
* * The scan results are first memset to zero. */ void warp_reduce(const reduce_params& params) { hipMemset(params.reduce, 0, params.reduce_idx.getNumElements() * sizeof(int)); hipMemset(params.sum, 0, params.N * sizeof(int)); warp_reduce_launcher<MAX_TPP>(params); } //! Performs an iterative warp scan on a data set using \a tpp threads per row. /*! * \param d_data Data to scan as a N x width matrix. * \param d_scan Output of the scan at each step of sum. * \param d_sum Total sum for each row of data. * \param N Number of rows in data. * \param width Number of entries to scan. * \param scan_idx Indexer for saving intermediate results of scan. * \tparam tpp Number of threads to use per row in \a d_data . * * The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and \a width entries * per row. This sub-warp group then iterates through the data in the row, performing an exclusive sum at each iteration. * The result of the scan is saved into \a d_scan for each thread along with the aggregate at each iteration. The total * sum is also accumulated into \a d_sum. * * This test kernel is more complicated than the basic tests that CUB runs for WarpScan. The reason for this is to * emulate a use-case in HOOMD, namely the neighbor list generation using multiple threads per particle. */ template<int tpp> __global__ void warp_scan_kernel(const int* d_data, int* d_scan, int* d_sum, const unsigned int N, const unsigned int width, const Index3D scan_idx) { // thread id in the global grid const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // row of data that this thread operates on const unsigned int idx = tid / tpp; // index of thread within the sub warp const unsigned int cta_idx = threadIdx.x % tpp; if (idx >= N) return; int sum(0), cntr(0); unsigned int offset = cta_idx; bool done = false; while (!done) { // load in data int thread_data; if (offset < width) { thread_data = d_data[idx * width + offset]; } else { thread_data = 0; done = true; } offset += tpp; // only scan if sub warp still has work to do done = hoomd::detail::WarpScan<bool,tpp>().Broadcast(done, 0); if (!done) { // scan the thread data int sum_iter(0); hoomd::detail::WarpScan<int,tpp>().ExclusiveSum(thread_data, thread_data, sum_iter); // save scan result for this iteration d_scan[scan_idx(idx,cta_idx,cntr)] = thread_data; if (cta_idx == 0) d_scan[scan_idx(idx,tpp,cntr)] = sum_iter; // accumulate total sum sum += sum_iter; ++cntr; } } // thread 0 writes out accumulated sum if (cta_idx == 0) { d_sum[idx] = sum; } } // Dispatch for warp scan based on requested threads per particle. /*! * \param params Scan parameters. * \tparam tpp Number of threads to try to launch. * * This recursive template compiles the kernel for all valid threads per particle (powers of 2 from 1 to 64) and only * executes the kernel for the number of threads that is equal to the value specified in \a params. */ template<int tpp> void warp_scan_launcher(const scan_params& params) { if (tpp == params.tpp) { dim3 grid((params.N*tpp+BLOCK_SIZE-1)/BLOCK_SIZE); hipLaunchKernelGGL((warp_scan_kernel<tpp>), dim3(grid), dim3(BLOCK_SIZE), 0, 0, params.data, params.scan, params.sum, params.N, params.width, params.scan_idx); } else { warp_scan_launcher<tpp/2>(params); } } //! Terminates the recursive template. template<> void warp_scan_launcher<0>(const scan_params& params) { } /*! * \params Scan parameters. * * The scan results are first memset to zero. 
*/ void warp_scan(const scan_params& params) { hipMemset(params.scan, 0, params.scan_idx.getNumElements() * sizeof(int)); hipMemset(params.sum, 0, params.N * sizeof(int)); warp_scan_launcher<MAX_TPP>(params); }
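Both launchers above use the same recursive-template trick: every power-of-two specialization is compiled, but only the one whose template parameter matches the runtime value in params.tpp launches its kernel, and the <0> specialization terminates the recursion. A stand-alone sketch of that dispatch pattern follows; it is not taken from the HOOMD sources, and dispatch is a made-up name.

#include <cstdio>

template<int tpp>
void dispatch(int requested_tpp)
{
    if (tpp == requested_tpp)
    {
        // in the real launchers this is where the <tpp>-specialized kernel is launched
        std::printf("running specialization tpp=%d\n", tpp);
    }
    else
    {
        dispatch<tpp / 2>(requested_tpp);   // fall through to the next smaller power of two
    }
}

// terminates the recursion, mirroring warp_reduce_launcher<0> / warp_scan_launcher<0>
template<>
void dispatch<0>(int) {}

int main()
{
    dispatch<64>(8);   // prints "running specialization tpp=8"
    dispatch<64>(3);   // not a power of two: the recursion bottoms out and nothing runs
    return 0;
}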
4a0119b92ffb3d963b16e2c657de62c439f9853a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/core/TensorAccessor.h> #include <ATen/hip/HIPContext.h> #include <c10/hip/HIPException.h> #include <limits.h> #include <torch/torch.h> #include <hipcub/hipcub.hpp> using namespace torch::indexing; namespace { constexpr int kNumThreads = 1024; // Number of threads to run CUDA kernel in parallel. constexpr int kBackPtrBufferSize = 100; // Buffer size of backPtr on GPU. The data is transferred to CPU once // the buffer reaches this max size. } // anonymous namespace namespace torchaudio { namespace alignment { namespace gpu { template <typename scalar_t, typename target_t> __global__ void falign_cuda_step_kernel( const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> logProbs_a, const torch::PackedTensorAccessor32<target_t, 2, torch::RestrictPtrTraits> targets_a, const int T, const int L, const int N, const int R, const int t, const int64_t blank, int start, int end, int backPtrBufferLen, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> alphas_a, torch::PackedTensorAccessor32<int8_t, 2, torch::RestrictPtrTraits> backPtrBuffer_a) { scalar_t kNegInfinity = -std::numeric_limits<scalar_t>::infinity(); const int batchIndex = 0; // TODO: support batch version and use the real batch index int S = 2 * L + 1; int curIdxOffset = (t % 2); // current time step frame for alpha int prevIdxOffset = ((t - 1) % 2); // previous time step frame for alpha // reset alpha and backPtrBuffer values for (unsigned int i = threadIdx.x; i < S; i += blockDim.x) { alphas_a[curIdxOffset][i] = kNegInfinity; backPtrBuffer_a[backPtrBufferLen][i] = -1; } // This sync could potentially be removed through careful indexing inside each // thread for the above for loop. But this is okay for now. __syncthreads(); if (t == 0) { for (unsigned int i = start + threadIdx.x; i < end; i += blockDim.x) { int labelIdx = (i % 2 == 0) ? blank : targets_a[batchIndex][i / 2]; alphas_a[curIdxOffset][i] = logProbs_a[batchIndex][0][labelIdx]; } return; } using BlockReduce = hipcub::BlockReduce<scalar_t, kNumThreads>; __shared__ typename BlockReduce::TempStorage tempStorage; __shared__ scalar_t maxValue; scalar_t threadMax; int startloop = start; threadMax = kNegInfinity; if (start == 0 && threadIdx.x == 0) { alphas_a[curIdxOffset][0] = alphas_a[prevIdxOffset][0] + logProbs_a[batchIndex][t][blank]; threadMax = max(threadMax, alphas_a[curIdxOffset][0]); backPtrBuffer_a[backPtrBufferLen][0] = 0; } if (start == 0) { startloop += 1; } for (unsigned int i = startloop + threadIdx.x; i < end; i += blockDim.x) { scalar_t x0 = alphas_a[prevIdxOffset][i]; scalar_t x1 = alphas_a[prevIdxOffset][i - 1]; scalar_t x2 = kNegInfinity; int labelIdx = (i % 2 == 0) ? 
blank : targets_a[batchIndex][i / 2]; if (i % 2 != 0 && i != 1 && targets_a[batchIndex][i / 2] != targets_a[batchIndex][i / 2 - 1]) { x2 = alphas_a[prevIdxOffset][i - 2]; } scalar_t result = 0.0; if (x2 > x1 && x2 > x0) { result = x2; backPtrBuffer_a[backPtrBufferLen][i] = 2; } else if (x1 > x0 && x1 > x2) { result = x1; backPtrBuffer_a[backPtrBufferLen][i] = 1; } else { result = x0; backPtrBuffer_a[backPtrBufferLen][i] = 0; } alphas_a[curIdxOffset][i] = result + logProbs_a[batchIndex][t][labelIdx]; threadMax = max(threadMax, alphas_a[curIdxOffset][i]); } scalar_t maxResult = BlockReduce(tempStorage).Reduce(threadMax, hipcub::Max()); if (threadIdx.x == 0) { maxValue = maxResult; } __syncthreads(); // normalize alpha values so that they don't overflow for large T for (unsigned int i = threadIdx.x; i < S; i += blockDim.x) { alphas_a[curIdxOffset][i] -= maxValue; } } template <typename scalar_t, torch::ScalarType target_scalar_type> void forced_align_impl( const torch::Tensor& logProbs, const torch::Tensor& targets, const int64_t blank, torch::Tensor& paths) { auto defaultStream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto cpuDataTranferStream = at::hip::getStreamFromPoolMasqueradingAsCUDA(); const scalar_t kNegInfinity = -std::numeric_limits<scalar_t>::infinity(); using target_t = typename std:: conditional<target_scalar_type == torch::kInt, int, int64_t>::type; auto paths_a = paths.accessor<target_t, 2>(); const int batchIndex = 0; // TODO: support batch version and use the real batch index const int T = logProbs.size(1); // num frames const int N = logProbs.size(2); // alphabet size const int L = targets.size(1); // label length const int S = 2 * L + 1; auto targetsCpu = targets.to(torch::kCPU); // backPtrBuffer stores the index offset fthe best path at current position // We copy the values to CPU after running every kBackPtrBufferSize of // frames. torch::Tensor backPtrBuffer = torch::empty( {min(kBackPtrBufferSize, T), S}, torch::TensorOptions().dtype(torch::kInt8).device(logProbs.device())) .contiguous() .fill_(-1); torch::Tensor backPtrCpu = torch::empty( {T, S}, torch::TensorOptions().dtype(torch::kInt8).device(torch::kCPU)) .contiguous() .fill_(-1); // we store only two time frames for alphas // alphas for compute current timeframe can be computed only from previous // time frame. torch::Tensor alphas = torch::empty( {2, S}, torch::TensorOptions() .dtype(logProbs.dtype()) .device(logProbs.device())) .fill_(kNegInfinity); // CPU accessors auto targetsCpu_a = targetsCpu.accessor<target_t, 2>(); auto backPtrCpu_a = backPtrCpu.accessor<int8_t, 2>(); // count the number of repeats in label int R = 0; for (int i = 1; i < L; ++i) { if (targetsCpu_a[batchIndex][i] == targetsCpu_a[batchIndex][i - 1]) { ++R; } } TORCH_CHECK( T >= L + R, "targets length is too long for CTC. Found targets length: ", T, ", log_probs length: ", L, ", and number of repeats: ", R); int start = (T - (L + R)) > 0 ? 0 : 1; int end = (S == 1) ? 
1 : 2; int backPtrBufferLen = 0; torch::Tensor bufferCopy; for (int t = 0; t < T; ++t) { if (t > 0) { if (T - t <= L + R) { if ((start % 2 == 1) && (targetsCpu_a[batchIndex][start / 2] != targetsCpu_a[batchIndex][start / 2 + 1])) { start = start + 1; } start = start + 1; } if (t <= L + R) { if ((end % 2 == 0) && (end < 2 * L) && (targetsCpu_a[batchIndex][end / 2 - 1] != targetsCpu_a[batchIndex][end / 2])) { end = end + 1; } end = end + 1; } } hipLaunchKernelGGL(( falign_cuda_step_kernel<scalar_t, target_t>) , dim3(1), dim3(kNumThreads), 0, defaultStream, logProbs.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(), targets.packed_accessor32<target_t, 2, torch::RestrictPtrTraits>(), T, L, N, R, t, blank, start, end, backPtrBufferLen, alphas.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), backPtrBuffer .packed_accessor32<int8_t, 2, torch::RestrictPtrTraits>()); C10_HIP_KERNEL_LAUNCH_CHECK(); ++backPtrBufferLen; if (backPtrBufferLen == kBackPtrBufferSize || t == T - 1) { cpuDataTranferStream.synchronize(); // GPU -> GPU copy bufferCopy = backPtrBuffer.clone().contiguous(); defaultStream.synchronize(); at::hip::setCurrentHIPStreamMasqueradingAsCUDA(cpuDataTranferStream); // Copy ASYNC from GPU to CPU int64_t offset = static_cast<int64_t>(t + 1 - backPtrBufferLen) * S * sizeof(int8_t); C10_HIP_CHECK(hipMemcpyAsync( static_cast<int8_t*>(backPtrCpu.data_ptr()) + offset, bufferCopy.data_ptr(), backPtrBufferLen * S * sizeof(int8_t), hipMemcpyDeviceToHost, cpuDataTranferStream)); at::hip::setCurrentHIPStreamMasqueradingAsCUDA(defaultStream); backPtrBufferLen = 0; } } cpuDataTranferStream.synchronize(); torch::Tensor alphasCpu = alphas.to(torch::kCPU); auto alphasCpu_a = alphasCpu.accessor<scalar_t, 2>(); int curIdxOffset = ((T - 1) % 2); int ltrIdx = alphasCpu_a[curIdxOffset][S - 1] > alphasCpu_a[curIdxOffset][S - 2] ? S - 1 : S - 2; int indexScores = 0; for (int t = T - 1; t >= 0; --t) { auto lbl_idx = ltrIdx % 2 == 0 ? 
blank : targetsCpu_a[batchIndex][ltrIdx / 2]; paths_a[batchIndex][t] = lbl_idx; ++indexScores; ltrIdx -= backPtrCpu_a[t][ltrIdx]; } } std::tuple<torch::Tensor, torch::Tensor> compute( const torch::Tensor& logProbs, const torch::Tensor& targets, const torch::Tensor& inputLengths, const torch::Tensor& targetLengths, const int64_t blank) { TORCH_CHECK(logProbs.is_cuda(), "log_probs must be a CUDA tensor"); TORCH_CHECK(targets.is_cuda(), "targets must be a CUDA tensor"); TORCH_CHECK( logProbs.device() == targets.device(), "log_probs and targets need to be on the same device"); TORCH_CHECK( logProbs.dtype() == torch::kFloat64 || logProbs.dtype() == torch::kFloat32 || logProbs.dtype() == torch::kFloat16, "log_probs must be float64, float32 or float16 (half) type"); TORCH_CHECK( targets.dtype() == torch::kInt32 || targets.dtype() == torch::kInt64, "targets must be int32 or int64 type"); TORCH_CHECK(logProbs.is_contiguous(), "log_probs must be contiguous"); TORCH_CHECK(targets.is_contiguous(), "targets must be contiguous"); TORCH_CHECK( logProbs.dim() == 3, "log_probs must be 3-D (batch_size, input length, num classes)"); TORCH_CHECK( targets.dim() == 2, "targets must be 2-D (batch_size, target length,)"); TORCH_CHECK( inputLengths.dim() == 1, "input_lengths must be 1-D (batch_size,)"); TORCH_CHECK( targetLengths.dim() == 1, "target_lengths must be 1-D (batch_size,)"); TORCH_CHECK( logProbs.size(0) == 1, "The batch dimension for log_probs must be 1 at the current version.") TORCH_CHECK( targets.size(0) == 1, "The batch dimension for targets must be 1 at the current version.") TORCH_CHECK( blank >= 0 && blank < logProbs.size(-1), "blank must be within [0, num classes)"); TORCH_CHECK( logProbs.size(1) == at::max(inputLengths).item().toInt(), "input length mismatch"); TORCH_CHECK( targets.size(1) == at::max(targetLengths).item().toInt(), "target length mismatch"); auto B = logProbs.size(0); auto T = logProbs.size(1); // num frames auto paths = torch::zeros( {B, T}, torch::TensorOptions().device(torch::kCPU).dtype(targets.dtype())); AT_DISPATCH_FLOATING_TYPES_AND_HALF( logProbs.scalar_type(), "forced_align_impl", [&] { if (targets.scalar_type() == torch::kInt64) { forced_align_impl<scalar_t, torch::kInt64>( logProbs, targets, blank, paths); } else { forced_align_impl<scalar_t, torch::kInt32>( logProbs, targets, blank, paths); } }); return std::make_tuple( paths.to(logProbs.device()), logProbs.index( {torch::indexing::Slice(), torch::linspace( 0, T - 1, T, torch::TensorOptions().dtype(paths.dtype())), paths.index({0})})); } TORCH_LIBRARY_IMPL(torchaudio, CUDA, m) { m.impl("forced_align", &compute); } } // namespace gpu } // namespace alignment } // namespace torchaudio
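The host loop above flushes the GPU back-pointer buffer whenever backPtrBufferLen reaches kBackPtrBufferSize or the last frame has been processed, copying the chunk into row (t + 1 - backPtrBufferLen) of the T x S CPU array. The host-only sketch below is not from the torchaudio sources and uses toy sizes; it only checks that this offset arithmetic tiles the rows [0, T) with no gaps or overlap.

#include <cassert>

int main()
{
    const int T = 23;           // number of frames (arbitrary toy value)
    const int bufferSize = 10;  // stands in for kBackPtrBufferSize
    int len = 0;                // frames currently held in the "GPU" buffer
    int copied = 0;             // rows already flushed to the "CPU" array
    for (int t = 0; t < T; ++t)
    {
        ++len;
        if (len == bufferSize || t == T - 1)
        {
            int firstRow = t + 1 - len;   // destination row of this chunk
            assert(firstRow == copied);   // chunks land back to back
            copied += len;
            len = 0;
        }
    }
    assert(copied == T);                  // every frame flushed exactly once
    return 0;
}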
4a0119b92ffb3d963b16e2c657de62c439f9853a.cu
#include <ATen/core/TensorAccessor.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAException.h> #include <limits.h> #include <torch/torch.h> #include <cub/cub.cuh> using namespace torch::indexing; namespace { constexpr int kNumThreads = 1024; // Number of threads to run CUDA kernel in parallel. constexpr int kBackPtrBufferSize = 100; // Buffer size of backPtr on GPU. The data is transferred to CPU once // the buffer reaches this max size. } // anonymous namespace namespace torchaudio { namespace alignment { namespace gpu { template <typename scalar_t, typename target_t> __global__ void falign_cuda_step_kernel( const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> logProbs_a, const torch::PackedTensorAccessor32<target_t, 2, torch::RestrictPtrTraits> targets_a, const int T, const int L, const int N, const int R, const int t, const int64_t blank, int start, int end, int backPtrBufferLen, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> alphas_a, torch::PackedTensorAccessor32<int8_t, 2, torch::RestrictPtrTraits> backPtrBuffer_a) { scalar_t kNegInfinity = -std::numeric_limits<scalar_t>::infinity(); const int batchIndex = 0; // TODO: support batch version and use the real batch index int S = 2 * L + 1; int curIdxOffset = (t % 2); // current time step frame for alpha int prevIdxOffset = ((t - 1) % 2); // previous time step frame for alpha // reset alpha and backPtrBuffer values for (unsigned int i = threadIdx.x; i < S; i += blockDim.x) { alphas_a[curIdxOffset][i] = kNegInfinity; backPtrBuffer_a[backPtrBufferLen][i] = -1; } // This sync could potentially be removed through careful indexing inside each // thread for the above for loop. But this is okay for now. __syncthreads(); if (t == 0) { for (unsigned int i = start + threadIdx.x; i < end; i += blockDim.x) { int labelIdx = (i % 2 == 0) ? blank : targets_a[batchIndex][i / 2]; alphas_a[curIdxOffset][i] = logProbs_a[batchIndex][0][labelIdx]; } return; } using BlockReduce = cub::BlockReduce<scalar_t, kNumThreads>; __shared__ typename BlockReduce::TempStorage tempStorage; __shared__ scalar_t maxValue; scalar_t threadMax; int startloop = start; threadMax = kNegInfinity; if (start == 0 && threadIdx.x == 0) { alphas_a[curIdxOffset][0] = alphas_a[prevIdxOffset][0] + logProbs_a[batchIndex][t][blank]; threadMax = max(threadMax, alphas_a[curIdxOffset][0]); backPtrBuffer_a[backPtrBufferLen][0] = 0; } if (start == 0) { startloop += 1; } for (unsigned int i = startloop + threadIdx.x; i < end; i += blockDim.x) { scalar_t x0 = alphas_a[prevIdxOffset][i]; scalar_t x1 = alphas_a[prevIdxOffset][i - 1]; scalar_t x2 = kNegInfinity; int labelIdx = (i % 2 == 0) ? 
blank : targets_a[batchIndex][i / 2]; if (i % 2 != 0 && i != 1 && targets_a[batchIndex][i / 2] != targets_a[batchIndex][i / 2 - 1]) { x2 = alphas_a[prevIdxOffset][i - 2]; } scalar_t result = 0.0; if (x2 > x1 && x2 > x0) { result = x2; backPtrBuffer_a[backPtrBufferLen][i] = 2; } else if (x1 > x0 && x1 > x2) { result = x1; backPtrBuffer_a[backPtrBufferLen][i] = 1; } else { result = x0; backPtrBuffer_a[backPtrBufferLen][i] = 0; } alphas_a[curIdxOffset][i] = result + logProbs_a[batchIndex][t][labelIdx]; threadMax = max(threadMax, alphas_a[curIdxOffset][i]); } scalar_t maxResult = BlockReduce(tempStorage).Reduce(threadMax, cub::Max()); if (threadIdx.x == 0) { maxValue = maxResult; } __syncthreads(); // normalize alpha values so that they don't overflow for large T for (unsigned int i = threadIdx.x; i < S; i += blockDim.x) { alphas_a[curIdxOffset][i] -= maxValue; } } template <typename scalar_t, torch::ScalarType target_scalar_type> void forced_align_impl( const torch::Tensor& logProbs, const torch::Tensor& targets, const int64_t blank, torch::Tensor& paths) { auto defaultStream = at::cuda::getCurrentCUDAStream(); auto cpuDataTranferStream = at::cuda::getStreamFromPool(); const scalar_t kNegInfinity = -std::numeric_limits<scalar_t>::infinity(); using target_t = typename std:: conditional<target_scalar_type == torch::kInt, int, int64_t>::type; auto paths_a = paths.accessor<target_t, 2>(); const int batchIndex = 0; // TODO: support batch version and use the real batch index const int T = logProbs.size(1); // num frames const int N = logProbs.size(2); // alphabet size const int L = targets.size(1); // label length const int S = 2 * L + 1; auto targetsCpu = targets.to(torch::kCPU); // backPtrBuffer stores the index offset fthe best path at current position // We copy the values to CPU after running every kBackPtrBufferSize of // frames. torch::Tensor backPtrBuffer = torch::empty( {min(kBackPtrBufferSize, T), S}, torch::TensorOptions().dtype(torch::kInt8).device(logProbs.device())) .contiguous() .fill_(-1); torch::Tensor backPtrCpu = torch::empty( {T, S}, torch::TensorOptions().dtype(torch::kInt8).device(torch::kCPU)) .contiguous() .fill_(-1); // we store only two time frames for alphas // alphas for compute current timeframe can be computed only from previous // time frame. torch::Tensor alphas = torch::empty( {2, S}, torch::TensorOptions() .dtype(logProbs.dtype()) .device(logProbs.device())) .fill_(kNegInfinity); // CPU accessors auto targetsCpu_a = targetsCpu.accessor<target_t, 2>(); auto backPtrCpu_a = backPtrCpu.accessor<int8_t, 2>(); // count the number of repeats in label int R = 0; for (int i = 1; i < L; ++i) { if (targetsCpu_a[batchIndex][i] == targetsCpu_a[batchIndex][i - 1]) { ++R; } } TORCH_CHECK( T >= L + R, "targets length is too long for CTC. Found targets length: ", T, ", log_probs length: ", L, ", and number of repeats: ", R); int start = (T - (L + R)) > 0 ? 0 : 1; int end = (S == 1) ? 
1 : 2; int backPtrBufferLen = 0; torch::Tensor bufferCopy; for (int t = 0; t < T; ++t) { if (t > 0) { if (T - t <= L + R) { if ((start % 2 == 1) && (targetsCpu_a[batchIndex][start / 2] != targetsCpu_a[batchIndex][start / 2 + 1])) { start = start + 1; } start = start + 1; } if (t <= L + R) { if ((end % 2 == 0) && (end < 2 * L) && (targetsCpu_a[batchIndex][end / 2 - 1] != targetsCpu_a[batchIndex][end / 2])) { end = end + 1; } end = end + 1; } } falign_cuda_step_kernel<scalar_t, target_t> <<<1, kNumThreads, 0, defaultStream>>>( logProbs.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(), targets.packed_accessor32<target_t, 2, torch::RestrictPtrTraits>(), T, L, N, R, t, blank, start, end, backPtrBufferLen, alphas.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), backPtrBuffer .packed_accessor32<int8_t, 2, torch::RestrictPtrTraits>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); ++backPtrBufferLen; if (backPtrBufferLen == kBackPtrBufferSize || t == T - 1) { cpuDataTranferStream.synchronize(); // GPU -> GPU copy bufferCopy = backPtrBuffer.clone().contiguous(); defaultStream.synchronize(); at::cuda::setCurrentCUDAStream(cpuDataTranferStream); // Copy ASYNC from GPU to CPU int64_t offset = static_cast<int64_t>(t + 1 - backPtrBufferLen) * S * sizeof(int8_t); C10_CUDA_CHECK(cudaMemcpyAsync( static_cast<int8_t*>(backPtrCpu.data_ptr()) + offset, bufferCopy.data_ptr(), backPtrBufferLen * S * sizeof(int8_t), cudaMemcpyDeviceToHost, cpuDataTranferStream)); at::cuda::setCurrentCUDAStream(defaultStream); backPtrBufferLen = 0; } } cpuDataTranferStream.synchronize(); torch::Tensor alphasCpu = alphas.to(torch::kCPU); auto alphasCpu_a = alphasCpu.accessor<scalar_t, 2>(); int curIdxOffset = ((T - 1) % 2); int ltrIdx = alphasCpu_a[curIdxOffset][S - 1] > alphasCpu_a[curIdxOffset][S - 2] ? S - 1 : S - 2; int indexScores = 0; for (int t = T - 1; t >= 0; --t) { auto lbl_idx = ltrIdx % 2 == 0 ? 
blank : targetsCpu_a[batchIndex][ltrIdx / 2]; paths_a[batchIndex][t] = lbl_idx; ++indexScores; ltrIdx -= backPtrCpu_a[t][ltrIdx]; } } std::tuple<torch::Tensor, torch::Tensor> compute( const torch::Tensor& logProbs, const torch::Tensor& targets, const torch::Tensor& inputLengths, const torch::Tensor& targetLengths, const int64_t blank) { TORCH_CHECK(logProbs.is_cuda(), "log_probs must be a CUDA tensor"); TORCH_CHECK(targets.is_cuda(), "targets must be a CUDA tensor"); TORCH_CHECK( logProbs.device() == targets.device(), "log_probs and targets need to be on the same device"); TORCH_CHECK( logProbs.dtype() == torch::kFloat64 || logProbs.dtype() == torch::kFloat32 || logProbs.dtype() == torch::kFloat16, "log_probs must be float64, float32 or float16 (half) type"); TORCH_CHECK( targets.dtype() == torch::kInt32 || targets.dtype() == torch::kInt64, "targets must be int32 or int64 type"); TORCH_CHECK(logProbs.is_contiguous(), "log_probs must be contiguous"); TORCH_CHECK(targets.is_contiguous(), "targets must be contiguous"); TORCH_CHECK( logProbs.dim() == 3, "log_probs must be 3-D (batch_size, input length, num classes)"); TORCH_CHECK( targets.dim() == 2, "targets must be 2-D (batch_size, target length,)"); TORCH_CHECK( inputLengths.dim() == 1, "input_lengths must be 1-D (batch_size,)"); TORCH_CHECK( targetLengths.dim() == 1, "target_lengths must be 1-D (batch_size,)"); TORCH_CHECK( logProbs.size(0) == 1, "The batch dimension for log_probs must be 1 at the current version.") TORCH_CHECK( targets.size(0) == 1, "The batch dimension for targets must be 1 at the current version.") TORCH_CHECK( blank >= 0 && blank < logProbs.size(-1), "blank must be within [0, num classes)"); TORCH_CHECK( logProbs.size(1) == at::max(inputLengths).item().toInt(), "input length mismatch"); TORCH_CHECK( targets.size(1) == at::max(targetLengths).item().toInt(), "target length mismatch"); auto B = logProbs.size(0); auto T = logProbs.size(1); // num frames auto paths = torch::zeros( {B, T}, torch::TensorOptions().device(torch::kCPU).dtype(targets.dtype())); AT_DISPATCH_FLOATING_TYPES_AND_HALF( logProbs.scalar_type(), "forced_align_impl", [&] { if (targets.scalar_type() == torch::kInt64) { forced_align_impl<scalar_t, torch::kInt64>( logProbs, targets, blank, paths); } else { forced_align_impl<scalar_t, torch::kInt32>( logProbs, targets, blank, paths); } }); return std::make_tuple( paths.to(logProbs.device()), logProbs.index( {torch::indexing::Slice(), torch::linspace( 0, T - 1, T, torch::TensorOptions().dtype(paths.dtype())), paths.index({0})})); } TORCH_LIBRARY_IMPL(torchaudio, CUDA, m) { m.impl("forced_align", &compute); } } // namespace gpu } // namespace alignment } // namespace torchaudio
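After the forward pass, forced_align_impl recovers the alignment by walking backwards through the back-pointer table: at each frame it emits blank for an even state or targets[ltrIdx / 2] for an odd state, then subtracts backPtr[t][ltrIdx] to move to the previous state. The toy CPU program below is not part of the sources and its table values are invented; it only traces one such path to show the mechanics.

#include <cstdio>

int main()
{
    const int blank = 0;
    const int targets[2] = {3, 5};        // L = 2 labels -> S = 5 trellis states
    const int T = 4;
    // backPtr[t][s]: how many states (0, 1 or 2) the best path advanced to reach s at frame t;
    // only the entries the traced path touches matter here
    const signed char backPtr[4][5] = {
        {0, 0, 0, 0, 0},
        {0, 0, 1, 0, 0},   // frame 1 entered state 2 from state 1
        {0, 0, 0, 1, 0},   // frame 2 entered state 3 from state 2
        {0, 0, 0, 0, 0},   // frame 3 stayed in state 3
    };
    int path[T];
    int ltrIdx = 3;                        // best end state picked from the final alphas
    for (int t = T - 1; t >= 0; --t)
    {
        path[t] = (ltrIdx % 2 == 0) ? blank : targets[ltrIdx / 2];
        ltrIdx -= backPtr[t][ltrIdx];      // same update as the CUDA/HIP code above
    }
    for (int t = 0; t < T; ++t)
        std::printf("frame %d -> label %d\n", t, path[t]);   // prints 3, 0, 5, 5
    return 0;
}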
06fae9a533d39bf35f28e0de82a53ca80b0b2d33.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<vector>
#include<stdio.h>

void print(float *v, int n )
{
    for (int i = 0; i < n ; i++)
    {
        printf("%f ",v[i]);
        if (i%10 == 0)
        {
            printf("\n");
        }
    }
}

__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
    {
        C[i] = A[i] + B[i];
    }
}

int main(void)
{
    int numElements = 500;
    int size = numElements * sizeof(float);
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = 1;//rand()/(float)RAND_MAX;
        h_B[i] = 1;//rand()/(float)RAND_MAX;
    }
    float *d_A = NULL;
    hipMalloc((void **)&d_A, size);
    float *d_B = NULL;
    hipMalloc((void **)&d_B, size);
    float *d_C = NULL;
    hipMalloc((void **)&d_C, size);
    hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    int threadsPerBlock = 256;
    int blocksPerGrid = 2;
    hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
    hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    print(h_C, numElements);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
06fae9a533d39bf35f28e0de82a53ca80b0b2d33.cu
#include<cuda.h>
#include<vector>
#include<stdio.h>

void print(float *v, int n )
{
    for (int i = 0; i < n ; i++)
    {
        printf("%f ",v[i]);
        if (i%10 == 0)
        {
            printf("\n");
        }
    }
}

__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
    {
        C[i] = A[i] + B[i];
    }
}

int main(void)
{
    int numElements = 500;
    int size = numElements * sizeof(float);
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = 1;//rand()/(float)RAND_MAX;
        h_B[i] = 1;//rand()/(float)RAND_MAX;
    }
    float *d_A = NULL;
    cudaMalloc((void **)&d_A, size);
    float *d_B = NULL;
    cudaMalloc((void **)&d_B, size);
    float *d_C = NULL;
    cudaMalloc((void **)&d_C, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    int threadsPerBlock = 256;
    int blocksPerGrid = 2;
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    print(h_C, numElements);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
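In both versions of this example the hard-coded launch of 2 blocks of 256 threads covers all 500 elements (2 * 256 = 512 >= 500), and because h_A and h_B are filled with 1 every output element should be exactly 2. A small host-side check along these lines could be dropped in after the copy back to h_C; verify_all_twos is a hypothetical helper, not part of the file.

#include <cmath>
#include <cstdio>

// hypothetical helper: `result` plays the role of h_C and `n` of numElements
static bool verify_all_twos(const float* result, int n)
{
    for (int i = 0; i < n; ++i)
    {
        if (std::fabs(result[i] - 2.0f) > 1e-6f)
        {
            std::printf("mismatch at %d: %f\n", i, result[i]);
            return false;
        }
    }
    return true;
}

int main()
{
    float ok[3] = {2.0f, 2.0f, 2.0f};      // stands in for the copied-back h_C
    std::printf("%s\n", verify_all_twos(ok, 3) ? "PASSED" : "FAILED");
    return 0;
}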
44f903154955300348c6a318cbfc515ffa94258c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "des_kernel_encrypt.h" #include "des_kernel_salt_instances.h" #ifdef DESGPU_COMPILE_ALL_SALTS void des_25_encrypt_salt2176(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 15, 16, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 63, 32, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56, 47, 48, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2177(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 31, 16, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 47, 32, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56, 63, 48, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2178(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 15, 0, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 63, 48, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56, 47, 32, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2179(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 31, 0, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 47, 48, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56, 63, 32, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2180(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 15, 16, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 63, 32, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56, 47, 48, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2181(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 31, 16, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 47, 32, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56, 63, 48, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2182(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 15, 0, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 63, 48, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56, 47, 32, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2183(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 31, 0, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 47, 48, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56, 63, 32, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2184(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 15, 16, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 63, 32, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56, 47, 48, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2185(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 31, 16, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 47, 32, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56, 63, 48, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2186(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 15, 0, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 63, 48, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56, 47, 32, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2187(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 31, 0, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 47, 48, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56, 63, 32, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2188(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 15, 16, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 63, 32, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56, 47, 48, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2189(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 31, 16, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 47, 32, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56, 63, 48, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2190(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 15, 0, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 63, 48, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56, 47, 32, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2191(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 31, 0, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 47, 48, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56, 63, 32, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2192(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 15, 16, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 63, 32, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56, 47, 48, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2193(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 31, 16, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 47, 32, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56, 63, 48, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2194(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 15, 0, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 63, 48, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56, 47, 32, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2195(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 31, 0, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 47, 48, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56, 63, 32, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2196(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 15, 16, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 63, 32, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56, 47, 48, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2197(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 31, 16, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 47, 32, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56, 63, 48, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2198(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 15, 0, 1, 18, 3, 20, 19, 4, 21, 22, 23, 
8, 63, 48, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56, 47, 32, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2199(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 31, 0, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 47, 48, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56, 63, 32, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2200(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 15, 16, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 63, 32, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56, 47, 48, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2201(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 31, 16, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 47, 32, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56, 63, 48, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2202(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 15, 0, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 63, 48, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56, 47, 32, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2203(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 31, 0, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 47, 48, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56, 63, 32, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2204(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 15, 16, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 63, 32, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56, 47, 48, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2205(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 31, 16, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 47, 32, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56, 63, 48, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2206(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 15, 0, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 63, 48, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56, 47, 32, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2207(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 31, 0, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 47, 48, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56, 63, 32, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2208(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 15, 16, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 63, 32, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56, 47, 48, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2209(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 31, 16, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 47, 32, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56, 63, 48, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2210(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 15, 0, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 63, 48, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56, 47, 32, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2211(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 31, 0, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 47, 48, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56, 63, 32, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2212(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 15, 16, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 63, 32, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56, 47, 48, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2213(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 31, 16, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 47, 32, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56, 63, 48, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2214(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 15, 0, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 63, 48, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56, 47, 32, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2215(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 31, 0, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 47, 48, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56, 63, 32, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2216(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 15, 16, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 63, 32, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56, 47, 48, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2217(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 31, 16, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 47, 32, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56, 63, 48, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2218(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 15, 0, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 63, 48, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56, 47, 32, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2219(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 31, 0, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 47, 48, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56, 63, 32, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2220(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 15, 16, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 63, 32, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56, 47, 48, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2221(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 31, 16, 1, 2, 19, 4, 19, 4, 21, 22, 23, 
8, 47, 32, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56, 63, 48, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2222(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 15, 0, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 63, 48, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56, 47, 32, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2223(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 31, 0, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 47, 48, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56, 63, 32, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2224(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 15, 16, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 63, 32, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56, 47, 48, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2225(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 31, 16, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 47, 32, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56, 63, 48, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2226(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 15, 0, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 63, 48, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56, 47, 32, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2227(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 31, 0, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 47, 48, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56, 63, 32, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2228(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 15, 16, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 63, 32, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56, 47, 48, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2229(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 31, 16, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 47, 32, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56, 63, 48, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2230(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 15, 0, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 63, 48, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56, 47, 32, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2231(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 31, 0, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 47, 48, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56, 63, 32, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2232(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 15, 16, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 63, 32, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56, 47, 48, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2233(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 31, 16, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 47, 32, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56, 63, 48, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2234(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 15, 0, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 63, 48, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56, 47, 32, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2235(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 31, 0, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 47, 48, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56, 63, 32, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2236(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 15, 16, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 63, 32, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56, 47, 48, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2237(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 31, 16, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 47, 32, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56, 63, 48, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2238(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 15, 0, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 63, 48, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56, 47, 32, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2239(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 31, 0, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 47, 48, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56, 63, 32, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2240(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 15, 16, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 63, 32, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56, 47, 48, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2241(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 31, 16, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 47, 32, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56, 63, 48, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2242(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 15, 0, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 63, 48, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56, 47, 32, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2243(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 31, 0, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 47, 48, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56, 63, 32, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2244(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 15, 16, 1, 18, 19, 20, 3, 4, 21, 22, 23, 
8, 63, 32, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56, 47, 48, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2245(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 31, 16, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 47, 32, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56, 63, 48, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2246(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 15, 0, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 63, 48, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56, 47, 32, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2247(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 31, 0, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 47, 48, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56, 63, 32, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2248(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 15, 16, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 63, 32, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56, 47, 48, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2249(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 31, 16, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 47, 32, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56, 63, 48, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2250(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 15, 0, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 63, 48, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56, 47, 32, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2251(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 31, 0, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 47, 48, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56, 63, 32, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2252(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 15, 16, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 63, 32, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56, 47, 48, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2253(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 31, 16, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 47, 32, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56, 63, 48, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2254(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 15, 0, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 63, 48, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56, 47, 32, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2255(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 31, 0, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 47, 48, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56, 63, 32, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2256(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 15, 16, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 63, 32, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56, 47, 48, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2257(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 31, 16, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 47, 32, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56, 63, 48, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2258(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 15, 0, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 63, 48, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56, 47, 32, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2259(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 31, 0, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 47, 48, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56, 63, 32, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2260(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 15, 16, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 63, 32, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56, 47, 48, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2261(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 31, 16, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 47, 32, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56, 63, 48, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2262(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 15, 0, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 63, 48, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56, 47, 32, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2263(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 31, 0, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 47, 48, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56, 63, 32, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2264(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 15, 16, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 63, 32, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56, 47, 48, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2265(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 31, 16, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 47, 32, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56, 63, 48, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2266(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 15, 0, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 63, 48, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56, 47, 32, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2267(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 31, 0, 17, 2, 3, 20, 3, 4, 21, 22, 23, 
8, 47, 48, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56, 63, 32, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2268(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 15, 16, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 63, 32, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56, 47, 48, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2269(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 31, 16, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 47, 32, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56, 63, 48, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2270(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 15, 0, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 63, 48, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56, 47, 32, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2271(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 31, 0, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 47, 48, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56, 63, 32, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2272(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 15, 16, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 63, 32, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56, 47, 48, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2273(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 31, 16, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 47, 32, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56, 63, 48, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2274(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 15, 0, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 63, 48, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56, 47, 32, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2275(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 31, 0, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 47, 48, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56, 63, 32, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2276(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 15, 16, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 63, 32, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56, 47, 48, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2277(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 31, 16, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 47, 32, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56, 63, 48, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2278(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 15, 0, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 63, 48, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56, 47, 32, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2279(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 31, 0, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 47, 48, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56, 63, 32, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2280(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 15, 16, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 63, 32, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56, 47, 48, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2281(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 31, 16, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 47, 32, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56, 63, 48, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2282(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 15, 0, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 63, 48, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56, 47, 32, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2283(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 31, 0, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 47, 48, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56, 63, 32, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2284(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 15, 16, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 63, 32, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56, 47, 48, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2285(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 31, 16, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 47, 32, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56, 63, 48, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2286(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 15, 0, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 63, 48, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56, 47, 32, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2287(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 31, 0, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 47, 48, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56, 63, 32, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2288(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 15, 16, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 63, 32, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56, 47, 48, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2289(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 31, 16, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 47, 32, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56, 63, 48, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2290(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 15, 0, 17, 18, 3, 4, 3, 4, 21, 22, 23, 
8, 63, 48, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56, 47, 32, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2291(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 31, 0, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 47, 48, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56, 63, 32, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2292(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 15, 16, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 63, 32, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56, 47, 48, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2293(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 31, 16, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 47, 32, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56, 63, 48, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2294(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 15, 0, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 63, 48, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56, 47, 32, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2295(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 31, 0, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 47, 48, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56, 63, 32, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2296(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 15, 16, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 63, 32, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56, 47, 48, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2297(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 31, 16, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 47, 32, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56, 63, 48, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2298(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 15, 0, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 63, 48, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56, 47, 32, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2299(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 31, 0, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 47, 48, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56, 63, 32, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2300(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 15, 16, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 63, 32, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56, 47, 48, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2301(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 31, 16, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 47, 32, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56, 63, 48, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2302(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 15, 0, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 63, 48, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56, 47, 32, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2303(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 31, 0, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 47, 48, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56, 63, 32, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } #endif // DESGPU_COMPILE_ALL_SALTS
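#ifdef DESGPU_COMPILE_ALL_SALTS
/*
 * Illustrative usage sketch (not part of the generated instantiations above):
 * every des_25_encrypt_saltNNNN launcher in this translation unit shares the
 * same signature, so host code can select the right instantiation through a
 * function-pointer table indexed by the salt value. The helper name, the
 * table, and the covered range (salts 2240..2243 only) below are hypothetical
 * and shown purely for illustration; a real dispatcher would enumerate the
 * full salt range generated in this file. It relies only on the declarations
 * already visible here (vtype and the launchers defined above).
 */
typedef void (*des_25_salt_launcher_t)(const size_t num_blocks,
                                       const size_t threads_per_block,
                                       vtype* const unchecked_hashes,
                                       const vtype* const bitsplitted_keys);

static inline void des_25_launch_for_salt(const unsigned int salt,
                                          const size_t num_blocks,
                                          const size_t threads_per_block,
                                          vtype* const unchecked_hashes,
                                          const vtype* const bitsplitted_keys)
{
    /* Partial table for illustration only: maps salt - 2240 to its launcher. */
    static const des_25_salt_launcher_t table[] = {
        des_25_encrypt_salt2240,
        des_25_encrypt_salt2241,
        des_25_encrypt_salt2242,
        des_25_encrypt_salt2243,
    };
    const unsigned int base = 2240u;

    /* Dispatch only within the (illustrative) covered range. */
    if (salt >= base && salt < base + sizeof(table) / sizeof(table[0]))
        table[salt - base](num_blocks, threads_per_block,
                           unchecked_hashes, bitsplitted_keys);
}
#endif /* DESGPU_COMPILE_ALL_SALTS -- usage sketch */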
44f903154955300348c6a318cbfc515ffa94258c.cu
#include "des_kernel_encrypt.h" #include "des_kernel_salt_instances.h" #ifdef DESGPU_COMPILE_ALL_SALTS void des_25_encrypt_salt2176(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 15, 16, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 63, 32, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56, 47, 48, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2177(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 31, 16, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 47, 32, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56, 63, 48, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2178(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 15, 0, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 63, 48, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56, 47, 32, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2179(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 31, 0, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 47, 48, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56, 63, 32, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2180(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 15, 16, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 63, 32, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56, 47, 48, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2181(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 31, 16, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 47, 32, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56, 63, 48, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2182(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 15, 0, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 63, 48, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56, 47, 32, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2183(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 31, 0, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 47, 48, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56, 63, 32, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2184(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 15, 16, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 63, 32, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56, 47, 48, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2185(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 31, 16, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 47, 32, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56, 63, 48, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2186(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 15, 0, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 63, 48, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56, 47, 32, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2187(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 31, 0, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 47, 48, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56, 63, 32, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2188(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 15, 16, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 63, 32, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56, 47, 48, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2189(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 31, 16, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 47, 32, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56, 63, 48, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2190(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 15, 0, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 63, 48, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56, 47, 32, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2191(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 31, 0, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 47, 48, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56, 63, 32, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2192(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 
4, 3, 20, 5, 6, 7, 24, 15, 16, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 63, 32, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56, 47, 48, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2193(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 31, 16, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 47, 32, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56, 63, 48, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2194(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 15, 0, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 63, 48, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56, 47, 32, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2195(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 31, 0, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 47, 48, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56, 63, 32, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2196(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 15, 16, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 63, 32, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56, 47, 48, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2197(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 31, 16, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 47, 32, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56, 63, 48, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2198(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 15, 0, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 63, 48, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56, 47, 32, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2199(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 31, 0, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 47, 48, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56, 63, 32, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2200(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 15, 16, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 63, 32, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56, 47, 48, 49, 34, 35, 52, 51, 
36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2201(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 31, 16, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 47, 32, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56, 63, 48, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2202(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 15, 0, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 63, 48, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56, 47, 32, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2203(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 31, 0, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 47, 48, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56, 63, 32, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2204(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 15, 16, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 63, 32, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56, 47, 48, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2205(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 31, 16, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 47, 32, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56, 63, 48, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2206(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 15, 0, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 63, 48, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56, 47, 32, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2207(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 31, 0, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 47, 48, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56, 63, 32, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2208(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 15, 16, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 63, 32, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56, 47, 48, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2209(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 31, 16, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 47, 32, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56, 63, 48, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2210(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 15, 0, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 63, 48, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56, 47, 32, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2211(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 31, 0, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 47, 48, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56, 63, 32, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2212(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 15, 16, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 63, 32, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56, 47, 48, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2213(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 31, 16, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 47, 32, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56, 63, 48, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2214(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 15, 0, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 63, 48, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56, 47, 32, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2215(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 31, 0, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 47, 48, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56, 63, 32, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2216(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 15, 16, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 63, 32, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56, 47, 48, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2217(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 
20, 3, 20, 5, 6, 7, 24, 31, 16, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 47, 32, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56, 63, 48, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2218(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 15, 0, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 63, 48, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56, 47, 32, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2219(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 31, 0, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 47, 48, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56, 63, 32, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2220(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 15, 16, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 63, 32, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56, 47, 48, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2221(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 31, 16, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 47, 32, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56, 63, 48, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2222(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 15, 0, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 63, 48, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56, 47, 32, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2223(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 31, 0, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 47, 48, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56, 63, 32, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2224(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 15, 16, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 63, 32, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56, 47, 48, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2225(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 31, 16, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 47, 32, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56, 63, 48, 49, 50, 35, 36, 51, 
36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2226(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 15, 0, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 63, 48, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56, 47, 32, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2227(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 31, 0, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 47, 48, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56, 63, 32, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2228(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 15, 16, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 63, 32, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56, 47, 48, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2229(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 31, 16, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 47, 32, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56, 63, 48, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2230(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 15, 0, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 63, 48, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56, 47, 32, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2231(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 31, 0, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 47, 48, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56, 63, 32, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2232(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 15, 16, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 63, 32, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56, 47, 48, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2233(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 31, 16, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 47, 32, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56, 63, 48, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2234(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 15, 0, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 63, 48, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56, 47, 32, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2235(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 31, 0, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 47, 48, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56, 63, 32, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2236(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 15, 16, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 63, 32, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56, 47, 48, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2237(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 31, 16, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 47, 32, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56, 63, 48, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2238(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 15, 0, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 63, 48, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56, 47, 32, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2239(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 31, 0, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 47, 48, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56, 63, 32, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2240(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 15, 16, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 63, 32, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56, 47, 48, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2241(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 31, 16, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 47, 32, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56, 63, 48, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2242(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 
4, 19, 20, 5, 6, 7, 24, 15, 0, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 63, 48, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56, 47, 32, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2243(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 31, 0, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 47, 48, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56, 63, 32, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2244(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 15, 16, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 63, 32, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56, 47, 48, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2245(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 31, 16, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 47, 32, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56, 63, 48, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2246(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 15, 0, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 63, 48, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56, 47, 32, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2247(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 31, 0, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 47, 48, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56, 63, 32, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2248(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 15, 16, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 63, 32, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56, 47, 48, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2249(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 31, 16, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 47, 32, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56, 63, 48, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2250(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 15, 0, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 63, 48, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56, 47, 32, 49, 34, 51, 52, 35, 
36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2251(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 31, 0, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 47, 48, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56, 63, 32, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2252(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 15, 16, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 63, 32, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56, 47, 48, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2253(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 31, 16, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 47, 32, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56, 63, 48, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2254(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 15, 0, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 63, 48, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56, 47, 32, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2255(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 31, 0, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 47, 48, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56, 63, 32, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2256(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 15, 16, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 63, 32, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56, 47, 48, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2257(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 31, 16, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 47, 32, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56, 63, 48, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2258(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 15, 0, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 63, 48, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56, 47, 32, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2259(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 31, 0, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 47, 48, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56, 63, 32, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2260(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 15, 16, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 63, 32, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56, 47, 48, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2261(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 31, 16, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 47, 32, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56, 63, 48, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2262(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 15, 0, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 63, 48, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56, 47, 32, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2263(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 31, 0, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 47, 48, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56, 63, 32, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2264(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 15, 16, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 63, 32, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56, 47, 48, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2265(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 31, 16, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 47, 32, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56, 63, 48, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2266(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 15, 0, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 63, 48, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56, 47, 32, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2267(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 
19, 4, 19, 20, 5, 6, 7, 24, 31, 0, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 47, 48, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56, 63, 32, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2268(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 15, 16, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 63, 32, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56, 47, 48, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2269(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 31, 16, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 47, 32, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56, 63, 48, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2270(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 15, 0, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 63, 48, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56, 47, 32, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2271(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 31, 0, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 47, 48, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56, 63, 32, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2272(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 15, 16, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 63, 32, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56, 47, 48, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2273(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 31, 16, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 47, 32, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56, 63, 48, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2274(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 15, 0, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 63, 48, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56, 47, 32, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2275(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 31, 0, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 47, 48, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56, 63, 32, 49, 50, 51, 36, 35, 
36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2276(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 15, 16, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 63, 32, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56, 47, 48, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2277(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 31, 16, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 47, 32, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56, 63, 48, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2278(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 15, 0, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 63, 48, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56, 47, 32, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2279(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 31, 0, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 47, 48, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56, 63, 32, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2280(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 15, 16, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 63, 32, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56, 47, 48, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2281(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 31, 16, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 47, 32, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56, 63, 48, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2282(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 15, 0, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 63, 48, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56, 47, 32, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2283(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 31, 0, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 47, 48, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56, 63, 32, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2284(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 15, 16, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 63, 32, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56, 47, 48, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2285(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 31, 16, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 47, 32, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56, 63, 48, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2286(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 15, 0, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 63, 48, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56, 47, 32, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2287(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 31, 0, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 47, 48, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56, 63, 32, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2288(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 15, 16, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 63, 32, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56, 47, 48, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2289(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 31, 16, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 47, 32, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56, 63, 48, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2290(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 15, 0, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 63, 48, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56, 47, 32, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2291(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 31, 0, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 47, 48, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56, 63, 32, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2292(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 
20, 19, 20, 5, 6, 7, 24, 15, 16, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 63, 32, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56, 47, 48, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2293(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 31, 16, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 47, 32, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56, 63, 48, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2294(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 15, 0, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 63, 48, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56, 47, 32, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2295(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 31, 0, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 47, 48, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56, 63, 32, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2296(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 15, 16, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 63, 32, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56, 47, 48, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2297(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 31, 16, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 47, 32, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56, 63, 48, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2298(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 15, 0, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 63, 48, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56, 47, 32, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2299(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 31, 0, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 47, 48, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56, 63, 32, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2300(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 15, 16, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 63, 32, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56, 47, 48, 33, 34, 35, 36, 35, 36, 
53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2301(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 31, 16, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 47, 32, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56, 63, 48, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2302(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 15, 0, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 63, 48, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56, 47, 32, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2303(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 31, 0, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 47, 48, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56, 63, 32, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } #endif // DESGPU_COMPILE_ALL_SALTS
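A hypothetical host-side dispatch sketch (editor's addition, not part of the generated file above). The hundreds of des_25_encrypt_saltNNNN() wrappers differ only in the compile-time E-box swap pattern baked in for one 12-bit DES salt, so a caller would normally select the matching launcher through a table indexed by the salt. The names des_25_launchers, des_25_encrypt_for_salt, and salt_index are illustrative; vtype is assumed to come from the project's headers, and only a few table entries are listed.

#include <cstddef>

// Signature shared by every generated per-salt launcher in the file above.
typedef void (*des_25_launcher_t)(const size_t num_blocks,
                                  const size_t threads_per_block,
                                  vtype* const unchecked_hashes,
                                  const vtype* const bitsplitted_keys);

// One slot per salt value; a complete build would enumerate all 4096 variants.
static const des_25_launcher_t des_25_launchers[] = {
    des_25_encrypt_salt2235,
    des_25_encrypt_salt2236,
    des_25_encrypt_salt2237,
    /* ... remaining salt variants ... */
    des_25_encrypt_salt2303,
};

// Forward the launch to the kernel instantiation specialised for this salt.
static void des_25_encrypt_for_salt(size_t salt_index, size_t num_blocks,
                                    size_t threads_per_block,
                                    vtype* unchecked_hashes,
                                    const vtype* bitsplitted_keys)
{
    des_25_launchers[salt_index](num_blocks, threads_per_block,
                                 unchecked_hashes, bitsplitted_keys);
}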
ba382d1f55f7909524801bf574d0d96b33fd3c79.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void swapVals_kernel(unsigned int * d_newArray, unsigned int * d_oldArray, unsigned int numElems)
{
    unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (gIdx < numElems)
    {
        d_newArray[gIdx] = d_oldArray[gIdx];
    }
}
ba382d1f55f7909524801bf574d0d96b33fd3c79.cu
#include "includes.h"

__global__ void swapVals_kernel(unsigned int * d_newArray, unsigned int * d_oldArray, unsigned int numElems)
{
    unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (gIdx < numElems)
    {
        d_newArray[gIdx] = d_oldArray[gIdx];
    }
}
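A minimal host-side usage sketch for the kernel above (editor's addition). The helper name copyDeviceArray and the block size of 256 are illustrative assumptions; the kernel simply copies d_oldArray into d_newArray, so the host only needs to round the element count up to a whole number of blocks.

#include <cuda_runtime.h>

__global__ void swapVals_kernel(unsigned int * d_newArray, unsigned int * d_oldArray, unsigned int numElems);

// Copy 'numElems' unsigned ints from d_oldArray to d_newArray on the device.
void copyDeviceArray(unsigned int * d_newArray, unsigned int * d_oldArray, unsigned int numElems)
{
    const unsigned int threadsPerBlock = 256;  // assumed block size
    const unsigned int blocks = (numElems + threadsPerBlock - 1) / threadsPerBlock;  // round up
    swapVals_kernel<<<blocks, threadsPerBlock>>>(d_newArray, d_oldArray, numElems);
    cudaDeviceSynchronize();  // wait for the device-side copy to finish
}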
992bb7b531f95a93d0e32963db0b2f0c258f4db7.hip
// !!! This is a file automatically generated by hipify!!! #include <cstddef> #include <iostream> #include <algorithm> #pragma once #include "ctc_helper.h" #include "gpu_ctc_kernels.h" #include "reduce_hip.cuh" template <typename ProbT> class GpuCTC { public: GpuCTC(int alphabet_size, int minibatch, void* workspace, hipStream_t stream, int blank_label) : out_dim_(alphabet_size), minibatch_(minibatch), gpu_workspace_(workspace), stream_(stream), blank_label_(blank_label) {}; // Noncopyable GpuCTC(const GpuCTC &) = delete; GpuCTC & operator=(const GpuCTC &) = delete; ctcStatus_t cost_and_grad(const ProbT* const activations, ProbT* grads, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths); ctcStatus_t score_forward(const ProbT* const activations, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths); private: template<int NT, int VT> ctcStatus_t launch_alpha_beta_kernels(const ProbT* const probs, ProbT* grads, bool compute_alpha, bool compute_beta); ctcStatus_t launch_gpu_kernels(const ProbT* const probs, ProbT* grads, size_t config, bool launch_alpha, bool launch_beta); ctcStatus_t setup_gpu_metadata(const int* const flat_labels, const int* const label_lengths, const int* const input_lengths); ctcStatus_t create_metadata_and_choose_config(const int* const label_lengths, const int* const flat_labels, const int* const input_lengths, size_t & best_config); ctcStatus_t compute_probs(const ProbT* const activations); ctcStatus_t compute_cost_and_score(const ProbT* const activations, ProbT* grads, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths, bool compute_alpha, bool compute_betas_and_grad); int out_dim_; // Number of characters plus blank int minibatch_; int S_; int T_; int activation_cols_; // Number of columns in activations hipStream_t stream_; int blank_label_; void* gpu_workspace_; // Buffer for all temporary GPU memory int* utt_length_; // T int* label_sizes_; // L int* repeats_; // repeats_ int* label_offsets_; int* labels_without_blanks_; int* labels_with_blanks_; ProbT* alphas_; ProbT* nll_forward_; ProbT* nll_backward_; ProbT* denoms_; // Temporary storage for denoms for softmax ProbT* probs_; // Temporary storage for probabilities (softmax output) }; template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::setup_gpu_metadata(const int* const flat_labels, const int* const label_lengths, const int* const input_lengths) { size_t gpu_bytes_used = 0; nll_forward_ = reinterpret_cast<ProbT*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(ProbT); nll_backward_ = reinterpret_cast<ProbT*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(ProbT); repeats_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(int); label_offsets_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(int); // This is the max of all S and T for all valid examples in the minibatch. // A valid example is one for which L + repeats <= T S_ = 0; T_ = 0; // This is the max of all timesteps, valid or not. Needed to compute offsets int Tmax = 0; // This is the max of all labels, valid or not. 
Needed to compute offsets int Lmax = 0; int total_label_length = 0; constexpr int cpu_buffer_size = 64; int repeats[cpu_buffer_size]; int label_offsets[cpu_buffer_size]; const int num_passes = ctc_helper::div_up(minibatch_, cpu_buffer_size); hipError_t cuda_status; for (int pass = 0; pass < num_passes; ++pass) { const int start_idx = pass * cpu_buffer_size; const int end_idx = ::min(minibatch_, (pass + 1) * cpu_buffer_size); for (int j = start_idx; j < end_idx; ++j) { const int L = label_lengths[j]; const int local_T = input_lengths[j]; const int* label_ptr = &(flat_labels[total_label_length]); label_offsets[j % cpu_buffer_size] = total_label_length; total_label_length += L; int repeat_counter = 0; for (int i = 1; i < L; ++i) { repeat_counter += (label_ptr[i] == label_ptr[i - 1]); } repeats[j % cpu_buffer_size] = repeat_counter; const bool valid_label = ((L + repeat_counter) <= local_T); // Only update S and T if label is valid S_ = (valid_label) ? ::max(S_, L) : S_; T_ = (valid_label) ? ::max(T_, local_T) : T_; Tmax = ::max(Tmax, local_T); Lmax = ::max(Lmax, L); } cuda_status = hipMemcpyAsync(&(repeats_[start_idx]), repeats, (end_idx - start_idx) * sizeof(int), hipMemcpyHostToDevice, stream_); if (cuda_status != hipSuccess) { return CTC_STATUS_MEMOPS_FAILED; } cuda_status = hipMemcpyAsync(&(label_offsets_[start_idx]), label_offsets, (end_idx - start_idx) * sizeof(int), hipMemcpyHostToDevice, stream_); if (cuda_status != hipSuccess) { return CTC_STATUS_MEMOPS_FAILED; } } S_ = 2 * S_ + 1; const int Smax = 2 * Lmax + 1; activation_cols_ = minibatch_ * Tmax; // Allocate memory for T utt_length_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(int); cuda_status = hipMemcpyAsync(utt_length_, input_lengths, minibatch_ * sizeof(int), hipMemcpyHostToDevice, stream_); if (cuda_status != hipSuccess) { return CTC_STATUS_MEMOPS_FAILED; } label_sizes_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(int); cuda_status = hipMemcpyAsync(label_sizes_, label_lengths, minibatch_ * sizeof(int), hipMemcpyHostToDevice, stream_); if (cuda_status != hipSuccess) { return CTC_STATUS_MEMOPS_FAILED; } labels_without_blanks_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += Lmax * minibatch_ * sizeof(int); cuda_status = hipMemcpyAsync(labels_without_blanks_, flat_labels, total_label_length * sizeof(int), hipMemcpyHostToDevice, stream_); if (cuda_status != hipSuccess) { return CTC_STATUS_MEMOPS_FAILED; } labels_with_blanks_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += Smax * minibatch_ * sizeof(int); alphas_ = reinterpret_cast<ProbT*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += (S_ * T_) * minibatch_ * sizeof(ProbT); denoms_ = reinterpret_cast<ProbT*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += activation_cols_ * sizeof(ProbT); probs_ = reinterpret_cast<ProbT*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += out_dim_ * activation_cols_ * sizeof(ProbT); return CTC_STATUS_SUCCESS; } template<typename ProbT> template<int NT, int VT> ctcStatus_t GpuCTC<ProbT>::launch_alpha_beta_kernels(const ProbT* const probs, ProbT* grads, bool compute_alpha, bool compute_beta) { // One thread block per utterance const int grid_size = minibatch_; // The data is laid out so that the next timestep is minibatch entries // away 
const int stride = minibatch_; if (compute_alpha) compute_alpha_kernel<ProbT, NT, VT> << < grid_size, NT, 0, stream_ >> > (probs, label_sizes_, utt_length_, repeats_, labels_without_blanks_, label_offsets_, labels_with_blanks_, alphas_, nll_forward_, stride, out_dim_, S_, T_, blank_label_); if (compute_beta) { compute_betas_and_grad_kernel<ProbT, NT, VT> << < grid_size, NT, 0, stream_ >> > (probs, label_sizes_, utt_length_, repeats_, labels_with_blanks_, alphas_, nll_forward_, nll_backward_, grads, stride, out_dim_, S_, T_, blank_label_); hipStreamSynchronize(stream_); } hipError_t err = hipGetLastError(); if (err != hipSuccess) return CTC_STATUS_EXECUTION_FAILED; return CTC_STATUS_SUCCESS; } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::create_metadata_and_choose_config(const int* const flat_labels, const int* const label_lengths, const int* const input_lengths, size_t & best_config) { // Setup the metadata for GPU ctcStatus_t status = setup_gpu_metadata(flat_labels, label_lengths, input_lengths); if (status != CTC_STATUS_SUCCESS) { return status; } constexpr int num_configs = 12; int config_NT[num_configs] = { 32, 64, 128, 64, 128, 32, 64, 128, 64, 128, 128, 128 }; int config_VT[num_configs] = { 1, 1, 1, 3, 2, 9, 6, 4, 9, 6, 9, 10 }; best_config = 0; for (int i = 0; i < num_configs; ++i) { if ((config_NT[i] * config_VT[i]) >= S_) { break; } else { best_config++; } } if (best_config >= num_configs) { return CTC_STATUS_UNKNOWN_ERROR; } return CTC_STATUS_SUCCESS; } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::launch_gpu_kernels(const ProbT* const probs, ProbT* grads, size_t config, bool l_a, bool l_b) { switch (config) { case 0: {return launch_alpha_beta_kernels<32, 1>(probs, grads, l_a, l_b); } case 1: {return launch_alpha_beta_kernels<64, 1>(probs, grads, l_a, l_b); } case 2: {return launch_alpha_beta_kernels<128, 1>(probs, grads, l_a, l_b); } case 3: {return launch_alpha_beta_kernels<64, 3>(probs, grads, l_a, l_b); } case 4: {return launch_alpha_beta_kernels<128, 2>(probs, grads, l_a, l_b); } case 5: {return launch_alpha_beta_kernels<32, 9>(probs, grads, l_a, l_b); } case 6: {return launch_alpha_beta_kernels<64, 6>(probs, grads, l_a, l_b); } case 7: {return launch_alpha_beta_kernels<128, 4>(probs, grads, l_a, l_b); } case 8: {return launch_alpha_beta_kernels<64, 9>(probs, grads, l_a, l_b); } case 9: {return launch_alpha_beta_kernels<128, 6>(probs, grads, l_a, l_b); } case 10: {return launch_alpha_beta_kernels<128, 9>(probs, grads, l_a, l_b); } case 11: {return launch_alpha_beta_kernels<128, 10>(probs, grads, l_a, l_b); } } return CTC_STATUS_EXECUTION_FAILED; } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::compute_probs(const ProbT* const activations) { hipError_t cuda_status; cuda_status = hipMemcpyAsync(probs_, activations, activation_cols_ * out_dim_ * sizeof(ProbT), hipMemcpyDeviceToDevice, stream_); if (cuda_status != hipSuccess) { return CTC_STATUS_MEMOPS_FAILED; } // Numerically stable SM ctcStatus_t ctc_status = reduce_max(probs_, denoms_, out_dim_, activation_cols_, 1, stream_); if (ctc_status != CTC_STATUS_SUCCESS) { return ctc_status; } // Kernel launch to subtract maximum const int NT = 128; const int VT = 1; const int NV = NT * VT; const int num_elements = out_dim_ * activation_cols_; const int grid_size = ctc_helper::div_up(num_elements, NV); prepare_stable_SM_kernel<ProbT, VT> << < grid_size, NT, 0, stream_ >> > (ctc_helper::identity<ProbT>(), probs_, denoms_, out_dim_, num_elements); // Reduce along columns to calculate denominator ctc_status = 
reduce_exp(probs_, denoms_, out_dim_, activation_cols_, 1, stream_); if (ctc_status != CTC_STATUS_SUCCESS) return ctc_status; // Kernel launch to calculate probabilities compute_probs_kernel<ProbT, VT> << < grid_size, NT, 0, stream_ >> > (ctc_helper::exponential<ProbT>(), probs_, denoms_, out_dim_, num_elements); return CTC_STATUS_SUCCESS; } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::compute_cost_and_score(const ProbT* const activations, ProbT* grads, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths, bool compute_alpha, bool compute_betas_and_grad) { size_t best_config; ctcStatus_t status = create_metadata_and_choose_config(flat_labels, label_lengths, input_lengths, best_config); if (status != CTC_STATUS_SUCCESS) { return status; } status = compute_probs(activations); if (status != CTC_STATUS_SUCCESS) { return status; } launch_gpu_kernels(probs_, grads, best_config, compute_alpha, compute_betas_and_grad); hipError_t cuda_status_mem, cuda_status_sync; cuda_status_mem = hipMemcpyAsync(costs, nll_forward_, sizeof(ProbT) * minibatch_, hipMemcpyDeviceToHost, stream_); cuda_status_sync = hipStreamSynchronize(stream_); if (cuda_status_mem != hipSuccess || cuda_status_sync != hipSuccess) { return CTC_STATUS_MEMOPS_FAILED; } return CTC_STATUS_SUCCESS; } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::cost_and_grad(const ProbT* const activations, ProbT* grads, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths) { if (activations == nullptr || grads == nullptr || costs == nullptr || flat_labels == nullptr || label_lengths == nullptr || input_lengths == nullptr ) { return CTC_STATUS_INVALID_VALUE; } return compute_cost_and_score(activations, grads, costs, flat_labels, label_lengths, input_lengths, true, true); } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::score_forward(const ProbT* const activations, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths) { if (activations == nullptr || costs == nullptr || flat_labels == nullptr || label_lengths == nullptr || input_lengths == nullptr ) { return CTC_STATUS_INVALID_VALUE; } return compute_cost_and_score(activations, nullptr, costs, flat_labels, label_lengths, input_lengths, true, false); } ctcStatus_t FUN(compute_ctc_loss)(const Dtype* const activations, Dtype* gradients, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths, int alphabet_size, int minibatch, Dtype *costs, void *workspace, ctcOptions options) { if (activations == nullptr || flat_labels == nullptr || label_lengths == nullptr || input_lengths == nullptr || costs == nullptr || workspace == nullptr || alphabet_size <= 0 || minibatch <= 0) return CTC_STATUS_INVALID_VALUE; GpuCTC<Dtype> ctc(alphabet_size, minibatch, workspace, options.stream, options.blank_label); if (gradients != NULL) return ctc.cost_and_grad(activations, gradients, costs, flat_labels, label_lengths, input_lengths); else return ctc.score_forward(activations, costs, flat_labels, label_lengths, input_lengths); } ctcStatus_t FUN(get_workspace_size)(const int* const label_lengths, const int* const input_lengths, int alphabet_size, int minibatch, ctcOptions options, size_t* size_bytes) { if (label_lengths == nullptr || input_lengths == nullptr || size_bytes == nullptr || alphabet_size <= 0 || minibatch <= 0) return CTC_STATUS_INVALID_VALUE; // This is the max of all S and T for all examples in the 
minibatch. int maxL = *std::max_element(label_lengths, label_lengths + minibatch); int maxT = *std::max_element(input_lengths, input_lengths + minibatch); const int S = 2 * maxL + 1; *size_bytes = 0; // GPU storage //nll_forward, nll_backward *size_bytes += 2 * sizeof(Dtype) * minibatch; //repeats *size_bytes += sizeof(int) * minibatch; //label offsets *size_bytes += sizeof(int) * minibatch; //utt_length *size_bytes += sizeof(int) * minibatch; //label lengths *size_bytes += sizeof(int) * minibatch; //labels without blanks - overallocate for now *size_bytes += sizeof(int) * maxL * minibatch; //labels with blanks *size_bytes += sizeof(int) * S * minibatch; //alphas *size_bytes += sizeof(Dtype) * S * maxT * minibatch; //denoms *size_bytes += sizeof(Dtype) * maxT * minibatch; //probs (since we will pass in activations) *size_bytes += sizeof(Dtype) * alphabet_size * maxT * minibatch; return CTC_STATUS_SUCCESS; } void FUN(ExtractInputData)(int T_, int N_, int C_, int blank_index_, const Dtype* seq_ind_data, const Dtype* labels_data, vector<int>* flat_labels, vector<int>* label_lengths, vector<int>* input_lengths) { const Dtype* seq_ind = seq_ind_data; const Dtype* target_seq = labels_data; flat_labels->clear(); flat_labels->reserve(T_ * N_); // maximum required label_lengths->resize(N_); input_lengths->resize(N_); // compute the sequence length and label length int* seq_len = input_lengths->data(); int* label_len = label_lengths->data(); int label_offset = 0; //if (blank_index_ == -1) { if (blank_index_ == 0) {//modified by jxs label_offset = 1; } for (int n = 0; n < N_; ++n) { seq_len[n] = T_; // default value is maximal allowed length label_len[n] = T_; // default value is maximal allowed length const Dtype* seq = seq_ind + n; const Dtype* label = target_seq + n; // sequence indicators start with seq == 0.0 to indicate the start of a // sequence. 
Skip at t = 0, so start at t = 1 seq += N_; for (int t = 1; t < T_; ++t) { if (static_cast<int>(*seq + 0.5) == 0) { seq_len[n] = t; break; } seq += N_; } // label indicators are negative if the sequence has ended for (int t = 0; t < T_; ++t) { if (*label < 0.0) { label_len[n] = t; break; } // Note that the blank label will be 0 flat_labels->push_back(static_cast<int>(*label + 0.5) + label_offset); label += N_; } // if the label length is 0, the seq_len is 1 (0 following 0) // set seq_len to 0 in this case aswell, to skip this example if (label_len[n] == 0) { CHECK_LE(seq_len[n], 1); seq_len[n] = 0; } CHECK_LE(label_len[n], seq_len[n]) << "The label length must be smaller or equals the sequence length!"; } } void FUN(warp_ctc_loss_fwd)(int T_, int N_, int C_, int count, int blank_index_, const Dtype* bottom0_data, Dtype* bottom0_mdiff, const Dtype* bottom1_data, const Dtype* bottom2_data, const Dtype* bottom3_data, Dtype* top) { const Dtype* activations = bottom0_data; Dtype* gradients = bottom0_mdiff; const int alphabet_size = C_; const int minibatch = N_; int bottom_size = (bottom0_data != NULL) + (bottom1_data != NULL) + (bottom2_data != NULL) + (bottom3_data != NULL); vector<int> flat_labels_; vector<int> label_lengths_; vector<int> input_lengths_; label_lengths_.resize(N_); input_lengths_.resize(N_); vector<Dtype> costs(N_); flat_labels_.clear(); if (bottom_size == 2) {//bottom[0]=activations, bottom[1] is labels, shape: Batchsize*seq len const Dtype* label_seq_d = bottom1_data; int label_len_per_batch = count / N_; for (int n = 0; n < N_; ++n) { int curlen = 0; for (int l = 0; l < label_len_per_batch; ++l) { int label = (int)label_seq_d[n * label_len_per_batch + l]; if (label <= blank_index_) { continue; } flat_labels_.push_back(label); curlen++; } label_lengths_[n] = curlen; input_lengths_[n] = T_; } } else if (bottom_size == 3) { FUN(ExtractInputData)(T_, N_, C_, blank_index_, bottom1_data, bottom2_data, &flat_labels_, &label_lengths_, &input_lengths_); } else if (bottom_size == 4) { //Blob* seq_len_blob = bottom1_data; //Blob* lab_len_blob = bottom2_data; //Blob* label_seq_blob = bottom3_data; const Dtype* seq_len_d = bottom1_data; const Dtype* lab_len_d = bottom2_data; const Dtype* label_seq_d = bottom3_data; int accumulated = 0; int label_len_per_batch = count / N_; //CHECK_EQ(seq_len_blob->count(), lab_len_blob->count()); for (int i = 0; i < count; ++i) { label_lengths_[i] = (int)lab_len_d[i]; input_lengths_[i] = (int)seq_len_d[i]; accumulated += (int)lab_len_d[i]; } flat_labels_.clear(); flat_labels_.reserve(accumulated); for (int n = 0; n < N_; ++n) { for (int t = 0; t < label_lengths_[n]; ++t) { flat_labels_.push_back((int)label_seq_d[n*label_len_per_batch + t]); } } } else { LOG(FATAL) << "Unsupported blobs shape"; } //remove repeat blank labels size_t workspace_alloc_bytes_; ctcOptions options; hipStream_t stream; CHECK_EQ(hipStreamCreate(&stream), hipSuccess); options.loc = CTC_GPU; options.stream = stream; options.blank_label = blank_index_; ctcStatus_t status = FUN(get_workspace_size)(label_lengths_.data(), input_lengths_.data(), alphabet_size, minibatch, options, &workspace_alloc_bytes_); CHECK_EQ(status, CTC_STATUS_SUCCESS) << "CTC Error: " << ctcGetStatusString(status); Buffer workspace_[1] = { 0 }; if (workspace_->size< workspace_alloc_bytes_) { gpu_ReAlloc(workspace_, workspace_alloc_bytes_ * sizeof(char)); } //cuda_compute_ctc_loss; status = FUN(compute_ctc_loss)(activations, gradients, flat_labels_.data(), label_lengths_.data(), input_lengths_.data(), 
alphabet_size, minibatch, costs.data(), workspace_->data, options ); CHECK_EQ(status, CTC_STATUS_SUCCESS) << "CTC Error: " << ctcGetStatusString(status); // output loss Dtype loss;// = top_mdata()[0]; loss = 0; int num = 1; for (int n = 0; n < N_; ++n) { if (costs[n] < std::numeric_limits<Dtype>::infinity()) { loss += costs[n]; ++num; } } if (num==1) { int asdf = 0; } loss /= num; if (isnan(loss)) { int asdf = 0; } *top = loss; Free(workspace_); #if 0 int gcnt = bottom[0]->count(); Dtype sumg = 0; for (int i = 0; i < gcnt; i++) { sumg += fabs(gradients[i]); } //LOG(INFO) << "mean ctc loss=" << loss << ",N_="<<N_<<",num="<<num << ", mean gradients="<<sumg/gcnt; #endif CHECK_EQ(hipStreamDestroy(stream), hipSuccess); return; }
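A call-order sketch for the CTC wrappers defined above (editor's addition). It mirrors what warp_ctc_loss_fwd does internally: query the workspace size, allocate it on the device, then run compute_ctc_loss. FUN() is assumed to be the project's name-prefix macro and Dtype is assumed to be float; ctc_loss_example and the plain CUDA runtime calls are illustrative (the hipified file above would use hipStreamCreate/hipMalloc instead), and status checking is omitted for brevity.

#include <cstddef>
#include <vector>
#include <cuda_runtime.h>

void ctc_loss_example(const float* d_activations, float* d_gradients,
                      const std::vector<int>& flat_labels,
                      const std::vector<int>& label_lengths,
                      const std::vector<int>& input_lengths,
                      int alphabet_size, int minibatch)
{
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    ctcOptions options;
    options.loc = CTC_GPU;      // run the GPU implementation
    options.stream = stream;
    options.blank_label = 0;    // index reserved for the CTC blank

    // 1. Ask how much scratch memory the GPU implementation needs.
    size_t workspace_bytes = 0;
    FUN(get_workspace_size)(label_lengths.data(), input_lengths.data(),
                            alphabet_size, minibatch, options, &workspace_bytes);

    // 2. Allocate the workspace on the device.
    void* d_workspace = nullptr;
    cudaMalloc(&d_workspace, workspace_bytes);

    // 3. Forward/backward pass; per-example negative log-likelihoods land in 'costs'.
    std::vector<float> costs(minibatch);
    FUN(compute_ctc_loss)(d_activations, d_gradients,
                          flat_labels.data(), label_lengths.data(),
                          input_lengths.data(), alphabet_size, minibatch,
                          costs.data(), d_workspace, options);

    cudaFree(d_workspace);
    cudaStreamDestroy(stream);
}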
992bb7b531f95a93d0e32963db0b2f0c258f4db7.cu
#include <cstddef> #include <iostream> #include <algorithm> #pragma once #include "ctc_helper.h" #include "gpu_ctc_kernels.h" #include "reduce.cuh" template <typename ProbT> class GpuCTC { public: GpuCTC(int alphabet_size, int minibatch, void* workspace, CUstream stream, int blank_label) : out_dim_(alphabet_size), minibatch_(minibatch), gpu_workspace_(workspace), stream_(stream), blank_label_(blank_label) {}; // Noncopyable GpuCTC(const GpuCTC &) = delete; GpuCTC & operator=(const GpuCTC &) = delete; ctcStatus_t cost_and_grad(const ProbT* const activations, ProbT* grads, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths); ctcStatus_t score_forward(const ProbT* const activations, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths); private: template<int NT, int VT> ctcStatus_t launch_alpha_beta_kernels(const ProbT* const probs, ProbT* grads, bool compute_alpha, bool compute_beta); ctcStatus_t launch_gpu_kernels(const ProbT* const probs, ProbT* grads, size_t config, bool launch_alpha, bool launch_beta); ctcStatus_t setup_gpu_metadata(const int* const flat_labels, const int* const label_lengths, const int* const input_lengths); ctcStatus_t create_metadata_and_choose_config(const int* const label_lengths, const int* const flat_labels, const int* const input_lengths, size_t & best_config); ctcStatus_t compute_probs(const ProbT* const activations); ctcStatus_t compute_cost_and_score(const ProbT* const activations, ProbT* grads, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths, bool compute_alpha, bool compute_betas_and_grad); int out_dim_; // Number of characters plus blank int minibatch_; int S_; int T_; int activation_cols_; // Number of columns in activations CUstream stream_; int blank_label_; void* gpu_workspace_; // Buffer for all temporary GPU memory int* utt_length_; // T int* label_sizes_; // L int* repeats_; // repeats_ int* label_offsets_; int* labels_without_blanks_; int* labels_with_blanks_; ProbT* alphas_; ProbT* nll_forward_; ProbT* nll_backward_; ProbT* denoms_; // Temporary storage for denoms for softmax ProbT* probs_; // Temporary storage for probabilities (softmax output) }; template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::setup_gpu_metadata(const int* const flat_labels, const int* const label_lengths, const int* const input_lengths) { size_t gpu_bytes_used = 0; nll_forward_ = reinterpret_cast<ProbT*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(ProbT); nll_backward_ = reinterpret_cast<ProbT*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(ProbT); repeats_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(int); label_offsets_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(int); // This is the max of all S and T for all valid examples in the minibatch. // A valid example is one for which L + repeats <= T S_ = 0; T_ = 0; // This is the max of all timesteps, valid or not. Needed to compute offsets int Tmax = 0; // This is the max of all labels, valid or not. 
Needed to compute offsets int Lmax = 0; int total_label_length = 0; constexpr int cpu_buffer_size = 64; int repeats[cpu_buffer_size]; int label_offsets[cpu_buffer_size]; const int num_passes = ctc_helper::div_up(minibatch_, cpu_buffer_size); cudaError_t cuda_status; for (int pass = 0; pass < num_passes; ++pass) { const int start_idx = pass * cpu_buffer_size; const int end_idx = std::min(minibatch_, (pass + 1) * cpu_buffer_size); for (int j = start_idx; j < end_idx; ++j) { const int L = label_lengths[j]; const int local_T = input_lengths[j]; const int* label_ptr = &(flat_labels[total_label_length]); label_offsets[j % cpu_buffer_size] = total_label_length; total_label_length += L; int repeat_counter = 0; for (int i = 1; i < L; ++i) { repeat_counter += (label_ptr[i] == label_ptr[i - 1]); } repeats[j % cpu_buffer_size] = repeat_counter; const bool valid_label = ((L + repeat_counter) <= local_T); // Only update S and T if label is valid S_ = (valid_label) ? std::max(S_, L) : S_; T_ = (valid_label) ? std::max(T_, local_T) : T_; Tmax = std::max(Tmax, local_T); Lmax = std::max(Lmax, L); } cuda_status = cudaMemcpyAsync(&(repeats_[start_idx]), repeats, (end_idx - start_idx) * sizeof(int), cudaMemcpyHostToDevice, stream_); if (cuda_status != cudaSuccess) { return CTC_STATUS_MEMOPS_FAILED; } cuda_status = cudaMemcpyAsync(&(label_offsets_[start_idx]), label_offsets, (end_idx - start_idx) * sizeof(int), cudaMemcpyHostToDevice, stream_); if (cuda_status != cudaSuccess) { return CTC_STATUS_MEMOPS_FAILED; } } S_ = 2 * S_ + 1; const int Smax = 2 * Lmax + 1; activation_cols_ = minibatch_ * Tmax; // Allocate memory for T utt_length_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(int); cuda_status = cudaMemcpyAsync(utt_length_, input_lengths, minibatch_ * sizeof(int), cudaMemcpyHostToDevice, stream_); if (cuda_status != cudaSuccess) { return CTC_STATUS_MEMOPS_FAILED; } label_sizes_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += minibatch_ * sizeof(int); cuda_status = cudaMemcpyAsync(label_sizes_, label_lengths, minibatch_ * sizeof(int), cudaMemcpyHostToDevice, stream_); if (cuda_status != cudaSuccess) { return CTC_STATUS_MEMOPS_FAILED; } labels_without_blanks_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += Lmax * minibatch_ * sizeof(int); cuda_status = cudaMemcpyAsync(labels_without_blanks_, flat_labels, total_label_length * sizeof(int), cudaMemcpyHostToDevice, stream_); if (cuda_status != cudaSuccess) { return CTC_STATUS_MEMOPS_FAILED; } labels_with_blanks_ = reinterpret_cast<int*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += Smax * minibatch_ * sizeof(int); alphas_ = reinterpret_cast<ProbT*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += (S_ * T_) * minibatch_ * sizeof(ProbT); denoms_ = reinterpret_cast<ProbT*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += activation_cols_ * sizeof(ProbT); probs_ = reinterpret_cast<ProbT*>(static_cast<char*>(gpu_workspace_) + gpu_bytes_used); gpu_bytes_used += out_dim_ * activation_cols_ * sizeof(ProbT); return CTC_STATUS_SUCCESS; } template<typename ProbT> template<int NT, int VT> ctcStatus_t GpuCTC<ProbT>::launch_alpha_beta_kernels(const ProbT* const probs, ProbT* grads, bool compute_alpha, bool compute_beta) { // One thread block per utterance const int grid_size = minibatch_; // The data is laid out so that the next timestep is 
minibatch entries // away const int stride = minibatch_; if (compute_alpha) compute_alpha_kernel<ProbT, NT, VT> << < grid_size, NT, 0, stream_ >> > (probs, label_sizes_, utt_length_, repeats_, labels_without_blanks_, label_offsets_, labels_with_blanks_, alphas_, nll_forward_, stride, out_dim_, S_, T_, blank_label_); if (compute_beta) { compute_betas_and_grad_kernel<ProbT, NT, VT> << < grid_size, NT, 0, stream_ >> > (probs, label_sizes_, utt_length_, repeats_, labels_with_blanks_, alphas_, nll_forward_, nll_backward_, grads, stride, out_dim_, S_, T_, blank_label_); cudaStreamSynchronize(stream_); } cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return CTC_STATUS_EXECUTION_FAILED; return CTC_STATUS_SUCCESS; } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::create_metadata_and_choose_config(const int* const flat_labels, const int* const label_lengths, const int* const input_lengths, size_t & best_config) { // Setup the metadata for GPU ctcStatus_t status = setup_gpu_metadata(flat_labels, label_lengths, input_lengths); if (status != CTC_STATUS_SUCCESS) { return status; } constexpr int num_configs = 12; int config_NT[num_configs] = { 32, 64, 128, 64, 128, 32, 64, 128, 64, 128, 128, 128 }; int config_VT[num_configs] = { 1, 1, 1, 3, 2, 9, 6, 4, 9, 6, 9, 10 }; best_config = 0; for (int i = 0; i < num_configs; ++i) { if ((config_NT[i] * config_VT[i]) >= S_) { break; } else { best_config++; } } if (best_config >= num_configs) { return CTC_STATUS_UNKNOWN_ERROR; } return CTC_STATUS_SUCCESS; } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::launch_gpu_kernels(const ProbT* const probs, ProbT* grads, size_t config, bool l_a, bool l_b) { switch (config) { case 0: {return launch_alpha_beta_kernels<32, 1>(probs, grads, l_a, l_b); } case 1: {return launch_alpha_beta_kernels<64, 1>(probs, grads, l_a, l_b); } case 2: {return launch_alpha_beta_kernels<128, 1>(probs, grads, l_a, l_b); } case 3: {return launch_alpha_beta_kernels<64, 3>(probs, grads, l_a, l_b); } case 4: {return launch_alpha_beta_kernels<128, 2>(probs, grads, l_a, l_b); } case 5: {return launch_alpha_beta_kernels<32, 9>(probs, grads, l_a, l_b); } case 6: {return launch_alpha_beta_kernels<64, 6>(probs, grads, l_a, l_b); } case 7: {return launch_alpha_beta_kernels<128, 4>(probs, grads, l_a, l_b); } case 8: {return launch_alpha_beta_kernels<64, 9>(probs, grads, l_a, l_b); } case 9: {return launch_alpha_beta_kernels<128, 6>(probs, grads, l_a, l_b); } case 10: {return launch_alpha_beta_kernels<128, 9>(probs, grads, l_a, l_b); } case 11: {return launch_alpha_beta_kernels<128, 10>(probs, grads, l_a, l_b); } } return CTC_STATUS_EXECUTION_FAILED; } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::compute_probs(const ProbT* const activations) { cudaError_t cuda_status; cuda_status = cudaMemcpyAsync(probs_, activations, activation_cols_ * out_dim_ * sizeof(ProbT), cudaMemcpyDeviceToDevice, stream_); if (cuda_status != cudaSuccess) { return CTC_STATUS_MEMOPS_FAILED; } // Numerically stable SM ctcStatus_t ctc_status = reduce_max(probs_, denoms_, out_dim_, activation_cols_, 1, stream_); if (ctc_status != CTC_STATUS_SUCCESS) { return ctc_status; } // Kernel launch to subtract maximum const int NT = 128; const int VT = 1; const int NV = NT * VT; const int num_elements = out_dim_ * activation_cols_; const int grid_size = ctc_helper::div_up(num_elements, NV); prepare_stable_SM_kernel<ProbT, VT> << < grid_size, NT, 0, stream_ >> > (ctc_helper::identity<ProbT>(), probs_, denoms_, out_dim_, num_elements); // Reduce along columns to calculate 
denominator ctc_status = reduce_exp(probs_, denoms_, out_dim_, activation_cols_, 1, stream_); if (ctc_status != CTC_STATUS_SUCCESS) return ctc_status; // Kernel launch to calculate probabilities compute_probs_kernel<ProbT, VT> << < grid_size, NT, 0, stream_ >> > (ctc_helper::exponential<ProbT>(), probs_, denoms_, out_dim_, num_elements); return CTC_STATUS_SUCCESS; } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::compute_cost_and_score(const ProbT* const activations, ProbT* grads, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths, bool compute_alpha, bool compute_betas_and_grad) { size_t best_config; ctcStatus_t status = create_metadata_and_choose_config(flat_labels, label_lengths, input_lengths, best_config); if (status != CTC_STATUS_SUCCESS) { return status; } status = compute_probs(activations); if (status != CTC_STATUS_SUCCESS) { return status; } launch_gpu_kernels(probs_, grads, best_config, compute_alpha, compute_betas_and_grad); cudaError_t cuda_status_mem, cuda_status_sync; cuda_status_mem = cudaMemcpyAsync(costs, nll_forward_, sizeof(ProbT) * minibatch_, cudaMemcpyDeviceToHost, stream_); cuda_status_sync = cudaStreamSynchronize(stream_); if (cuda_status_mem != cudaSuccess || cuda_status_sync != cudaSuccess) { return CTC_STATUS_MEMOPS_FAILED; } return CTC_STATUS_SUCCESS; } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::cost_and_grad(const ProbT* const activations, ProbT* grads, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths) { if (activations == nullptr || grads == nullptr || costs == nullptr || flat_labels == nullptr || label_lengths == nullptr || input_lengths == nullptr ) { return CTC_STATUS_INVALID_VALUE; } return compute_cost_and_score(activations, grads, costs, flat_labels, label_lengths, input_lengths, true, true); } template<typename ProbT> ctcStatus_t GpuCTC<ProbT>::score_forward(const ProbT* const activations, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths) { if (activations == nullptr || costs == nullptr || flat_labels == nullptr || label_lengths == nullptr || input_lengths == nullptr ) { return CTC_STATUS_INVALID_VALUE; } return compute_cost_and_score(activations, nullptr, costs, flat_labels, label_lengths, input_lengths, true, false); } ctcStatus_t FUN(compute_ctc_loss)(const Dtype* const activations, Dtype* gradients, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths, int alphabet_size, int minibatch, Dtype *costs, void *workspace, ctcOptions options) { if (activations == nullptr || flat_labels == nullptr || label_lengths == nullptr || input_lengths == nullptr || costs == nullptr || workspace == nullptr || alphabet_size <= 0 || minibatch <= 0) return CTC_STATUS_INVALID_VALUE; GpuCTC<Dtype> ctc(alphabet_size, minibatch, workspace, options.stream, options.blank_label); if (gradients != NULL) return ctc.cost_and_grad(activations, gradients, costs, flat_labels, label_lengths, input_lengths); else return ctc.score_forward(activations, costs, flat_labels, label_lengths, input_lengths); } ctcStatus_t FUN(get_workspace_size)(const int* const label_lengths, const int* const input_lengths, int alphabet_size, int minibatch, ctcOptions options, size_t* size_bytes) { if (label_lengths == nullptr || input_lengths == nullptr || size_bytes == nullptr || alphabet_size <= 0 || minibatch <= 0) return CTC_STATUS_INVALID_VALUE; // This is the max of all S and T 
for all examples in the minibatch. int maxL = *std::max_element(label_lengths, label_lengths + minibatch); int maxT = *std::max_element(input_lengths, input_lengths + minibatch); const int S = 2 * maxL + 1; *size_bytes = 0; // GPU storage //nll_forward, nll_backward *size_bytes += 2 * sizeof(Dtype) * minibatch; //repeats *size_bytes += sizeof(int) * minibatch; //label offsets *size_bytes += sizeof(int) * minibatch; //utt_length *size_bytes += sizeof(int) * minibatch; //label lengths *size_bytes += sizeof(int) * minibatch; //labels without blanks - overallocate for now *size_bytes += sizeof(int) * maxL * minibatch; //labels with blanks *size_bytes += sizeof(int) * S * minibatch; //alphas *size_bytes += sizeof(Dtype) * S * maxT * minibatch; //denoms *size_bytes += sizeof(Dtype) * maxT * minibatch; //probs (since we will pass in activations) *size_bytes += sizeof(Dtype) * alphabet_size * maxT * minibatch; return CTC_STATUS_SUCCESS; } void FUN(ExtractInputData)(int T_, int N_, int C_, int blank_index_, const Dtype* seq_ind_data, const Dtype* labels_data, vector<int>* flat_labels, vector<int>* label_lengths, vector<int>* input_lengths) { const Dtype* seq_ind = seq_ind_data; const Dtype* target_seq = labels_data; flat_labels->clear(); flat_labels->reserve(T_ * N_); // maximum required label_lengths->resize(N_); input_lengths->resize(N_); // compute the sequence length and label length int* seq_len = input_lengths->data(); int* label_len = label_lengths->data(); int label_offset = 0; //if (blank_index_ == -1) { if (blank_index_ == 0) {//modified by jxs label_offset = 1; } for (int n = 0; n < N_; ++n) { seq_len[n] = T_; // default value is maximal allowed length label_len[n] = T_; // default value is maximal allowed length const Dtype* seq = seq_ind + n; const Dtype* label = target_seq + n; // sequence indicators start with seq == 0.0 to indicate the start of a // sequence. 
Skip at t = 0, so start at t = 1 seq += N_; for (int t = 1; t < T_; ++t) { if (static_cast<int>(*seq + 0.5) == 0) { seq_len[n] = t; break; } seq += N_; } // label indicators are negative if the sequence has ended for (int t = 0; t < T_; ++t) { if (*label < 0.0) { label_len[n] = t; break; } // Note that the blank label will be 0 flat_labels->push_back(static_cast<int>(*label + 0.5) + label_offset); label += N_; } // if the label length is 0, the seq_len is 1 (0 following 0) // set seq_len to 0 in this case aswell, to skip this example if (label_len[n] == 0) { CHECK_LE(seq_len[n], 1); seq_len[n] = 0; } CHECK_LE(label_len[n], seq_len[n]) << "The label length must be smaller or equals the sequence length!"; } } void FUN(warp_ctc_loss_fwd)(int T_, int N_, int C_, int count, int blank_index_, const Dtype* bottom0_data, Dtype* bottom0_mdiff, const Dtype* bottom1_data, const Dtype* bottom2_data, const Dtype* bottom3_data, Dtype* top) { const Dtype* activations = bottom0_data; Dtype* gradients = bottom0_mdiff; const int alphabet_size = C_; const int minibatch = N_; int bottom_size = (bottom0_data != NULL) + (bottom1_data != NULL) + (bottom2_data != NULL) + (bottom3_data != NULL); vector<int> flat_labels_; vector<int> label_lengths_; vector<int> input_lengths_; label_lengths_.resize(N_); input_lengths_.resize(N_); vector<Dtype> costs(N_); flat_labels_.clear(); if (bottom_size == 2) {//bottom[0]=activations, bottom[1] is labels, shape: Batchsize*seq len const Dtype* label_seq_d = bottom1_data; int label_len_per_batch = count / N_; for (int n = 0; n < N_; ++n) { int curlen = 0; for (int l = 0; l < label_len_per_batch; ++l) { int label = (int)label_seq_d[n * label_len_per_batch + l]; if (label <= blank_index_) { continue; } flat_labels_.push_back(label); curlen++; } label_lengths_[n] = curlen; input_lengths_[n] = T_; } } else if (bottom_size == 3) { FUN(ExtractInputData)(T_, N_, C_, blank_index_, bottom1_data, bottom2_data, &flat_labels_, &label_lengths_, &input_lengths_); } else if (bottom_size == 4) { //Blob* seq_len_blob = bottom1_data; //Blob* lab_len_blob = bottom2_data; //Blob* label_seq_blob = bottom3_data; const Dtype* seq_len_d = bottom1_data; const Dtype* lab_len_d = bottom2_data; const Dtype* label_seq_d = bottom3_data; int accumulated = 0; int label_len_per_batch = count / N_; //CHECK_EQ(seq_len_blob->count(), lab_len_blob->count()); for (int i = 0; i < count; ++i) { label_lengths_[i] = (int)lab_len_d[i]; input_lengths_[i] = (int)seq_len_d[i]; accumulated += (int)lab_len_d[i]; } flat_labels_.clear(); flat_labels_.reserve(accumulated); for (int n = 0; n < N_; ++n) { for (int t = 0; t < label_lengths_[n]; ++t) { flat_labels_.push_back((int)label_seq_d[n*label_len_per_batch + t]); } } } else { LOG(FATAL) << "Unsupported blobs shape"; } //remove repeat blank labels size_t workspace_alloc_bytes_; ctcOptions options; cudaStream_t stream; CHECK_EQ(cudaStreamCreate(&stream), CUDA_SUCCESS); options.loc = CTC_GPU; options.stream = stream; options.blank_label = blank_index_; ctcStatus_t status = FUN(get_workspace_size)(label_lengths_.data(), input_lengths_.data(), alphabet_size, minibatch, options, &workspace_alloc_bytes_); CHECK_EQ(status, CTC_STATUS_SUCCESS) << "CTC Error: " << ctcGetStatusString(status); Buffer workspace_[1] = { 0 }; if (workspace_->size< workspace_alloc_bytes_) { gpu_ReAlloc(workspace_, workspace_alloc_bytes_ * sizeof(char)); } //cuda_compute_ctc_loss; status = FUN(compute_ctc_loss)(activations, gradients, flat_labels_.data(), label_lengths_.data(), input_lengths_.data(), 
                             alphabet_size, minibatch,
                             costs.data(),
                             workspace_->data,
                             options);
  CHECK_EQ(status, CTC_STATUS_SUCCESS) << "CTC Error: " << ctcGetStatusString(status);

  // output loss
  Dtype loss;// = top_mdata()[0];
  loss = 0;
  int num = 1;
  for (int n = 0; n < N_; ++n) {
    if (costs[n] < std::numeric_limits<Dtype>::infinity()) {
      loss += costs[n];
      ++num;
    }
  }
  if (num == 1) {
    int asdf = 0;
  }
  loss /= num;
  if (isnan(loss)) {
    int asdf = 0;
  }
  *top = loss;
  Free(workspace_);

#if 0
  int gcnt = bottom[0]->count();
  Dtype sumg = 0;
  for (int i = 0; i < gcnt; i++) {
    sumg += fabs(gradients[i]);
  }
  //LOG(INFO) << "mean ctc loss=" << loss << ",N_="<<N_<<",num="<<num << ", mean gradients="<<sumg/gcnt;
#endif
  CHECK_EQ(cudaStreamDestroy(stream), CUDA_SUCCESS);
  return;
}
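For reference, the entry points above are normally driven in three steps: query the workspace size, allocate that many bytes on the device, then call the loss routine with or without a gradient buffer. The sketch below is an added illustration and not part of the original file; the wrapper name run_ctc_example and its argument names are hypothetical, Dtype is assumed to be float, and all device pointers are assumed valid.

// Hedged sketch of the usual call sequence for FUN(get_workspace_size) and
// FUN(compute_ctc_loss) as declared above. Not part of the original source.
ctcStatus_t run_ctc_example(const float* activations_dev, float* gradients_dev,
                            const int* flat_labels, const int* label_lengths,
                            const int* input_lengths, int alphabet_size,
                            int minibatch, float* costs_host,
                            cudaStream_t stream) {
  ctcOptions opts;
  opts.loc = CTC_GPU;
  opts.stream = stream;
  opts.blank_label = 0;                    // assumed blank index

  size_t workspace_bytes = 0;
  ctcStatus_t st = FUN(get_workspace_size)(label_lengths, input_lengths,
                                           alphabet_size, minibatch,
                                           opts, &workspace_bytes);
  if (st != CTC_STATUS_SUCCESS) return st;

  void* workspace = nullptr;
  if (cudaMalloc(&workspace, workspace_bytes) != cudaSuccess)
    return CTC_STATUS_MEMOPS_FAILED;

  // Passing nullptr for gradients_dev selects the score-only path (score_forward).
  st = FUN(compute_ctc_loss)(activations_dev, gradients_dev,
                             flat_labels, label_lengths, input_lengths,
                             alphabet_size, minibatch, costs_host,
                             workspace, opts);
  cudaFree(workspace);
  return st;
}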
93f035ad19a11573752caeedfb8b51abcd3983d8.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * image reshaper will allow the user to divide an image up into a matrix where each row is a subimage centered around a different pixel
 * [y] = im_reshape(x,w)
 *
 */

#include <mex.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

// includes, project
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>

#define O_TILE_WIDTH 20  // variable to determine how many output tiles will be considered in a block
#define BLOCK_WIDTH (O_TILE_WIDTH + (7-1))

// gpu high pass filter
void __global__ imswitch(float *d_x, float *d_y, int w, int irow, int icol) {
    __shared__ float d_xs[(BLOCK_WIDTH)][(BLOCK_WIDTH)];

    // Build GPU coordinates
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int v = 2 * w + 1;
    int row_output = blockIdx.y*O_TILE_WIDTH + ty;
    int col_output = blockIdx.x*O_TILE_WIDTH + tx;
    int row_input = row_output - w;
    int col_input = col_output - w;

    // Pad the region
    if ((row_input >= 0) && (row_input < irow) && (col_input >= 0) && (col_input < icol)) {  // check that the row/col indices fall onto the input image
        d_xs[ty][tx] = d_x[row_input*icol + col_input];  // if true, the image value is written to the shared array at d_xs[ty][tx] and stored locally on the block
    }
    else {
        d_xs[ty][tx] = 0;  // if row/col do not satisfy the boundary conditions, assign 0 to build an apron of zeros
    }
    __syncthreads();

    if (ty < O_TILE_WIDTH && tx < O_TILE_WIDTH) {
        for (int i = 0; i < v; i++) {
            for (int j = 0; j < v; j++) {
                // if(row_output < irow && col_output <icol){
                d_y[(i*v +j)*irow*icol+row_output*icol+col_output] = d_xs[ty+i][tx+j];
            }
        }
    }
}

void flt2dub(float *x, double *y, int N) {
    for (int i = 0; i < N; i++) {
        y[i] = (double)x[i];
    }
}

// main
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) {
    printf("Before anything\n");

    // Variable Declaration
    float *x, *dx, *dy, *y;
    double *oy;

    // Get memory size of signal
    const size_t *dims;
    dims = mxGetDimensions(prhs[0]);
    int m = (int)dims[0];
    int n = (int)dims[1];
    const int mem_size = m*n*sizeof(float);
    const int w = mxGetScalar(prhs[1]);
    const int v = 2 * w + 1;

    // allocate space on host for data
    y = (float *)mxMalloc(mem_size*v*v);
    oy = (double *)mxMalloc(mem_size*v*v * 2);
    x = (float *)mxGetPr(prhs[0]);
    printf("Before memory allocation\n");

    // allocate space on device for signal
    checkCudaErrors(hipMalloc((void**)&dx, mem_size));
    checkCudaErrors(hipMalloc((void**)&dy, mem_size*v*v));

    // Copy data over to device
    printf("Before memory copy\n");
    checkCudaErrors(hipMemcpy(dx, x, mem_size, hipMemcpyHostToDevice));
    printf("Before GPU execution\n");

    // at this point the memory is on the GPU ready to be manipulated
    dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);  // run 2-D gpu kernel to help with indexing
    dim3 dimGrid((n - 1) / O_TILE_WIDTH + 1, (m - 1) / O_TILE_WIDTH + 1, 1);
    imswitch << <dimGrid, dimBlock >> > (dx, dy, w, m, n);

    printf("Before Pointer Manipulation\n");
    plhs[0] = mxCreateDoubleMatrix(m*n, v*v, mxREAL);
    oy = mxGetPr(plhs[0]);
    printf("before mem copy\n");
    checkCudaErrors(hipMemcpy(y,dy,mem_size*v*v, hipMemcpyDeviceToHost));
    printf("everything is ok\n");
    printf("Value of y is %d\n", m);
    flt2dub(y, oy, m*n*v*v);

    hipFree(dy);
    hipFree(dx);
}
93f035ad19a11573752caeedfb8b51abcd3983d8.cu
/*
 * image reshaper will allow the user to divide an image up into a matrix where each row is a subimage centered around a different pixel
 * [y] = im_reshape(x,w)
 *
 */

#include <mex.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

// includes, project
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>

#define O_TILE_WIDTH 20  // variable to determine how many output tiles will be considered in a block
#define BLOCK_WIDTH (O_TILE_WIDTH + (7-1))

// gpu high pass filter
void __global__ imswitch(float *d_x, float *d_y, int w, int irow, int icol) {
    __shared__ float d_xs[(BLOCK_WIDTH)][(BLOCK_WIDTH)];

    // Build GPU coordinates
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int v = 2 * w + 1;
    int row_output = blockIdx.y*O_TILE_WIDTH + ty;
    int col_output = blockIdx.x*O_TILE_WIDTH + tx;
    int row_input = row_output - w;
    int col_input = col_output - w;

    // Pad the region
    if ((row_input >= 0) && (row_input < irow) && (col_input >= 0) && (col_input < icol)) {  // check that the row/col indices fall onto the input image
        d_xs[ty][tx] = d_x[row_input*icol + col_input];  // if true, the image value is written to the shared array at d_xs[ty][tx] and stored locally on the block
    }
    else {
        d_xs[ty][tx] = 0;  // if row/col do not satisfy the boundary conditions, assign 0 to build an apron of zeros
    }
    __syncthreads();

    if (ty < O_TILE_WIDTH && tx < O_TILE_WIDTH) {
        for (int i = 0; i < v; i++) {
            for (int j = 0; j < v; j++) {
                // if(row_output < irow && col_output <icol){
                d_y[(i*v +j)*irow*icol+row_output*icol+col_output] = d_xs[ty+i][tx+j];
            }
        }
    }
}

void flt2dub(float *x, double *y, int N) {
    for (int i = 0; i < N; i++) {
        y[i] = (double)x[i];
    }
}

// main
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) {
    printf("Before anything\n");

    // Variable Declaration
    float *x, *dx, *dy, *y;
    double *oy;

    // Get memory size of signal
    const size_t *dims;
    dims = mxGetDimensions(prhs[0]);
    int m = (int)dims[0];
    int n = (int)dims[1];
    const int mem_size = m*n*sizeof(float);
    const int w = mxGetScalar(prhs[1]);
    const int v = 2 * w + 1;

    // allocate space on host for data
    y = (float *)mxMalloc(mem_size*v*v);
    oy = (double *)mxMalloc(mem_size*v*v * 2);
    x = (float *)mxGetPr(prhs[0]);
    printf("Before memory allocation\n");

    // allocate space on device for signal
    checkCudaErrors(cudaMalloc((void**)&dx, mem_size));
    checkCudaErrors(cudaMalloc((void**)&dy, mem_size*v*v));

    // Copy data over to device
    printf("Before memory copy\n");
    checkCudaErrors(cudaMemcpy(dx, x, mem_size, cudaMemcpyHostToDevice));
    printf("Before GPU execution\n");

    // at this point the memory is on the GPU ready to be manipulated
    dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);  // run 2-D gpu kernel to help with indexing
    dim3 dimGrid((n - 1) / O_TILE_WIDTH + 1, (m - 1) / O_TILE_WIDTH + 1, 1);
    imswitch << <dimGrid, dimBlock >> > (dx, dy, w, m, n);

    printf("Before Pointer Manipulation\n");
    plhs[0] = mxCreateDoubleMatrix(m*n, v*v, mxREAL);
    oy = mxGetPr(plhs[0]);
    printf("before mem copy\n");
    checkCudaErrors(cudaMemcpy(y,dy,mem_size*v*v, cudaMemcpyDeviceToHost));
    printf("everything is ok\n");
    printf("Value of y is %d\n", m);
    flt2dub(y, oy, m*n*v*v);

    cudaFree(dy);
    cudaFree(dx);
}
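For checking the imswitch kernel, a CPU reference of the same reshape is shown below; it is an added illustration (the function name im_reshape_ref is hypothetical), mirroring the zero padding and the (i*v + j)*irow*icol + row*icol + col output layout. Note that this launch configuration only supports w <= 3, since BLOCK_WIDTH is hard-coded to O_TILE_WIDTH + 6, and that the bounds check on row_output/col_output is commented out in the kernel.

// Hypothetical host-side reference of the imswitch layout, for verifying the kernel.
// y must hold irow*icol*v*v floats, with v = 2*w + 1; x is row-major irow x icol.
void im_reshape_ref(const float *x, float *y, int w, int irow, int icol) {
    int v = 2 * w + 1;
    for (int row = 0; row < irow; ++row) {
        for (int col = 0; col < icol; ++col) {
            for (int i = 0; i < v; ++i) {
                for (int j = 0; j < v; ++j) {
                    int r = row + i - w;   // neighbour row, offset by the apron
                    int c = col + j - w;   // neighbour column
                    float val = (r >= 0 && r < irow && c >= 0 && c < icol)
                                    ? x[r * icol + c] : 0.0f;  // zero padding outside the image
                    y[(i * v + j) * irow * icol + row * icol + col] = val;
                }
            }
        }
    }
}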
0a42bceed208f1ce95552b27ee56cf0121e29fb0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 2.1.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date August 2016

       @generated from sparse-iter/blas/zgeaxpy.cu, normal z -> c, Tue Aug 30 09:38:41 2016

*/
#include "magmasparse_internal.h"

#define BLOCK_SIZE 256

// axpy kernel for matrices stored in the MAGMA format
__global__ void
cgeaxpy_kernel(
    int num_rows,
    int num_cols,
    magmaFloatComplex alpha,
    magmaFloatComplex * dx,
    magmaFloatComplex beta,
    magmaFloatComplex * dy)
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;

    if( row<num_rows ){
        for( j=0; j<num_cols; j++ ){
            int idx = row + j*num_rows;
            dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
        }
    }
}

/**
    Purpose
    -------

    This routine computes Y = alpha * X + beta * Y on the GPU.
    The input format is a dense matrix (vector block) stored in
    magma_c_matrix format.

    Arguments
    ---------

    @param[in]
    alpha       magmaFloatComplex
                scalar multiplier.

    @param[in]
    X           magma_c_matrix
                input matrix X.

    @param[in]
    beta        magmaFloatComplex
                scalar multiplier.

    @param[in,out]
    Y           magma_c_matrix*
                input/output matrix Y.

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_cblas
    ********************************************************************/

extern "C"
magma_int_t
magma_cgeaxpy(
    magmaFloatComplex alpha,
    magma_c_matrix X,
    magmaFloatComplex beta,
    magma_c_matrix *Y,
    magma_queue_t queue )
{
    int m = X.num_rows;
    int n = X.num_cols;
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
    magma_int_t threads = BLOCK_SIZE;
    hipLaunchKernelGGL(( cgeaxpy_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
        m, n, alpha, X.dval, beta, Y->dval );

    return MAGMA_SUCCESS;
}
0a42bceed208f1ce95552b27ee56cf0121e29fb0.cu
/*
    -- MAGMA (version 2.1.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date August 2016

       @generated from sparse-iter/blas/zgeaxpy.cu, normal z -> c, Tue Aug 30 09:38:41 2016

*/
#include "magmasparse_internal.h"

#define BLOCK_SIZE 256

// axpy kernel for matrices stored in the MAGMA format
__global__ void
cgeaxpy_kernel(
    int num_rows,
    int num_cols,
    magmaFloatComplex alpha,
    magmaFloatComplex * dx,
    magmaFloatComplex beta,
    magmaFloatComplex * dy)
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;

    if( row<num_rows ){
        for( j=0; j<num_cols; j++ ){
            int idx = row + j*num_rows;
            dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
        }
    }
}

/**
    Purpose
    -------

    This routine computes Y = alpha * X + beta * Y on the GPU.
    The input format is a dense matrix (vector block) stored in
    magma_c_matrix format.

    Arguments
    ---------

    @param[in]
    alpha       magmaFloatComplex
                scalar multiplier.

    @param[in]
    X           magma_c_matrix
                input matrix X.

    @param[in]
    beta        magmaFloatComplex
                scalar multiplier.

    @param[in,out]
    Y           magma_c_matrix*
                input/output matrix Y.

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_cblas
    ********************************************************************/

extern "C"
magma_int_t
magma_cgeaxpy(
    magmaFloatComplex alpha,
    magma_c_matrix X,
    magmaFloatComplex beta,
    magma_c_matrix *Y,
    magma_queue_t queue )
{
    int m = X.num_rows;
    int n = X.num_cols;
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
    magma_int_t threads = BLOCK_SIZE;
    cgeaxpy_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
        ( m, n, alpha, X.dval, beta, Y->dval );

    return MAGMA_SUCCESS;
}
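As a semantics reference for the kernel above, the same update written as a host loop over a column-major num_rows x num_cols block (leading dimension num_rows). This is an added sketch, not MAGMA API; it uses std::complex<float> in place of magmaFloatComplex, and the name cgeaxpy_ref is hypothetical.

#include <complex>

// dy = alpha*dx + beta*dy, element-wise over a dense column-major block,
// matching the indexing used by cgeaxpy_kernel (idx = row + j*num_rows).
void cgeaxpy_ref(int num_rows, int num_cols,
                 std::complex<float> alpha, const std::complex<float>* dx,
                 std::complex<float> beta, std::complex<float>* dy) {
    for (int row = 0; row < num_rows; ++row) {
        for (int j = 0; j < num_cols; ++j) {
            int idx = row + j * num_rows;
            dy[idx] = alpha * dx[idx] + beta * dy[idx];
        }
    }
}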
3c7da506b775bc065249ccdddf491095d7f37e27.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include "THHTensor.hpp"

#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>

#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"

/*
 * Description:
 */

__device__ int translate_idx(int ii, int d1, int d2, int d3, int d4, int scale_factor)
{
  int x, y, z, w, v;
  v = ii % d4;
  ii = ii/d4;
  w = ii % d3;
  ii = ii/d3;
  z = ii % d2;
  ii = ii/d2;
  y = ii % d1;
  ii = ii/d1;
  x = ii;
  v = v/scale_factor;
  w = w/scale_factor;
  z = z/scale_factor;
  d2 /= scale_factor;
  d3 /= scale_factor;
  d4 /= scale_factor;
  return ((((x*d1+y)*d2)+z)*d3+w)*d4+v;
}

__device__ int translate_idx_inv(int ii, int d1, int d2, int d3, int d4, int scale_factor,
                                 int off_x, int off_y, int off_z)
{
  int x, y, z, w, v;
  v = ii % d4;
  ii = ii/d4;
  w = ii % d3;
  ii = ii/d3;
  z = ii % d2;
  ii = ii/d2;
  y = ii % d1;
  ii = ii/d1;
  x = ii;
  v = v*scale_factor+off_x;
  w = w*scale_factor+off_y;
  z = z*scale_factor+off_z;
  d2 *= scale_factor;
  d3 *= scale_factor;
  d4 *= scale_factor;
  return ((((x*d1+y)*d2)+z)*d3+w)*d4+v;
}

template <typename Dtype>
__global__ void vupscale(Dtype *input, Dtype *output, int64_t no_elements,
                         int scale_factor, int d1, int d2, int d3, int d4)
{
  // output offset:
  int64_t ii = threadIdx.x + blockDim.x * blockIdx.x;
  ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
  if (ii >= no_elements) return;
  int ipidx = translate_idx(ii, d1, d2, d3, d4, scale_factor);
  output[ii]=input[ipidx];
}

/*
 * Description:
 */
template <typename Dtype, typename Acctype>
__global__ void vdownscale(Dtype *gradInput_data, Dtype *gradOutput_data, int64_t no_elements,
                           int scale_factor, int d1, int d2, int d3, int d4)
{
  // output offset:
  int64_t ii = threadIdx.x + blockDim.x * blockIdx.x;
  ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
  if (ii >= no_elements) return;
  Acctype sum = Acctype(0);
  for (int i=0; i < scale_factor; i++){
    for(int j=0; j < scale_factor; j++){
      for(int k=0; k < scale_factor; k++){
        int ipidx = translate_idx_inv(ii, d1, d2, d3, d4, scale_factor, i, j, k);
        sum += gradOutput_data[ipidx];
      }
    }
  }
  gradInput_data[ii] += ScalarConvert<Acctype, Dtype>::to(sum);
}

#include "generic/VolumetricUpSamplingNearest.cu"
#include "THHGenerateFloatTypes.h"
3c7da506b775bc065249ccdddf491095d7f37e27.cu
#include "THCUNN.h" #include "common.h" #include "THCTensor.hpp" #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include "THCHalf.h" #include "THCHalfAutoNumerics.cuh" /* * Description: */ __device__ int translate_idx(int ii, int d1, int d2, int d3, int d4, int scale_factor) { int x, y, z, w, v; v = ii % d4; ii = ii/d4; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; v = v/scale_factor; w = w/scale_factor; z = z/scale_factor; d2 /= scale_factor; d3 /= scale_factor; d4 /= scale_factor; return ((((x*d1+y)*d2)+z)*d3+w)*d4+v; } __device__ int translate_idx_inv(int ii, int d1, int d2, int d3, int d4, int scale_factor, int off_x, int off_y, int off_z) { int x, y, z, w, v; v = ii % d4; ii = ii/d4; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; v = v*scale_factor+off_x; w = w*scale_factor+off_y; z = z*scale_factor+off_z; d2 *= scale_factor; d3 *= scale_factor; d4 *= scale_factor; return ((((x*d1+y)*d2)+z)*d3+w)*d4+v; } template <typename Dtype> __global__ void vupscale(Dtype *input, Dtype *output, int64_t no_elements, int scale_factor, int d1, int d2, int d3, int d4) { // output offset: int64_t ii = threadIdx.x + blockDim.x * blockIdx.x; ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y; if (ii >= no_elements) return; int ipidx = translate_idx(ii, d1, d2, d3, d4, scale_factor); output[ii]=input[ipidx]; } /* * Description: */ template <typename Dtype, typename Acctype> __global__ void vdownscale(Dtype *gradInput_data, Dtype *gradOutput_data, int64_t no_elements, int scale_factor, int d1, int d2, int d3, int d4) { // output offset: int64_t ii = threadIdx.x + blockDim.x * blockIdx.x; ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y; if (ii >= no_elements) return; Acctype sum = Acctype(0); for (int i=0; i < scale_factor; i++){ for(int j=0; j < scale_factor; j++){ for(int k=0; k < scale_factor; k++){ int ipidx = translate_idx_inv(ii, d1, d2, d3, d4, scale_factor, i, j, k); sum += gradOutput_data[ipidx]; } } } gradInput_data[ii] += ScalarConvert<Acctype, Dtype>::to(sum); } #include "generic/VolumetricUpSamplingNearest.cu" #include "THCGenerateFloatTypes.h"
0eba8c0c4c13af2690a7771451172a8ad5b6f9c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ __shared__ float __tilevar_0__[5][128]; __shared__ float __tilevar_1__[5][128]; __shared__ float __tilevar_2__[5][128]; __shared__ float __tilevar_3__[5][128]; int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-16); int __iter_y__ = FORMA_MAX((int)(blockIdx.y)*FORMA_BLOCKDIM_Y-8, 0); //Initialize the values int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){ __tilevar_0__[1][__iter_3__-__iter_0__] = input[__iter_3__+M*__iter_y__]; __tilevar_0__[2][__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_y__+1)]; __tilevar_0__[3][__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_y__+2)]; __tilevar_0__[4][__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_y__+3)]; } // Initial computation for (int __iter_1__ = __iter_y__+2; __iter_1__ < FORMA_MIN(N-2,__iter_y__+14); __iter_1__++) { if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) { __tilevar_0__[0][__iter_3__-__iter_0__] = __tilevar_0__[1][__iter_3__-__iter_0__]; __tilevar_0__[1][__iter_3__-__iter_0__] = __tilevar_0__[2][__iter_3__-__iter_0__]; __tilevar_0__[2][__iter_3__-__iter_0__] = __tilevar_0__[3][__iter_3__-__iter_0__]; __tilevar_0__[3][__iter_3__-__iter_0__] = __tilevar_0__[4][__iter_3__-__iter_0__]; __tilevar_0__[4][__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__+2)]; } __syncthreads (); if (__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3)) ){ float __temp_2__ = (__tilevar_0__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_0__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_0__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_0__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_0__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_0__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_0__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_0__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_0__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_0__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); 
float __temp_41__ = (__tilevar_0__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_0__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_0__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_0__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_0__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_0__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_0__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_0__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_0__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_0__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_0__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_0__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_0__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_0__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_0__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_1__[0][__iter_3__-__iter_0__] = __tilevar_1__[1][__iter_3__-__iter_0__]; __tilevar_1__[1][__iter_3__-__iter_0__] = __tilevar_1__[2][__iter_3__-__iter_0__]; __tilevar_1__[2][__iter_3__-__iter_0__] = __tilevar_1__[3][__iter_3__-__iter_0__]; __tilevar_1__[3][__iter_3__-__iter_0__] = __tilevar_1__[4][__iter_3__-__iter_0__]; __tilevar_1__[4][__iter_3__-__iter_0__] = __temp_99__; } __syncthreads(); if (__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) { float __temp_2__ = (__tilevar_1__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_1__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_1__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_1__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_1__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_1__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_1__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_1__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_1__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_1__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_1__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * 
__temp_41__); float __temp_45__ = (__tilevar_1__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_1__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_1__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_1__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_1__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_1__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_1__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_1__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_1__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_1__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_1__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_1__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_1__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_1__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_2__[0][__iter_3__-__iter_0__] = __tilevar_2__[1][__iter_3__-__iter_0__]; __tilevar_2__[1][__iter_3__-__iter_0__] = __tilevar_2__[2][__iter_3__-__iter_0__]; __tilevar_2__[2][__iter_3__-__iter_0__] = __tilevar_2__[3][__iter_3__-__iter_0__]; __tilevar_2__[3][__iter_3__-__iter_0__] = __tilevar_2__[4][__iter_3__-__iter_0__]; __tilevar_2__[4][__iter_3__-__iter_0__] = __temp_99__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) { float __temp_2__ = (__tilevar_2__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_2__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_2__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_2__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_2__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_2__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_2__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_2__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_2__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_2__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_2__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_2__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = 
(__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_2__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_2__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_2__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_2__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_2__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_2__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_2__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_2__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_2__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_2__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_2__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_2__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_2__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_3__[0][__iter_3__-__iter_0__] = __tilevar_3__[1][__iter_3__-__iter_0__]; __tilevar_3__[1][__iter_3__-__iter_0__] = __tilevar_3__[2][__iter_3__-__iter_0__]; __tilevar_3__[2][__iter_3__-__iter_0__] = __tilevar_3__[3][__iter_3__-__iter_0__]; __tilevar_3__[3][__iter_3__-__iter_0__] = __tilevar_3__[4][__iter_3__-__iter_0__]; __tilevar_3__[4][__iter_3__-__iter_0__] = __temp_99__; } } // Rest of the computation for (int __iter_1__ = FORMA_MAX(2,__iter_y__+14); __iter_1__ < FORMA_MIN(N-2,__iter_y__+FORMA_BLOCKDIM_Y+14); __iter_1__++) { if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) { __tilevar_0__[0][__iter_3__-__iter_0__] = __tilevar_0__[1][__iter_3__-__iter_0__]; __tilevar_0__[1][__iter_3__-__iter_0__] = __tilevar_0__[2][__iter_3__-__iter_0__]; __tilevar_0__[2][__iter_3__-__iter_0__] = __tilevar_0__[3][__iter_3__-__iter_0__]; __tilevar_0__[3][__iter_3__-__iter_0__] = __tilevar_0__[4][__iter_3__-__iter_0__]; __tilevar_0__[4][__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__+2)]; } __syncthreads (); if (__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3)) ){ float __temp_2__ = (__tilevar_0__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_0__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_0__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_0__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_0__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_0__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_0__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = 
(__tilevar_0__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_0__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_0__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_0__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_0__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_0__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_0__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_0__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_0__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_0__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_0__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_0__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_0__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_0__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_0__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_0__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_0__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_0__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_1__[0][__iter_3__-__iter_0__] = __tilevar_1__[1][__iter_3__-__iter_0__]; __tilevar_1__[1][__iter_3__-__iter_0__] = __tilevar_1__[2][__iter_3__-__iter_0__]; __tilevar_1__[2][__iter_3__-__iter_0__] = __tilevar_1__[3][__iter_3__-__iter_0__]; __tilevar_1__[3][__iter_3__-__iter_0__] = __tilevar_1__[4][__iter_3__-__iter_0__]; __tilevar_1__[4][__iter_3__-__iter_0__] = __temp_99__; } __syncthreads(); if (__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) { float __temp_2__ = (__tilevar_1__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_1__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_1__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_1__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_1__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_1__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_1__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_1__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float 
__temp_33__ = (__tilevar_1__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_1__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_1__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_1__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_1__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_1__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_1__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_1__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_1__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_1__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_1__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_1__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_1__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_1__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_1__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_1__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_1__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_2__[0][__iter_3__-__iter_0__] = __tilevar_2__[1][__iter_3__-__iter_0__]; __tilevar_2__[1][__iter_3__-__iter_0__] = __tilevar_2__[2][__iter_3__-__iter_0__]; __tilevar_2__[2][__iter_3__-__iter_0__] = __tilevar_2__[3][__iter_3__-__iter_0__]; __tilevar_2__[3][__iter_3__-__iter_0__] = __tilevar_2__[4][__iter_3__-__iter_0__]; __tilevar_2__[4][__iter_3__-__iter_0__] = __temp_99__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) { float __temp_2__ = (__tilevar_2__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_2__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_2__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_2__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_2__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_2__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_2__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_2__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_2__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * 
__temp_33__); float __temp_37__ = (__tilevar_2__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_2__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_2__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_2__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_2__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_2__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_2__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_2__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_2__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_2__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_2__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_2__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_2__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_2__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_2__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_2__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_3__[0][__iter_3__-__iter_0__] = __tilevar_3__[1][__iter_3__-__iter_0__]; __tilevar_3__[1][__iter_3__-__iter_0__] = __tilevar_3__[2][__iter_3__-__iter_0__]; __tilevar_3__[2][__iter_3__-__iter_0__] = __tilevar_3__[3][__iter_3__-__iter_0__]; __tilevar_3__[3][__iter_3__-__iter_0__] = __tilevar_3__[4][__iter_3__-__iter_0__]; __tilevar_3__[4][__iter_3__-__iter_0__] = __temp_99__; } __syncthreads(); if (__iter_3__ >= FORMA_MAX((__iter_0__+8),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))){ float __temp_2__ = (__tilevar_3__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_3__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_3__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_3__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_3__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_3__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_3__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_3__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_3__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_3__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = 
(__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_3__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_3__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_3__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_3__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_3__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_3__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_3__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_3__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_3__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_3__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_3__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_3__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_3__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_3__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_3__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-6,0)] = __temp_99__; } } } /*Device code End */ /* Host Code Begin */ extern "C" void gaussian(float * h_input, int N, int M, float * __var_0__){ /* Host allocation Begin */ float * input; hipMalloc(&input,sizeof(float)*(N*M)); Check_CUDA_Error("Allocation Error!! : input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(float)*(N*M), memcpy_kind_h_input); } float * __var_1__; hipMalloc(&__var_1__,sizeof(float)*(N*M)); Check_CUDA_Error("Allocation Error!! 
: __var_1__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = M; int __size_1___kernel___forma_kernel__0__ = N; int __block_0___kernel___forma_kernel__0__ = 128; int __block_1___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-16); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__, __size_1___kernel___forma_kernel__0__/16); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), 0, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_1___kernel___forma_kernel__0__/16, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipPointerAttribute_t ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(float)*(N*M), memcpy_kind___var_0__); #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); } /*Host Free End*/
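The generated stencil above repeatedly applies one 5x5 Gaussian (coefficient rows 2/4/5/4/2, 4/9/12/9/4, 5/12/15/12/5, 4/9/12/9/4, 2/4/5/4/2, normalized by 159); each __tilevar_ stage performs one application and the kernel chains four of them before writing __var_1__. The snippet below is an added CPU reference of a single application, assuming a row-major N x M image and interior points only; the name gaussian5x5_ref is hypothetical.

// One pass of the 5x5 Gaussian used by __kernel___forma_kernel__0__ above.
void gaussian5x5_ref(const float *in, float *out, int N, int M) {
  static const int k[5][5] = {
    {2,  4,  5,  4, 2},
    {4,  9, 12,  9, 4},
    {5, 12, 15, 12, 5},
    {4,  9, 12,  9, 4},
    {2,  4,  5,  4, 2}};              // coefficients sum to 159
  for (int i = 2; i < N - 2; ++i) {
    for (int j = 2; j < M - 2; ++j) {
      float acc = 0.0f;
      for (int di = -2; di <= 2; ++di)
        for (int dj = -2; dj <= 2; ++dj)
          acc += k[di + 2][dj + 2] * in[(i + di) * M + (j + dj)];
      out[i * M + j] = acc / 159;
    }
  }
}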
0eba8c0c4c13af2690a7771451172a8ad5b6f9c6.cu
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ __shared__ float __tilevar_0__[5][128]; __shared__ float __tilevar_1__[5][128]; __shared__ float __tilevar_2__[5][128]; __shared__ float __tilevar_3__[5][128]; int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-16); int __iter_y__ = FORMA_MAX((int)(blockIdx.y)*FORMA_BLOCKDIM_Y-8, 0); //Initialize the values int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){ __tilevar_0__[1][__iter_3__-__iter_0__] = input[__iter_3__+M*__iter_y__]; __tilevar_0__[2][__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_y__+1)]; __tilevar_0__[3][__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_y__+2)]; __tilevar_0__[4][__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_y__+3)]; } // Initial computation for (int __iter_1__ = __iter_y__+2; __iter_1__ < FORMA_MIN(N-2,__iter_y__+14); __iter_1__++) { if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) { __tilevar_0__[0][__iter_3__-__iter_0__] = __tilevar_0__[1][__iter_3__-__iter_0__]; __tilevar_0__[1][__iter_3__-__iter_0__] = __tilevar_0__[2][__iter_3__-__iter_0__]; __tilevar_0__[2][__iter_3__-__iter_0__] = __tilevar_0__[3][__iter_3__-__iter_0__]; __tilevar_0__[3][__iter_3__-__iter_0__] = __tilevar_0__[4][__iter_3__-__iter_0__]; __tilevar_0__[4][__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__+2)]; } __syncthreads (); if (__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3)) ){ float __temp_2__ = (__tilevar_0__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_0__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_0__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_0__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_0__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_0__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_0__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_0__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_0__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_0__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_0__[2][__iter_3__-2-__iter_0__]); float 
__temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_0__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_0__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_0__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_0__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_0__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_0__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_0__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_0__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_0__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_0__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_0__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_0__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_0__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_0__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_1__[0][__iter_3__-__iter_0__] = __tilevar_1__[1][__iter_3__-__iter_0__]; __tilevar_1__[1][__iter_3__-__iter_0__] = __tilevar_1__[2][__iter_3__-__iter_0__]; __tilevar_1__[2][__iter_3__-__iter_0__] = __tilevar_1__[3][__iter_3__-__iter_0__]; __tilevar_1__[3][__iter_3__-__iter_0__] = __tilevar_1__[4][__iter_3__-__iter_0__]; __tilevar_1__[4][__iter_3__-__iter_0__] = __temp_99__; } __syncthreads(); if (__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) { float __temp_2__ = (__tilevar_1__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_1__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_1__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_1__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_1__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_1__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_1__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_1__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_1__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_1__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_1__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = 
(__tilevar_1__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_1__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_1__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_1__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_1__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_1__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_1__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_1__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_1__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_1__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_1__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_1__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_1__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_1__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_2__[0][__iter_3__-__iter_0__] = __tilevar_2__[1][__iter_3__-__iter_0__]; __tilevar_2__[1][__iter_3__-__iter_0__] = __tilevar_2__[2][__iter_3__-__iter_0__]; __tilevar_2__[2][__iter_3__-__iter_0__] = __tilevar_2__[3][__iter_3__-__iter_0__]; __tilevar_2__[3][__iter_3__-__iter_0__] = __tilevar_2__[4][__iter_3__-__iter_0__]; __tilevar_2__[4][__iter_3__-__iter_0__] = __temp_99__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) { float __temp_2__ = (__tilevar_2__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_2__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_2__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_2__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_2__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_2__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_2__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_2__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_2__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_2__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_2__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_2__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float 
__temp_49__ = (__tilevar_2__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_2__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_2__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_2__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_2__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_2__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_2__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_2__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_2__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_2__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_2__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_2__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_2__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_3__[0][__iter_3__-__iter_0__] = __tilevar_3__[1][__iter_3__-__iter_0__]; __tilevar_3__[1][__iter_3__-__iter_0__] = __tilevar_3__[2][__iter_3__-__iter_0__]; __tilevar_3__[2][__iter_3__-__iter_0__] = __tilevar_3__[3][__iter_3__-__iter_0__]; __tilevar_3__[3][__iter_3__-__iter_0__] = __tilevar_3__[4][__iter_3__-__iter_0__]; __tilevar_3__[4][__iter_3__-__iter_0__] = __temp_99__; } } // Rest of the computation for (int __iter_1__ = FORMA_MAX(2,__iter_y__+14); __iter_1__ < FORMA_MIN(N-2,__iter_y__+FORMA_BLOCKDIM_Y+14); __iter_1__++) { if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) { __tilevar_0__[0][__iter_3__-__iter_0__] = __tilevar_0__[1][__iter_3__-__iter_0__]; __tilevar_0__[1][__iter_3__-__iter_0__] = __tilevar_0__[2][__iter_3__-__iter_0__]; __tilevar_0__[2][__iter_3__-__iter_0__] = __tilevar_0__[3][__iter_3__-__iter_0__]; __tilevar_0__[3][__iter_3__-__iter_0__] = __tilevar_0__[4][__iter_3__-__iter_0__]; __tilevar_0__[4][__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__+2)]; } __syncthreads (); if (__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3)) ){ float __temp_2__ = (__tilevar_0__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_0__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_0__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_0__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_0__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_0__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_0__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = 
(__tilevar_0__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_0__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_0__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_0__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_0__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_0__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_0__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_0__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_0__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_0__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_0__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_0__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_0__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_0__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_0__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_0__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_0__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_0__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_1__[0][__iter_3__-__iter_0__] = __tilevar_1__[1][__iter_3__-__iter_0__]; __tilevar_1__[1][__iter_3__-__iter_0__] = __tilevar_1__[2][__iter_3__-__iter_0__]; __tilevar_1__[2][__iter_3__-__iter_0__] = __tilevar_1__[3][__iter_3__-__iter_0__]; __tilevar_1__[3][__iter_3__-__iter_0__] = __tilevar_1__[4][__iter_3__-__iter_0__]; __tilevar_1__[4][__iter_3__-__iter_0__] = __temp_99__; } __syncthreads(); if (__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) { float __temp_2__ = (__tilevar_1__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_1__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_1__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_1__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_1__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_1__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_1__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_1__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float 
__temp_33__ = (__tilevar_1__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_1__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_1__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_1__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_1__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_1__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_1__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_1__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_1__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_1__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_1__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_1__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_1__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_1__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_1__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_1__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_1__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_2__[0][__iter_3__-__iter_0__] = __tilevar_2__[1][__iter_3__-__iter_0__]; __tilevar_2__[1][__iter_3__-__iter_0__] = __tilevar_2__[2][__iter_3__-__iter_0__]; __tilevar_2__[2][__iter_3__-__iter_0__] = __tilevar_2__[3][__iter_3__-__iter_0__]; __tilevar_2__[3][__iter_3__-__iter_0__] = __tilevar_2__[4][__iter_3__-__iter_0__]; __tilevar_2__[4][__iter_3__-__iter_0__] = __temp_99__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) { float __temp_2__ = (__tilevar_2__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_2__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_2__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_2__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_2__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_2__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_2__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_2__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_2__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * 
__temp_33__); float __temp_37__ = (__tilevar_2__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = (__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_2__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_2__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_2__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_2__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_2__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_2__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_2__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_2__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_2__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_2__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_2__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_2__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_2__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_2__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_2__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __tilevar_3__[0][__iter_3__-__iter_0__] = __tilevar_3__[1][__iter_3__-__iter_0__]; __tilevar_3__[1][__iter_3__-__iter_0__] = __tilevar_3__[2][__iter_3__-__iter_0__]; __tilevar_3__[2][__iter_3__-__iter_0__] = __tilevar_3__[3][__iter_3__-__iter_0__]; __tilevar_3__[3][__iter_3__-__iter_0__] = __tilevar_3__[4][__iter_3__-__iter_0__]; __tilevar_3__[4][__iter_3__-__iter_0__] = __temp_99__; } __syncthreads(); if (__iter_3__ >= FORMA_MAX((__iter_0__+8),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))){ float __temp_2__ = (__tilevar_3__[0][__iter_3__-2-__iter_0__]); float __temp_5__ = (__tilevar_3__[0][__iter_3__-1-__iter_0__]); float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__); float __temp_9__ = (__tilevar_3__[0][__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 5 * __temp_9__); float __temp_13__ = (__tilevar_3__[0][__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 4 * __temp_13__); float __temp_17__ = (__tilevar_3__[0][__iter_3__+2-__iter_0__]); float __temp_18__ = (__temp_14__ + 2 * __temp_17__); float __temp_21__ = (__tilevar_3__[1][__iter_3__-2-__iter_0__]); float __temp_22__ = (__temp_18__ + 4 * __temp_21__); float __temp_25__ = (__tilevar_3__[1][__iter_3__-1-__iter_0__]); float __temp_26__ = (__temp_22__ + 9 * __temp_25__); float __temp_29__ = (__tilevar_3__[1][__iter_3__-__iter_0__]); float __temp_30__ = (__temp_26__ + 12 * __temp_29__); float __temp_33__ = (__tilevar_3__[1][__iter_3__+1-__iter_0__]); float __temp_34__ = (__temp_30__ + 9 * __temp_33__); float __temp_37__ = (__tilevar_3__[1][__iter_3__+2-__iter_0__]); float __temp_38__ = 
(__temp_34__ + 4 * __temp_37__); float __temp_41__ = (__tilevar_3__[2][__iter_3__-2-__iter_0__]); float __temp_42__ = (__temp_38__ + 5 * __temp_41__); float __temp_45__ = (__tilevar_3__[2][__iter_3__-1-__iter_0__]); float __temp_46__ = (__temp_42__ + 12 * __temp_45__); float __temp_49__ = (__tilevar_3__[2][__iter_3__-__iter_0__]); float __temp_50__ = (__temp_46__ + 15 * __temp_49__); float __temp_53__ = (__tilevar_3__[2][__iter_3__+1-__iter_0__]); float __temp_54__ = (__temp_50__ + 12 * __temp_53__); float __temp_57__ = (__tilevar_3__[2][__iter_3__+2-__iter_0__]); float __temp_58__ = (__temp_54__ + 5 * __temp_57__); float __temp_61__ = (__tilevar_3__[3][__iter_3__-2-__iter_0__]); float __temp_62__ = (__temp_58__ + 4 * __temp_61__); float __temp_65__ = (__tilevar_3__[3][__iter_3__-1-__iter_0__]); float __temp_66__ = (__temp_62__ + 9 * __temp_65__); float __temp_69__ = (__tilevar_3__[3][__iter_3__-__iter_0__]); float __temp_70__ = (__temp_66__ + 12 * __temp_69__); float __temp_73__ = (__tilevar_3__[3][__iter_3__+1-__iter_0__]); float __temp_74__ = (__temp_70__ + 9 * __temp_73__); float __temp_77__ = (__tilevar_3__[3][__iter_3__+2-__iter_0__]); float __temp_78__ = (__temp_74__ + 4 * __temp_77__); float __temp_81__ = (__tilevar_3__[4][__iter_3__-2-__iter_0__]); float __temp_82__ = (__temp_78__ + 2 * __temp_81__); float __temp_85__ = (__tilevar_3__[4][__iter_3__-1-__iter_0__]); float __temp_86__ = (__temp_82__ + 4 * __temp_85__); float __temp_89__ = (__tilevar_3__[4][__iter_3__-__iter_0__]); float __temp_90__ = (__temp_86__ + 5 * __temp_89__); float __temp_93__ = (__tilevar_3__[4][__iter_3__+1-__iter_0__]); float __temp_94__ = (__temp_90__ + 4 * __temp_93__); float __temp_97__ = (__tilevar_3__[4][__iter_3__+2-__iter_0__]); float __temp_98__ = (__temp_94__ + 2 * __temp_97__); float __temp_99__ = (__temp_98__ / 159); __var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-6,0)] = __temp_99__; } } } /*Device code End */ /* Host Code Begin */ extern "C" void gaussian(float * h_input, int N, int M, float * __var_0__){ /* Host allocation Begin */ float * input; cudaMalloc(&input,sizeof(float)*(N*M)); Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(float)*(N*M), memcpy_kind_h_input); } float * __var_1__; cudaMalloc(&__var_1__,sizeof(float)*(N*M)); Check_CUDA_Error("Allocation Error!! 
: __var_1__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = M; int __size_1___kernel___forma_kernel__0__ = N; int __block_0___kernel___forma_kernel__0__ = 128; int __block_1___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-16); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__, __size_1___kernel___forma_kernel__0__/16); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_1___kernel___forma_kernel__0__/16, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(N*M), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); } /*Host Free End*/
82b8ff9aeb7a62ec76d21ef2048a157cf63f38b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "svgf/svgf.h" #include "kernel/context.cuh" #include "kernel/light.cuh" #include "kernel/material.cuh" #include "kernel/intersect.cuh" #include "kernel/bvh.cuh" #include "kernel/StreamCompaction.h" #include "kernel/pt_common.h" #include "cuda/cudadefs.h" #include "cuda/helper_math.h" #include "cuda/cudautil.h" #include "cuda/cudamemory.h" #include "aten4idaten.h" inline __device__ float3 computeViewSpace( int ix, int iy, float centerDepth, int width, int height, const aten::mat4* mtxC2V) { // NOTE // Pview = (Xview, Yview, Zview, 1) // mtxV2C = W 0 0 0 // 0 H 0 0 // 0 0 A B // 0 0 -1 0 // mtxV2C * Pview = (Xclip, Yclip, Zclip, Wclip) = (Xclip, Yclip, Zclip, Zview) // Wclip = Zview = depth // Xscr = Xclip / Wclip = Xclip / Zview = Xclip / depth // Yscr = Yclip / Wclip = Yclip / Zview = Yclip / depth // // Xscr * depth = Xclip // Xview = mtxC2V * Xclip float2 uv = make_float2(ix + 0.5, iy + 0.5); uv /= make_float2(width - 1, height - 1); // [0, 1] uv = uv * 2.0f - 1.0f; // [0, 1] -> [-1, 1] aten::vec4 pos(uv.x, uv.y, 0, 0); // Screen-space -> Clip-space. pos.x *= centerDepth; pos.y *= centerDepth; // Clip-space -> View-space pos = mtxC2V->apply(pos); pos.z = -centerDepth; pos.w = 1.0; return make_float3(pos.x, pos.y, pos.z); } inline __device__ float C(float3 x1, float3 x2, float sigma) { float a = length(x1 - x2) / sigma; a *= a; return expf(-0.5f * a); } inline __device__ float C(float x1, float x2, float sigma) { float a = fabs(x1 - x2) / sigma; a *= a; return expf(-0.5f * a); } #define IS_IN_BOUND(x, a, b) ((a) <= (x) && (x) < (b)) __global__ void varianceEstimation( idaten::TileDomain tileDomain, hipSurfaceObject_t dst, const float4* __restrict__ aovNormalDepth, float4* aovMomentTemporalWeight, float4* aovColorVariance, float4* aovTexclrMeshid, aten::mat4 mtxC2V, int width, int height) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } ix += tileDomain.x; iy += tileDomain.y; const int idx = getIdx(ix, iy, width); auto normalDepth = aovNormalDepth[idx]; auto texclrMeshid = aovTexclrMeshid[idx]; auto momentTemporalWeight = aovMomentTemporalWeight[idx]; float centerDepth = aovNormalDepth[idx].w; int centerMeshId = (int)texclrMeshid.w; if (centerMeshId < 0) { // Background pixel, so the variance is zero. aovMomentTemporalWeight[idx].x = 0; aovMomentTemporalWeight[idx].y = 0; aovMomentTemporalWeight[idx].z = 1; surf2Dwrite( make_float4(0), dst, ix * sizeof(float4), iy, hipBoundaryModeTrap); } float3 centerViewPos = computeViewSpace(ix, iy, centerDepth, width, height, &mtxC2V); float3 centerMoment = make_float3(momentTemporalWeight.x, momentTemporalWeight.y, momentTemporalWeight.z); int frame = (int)centerMoment.z; centerMoment /= centerMoment.z; // Compute the variance. float var = centerMoment.x - centerMoment.y * centerMoment.y; if (frame < 4) { // Fewer than 4 accumulated frames, or the pixel is disoccluded. // Compute the luminance with a 7x7 bilateral filter. 
static const int radius = 3; static const float sigmaN = 0.005f; static const float sigmaD = 0.005f; static const float sigmaS = 0.965f; float3 centerNormal = make_float3(normalDepth.x, normalDepth.y, normalDepth.z); float3 sum = make_float3(0); float weight = 0.0f; #if 0 for (int v = -radius; v <= radius; v++) { for (int u = -radius; u <= radius; u++) { #else static const int offsetx[] = { -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, }; static const int offsety[] = { -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, }; #pragma unroll for (int i = 0; i < 49; i++) { { int u = offsetx[i]; int v = offsety[i]; #endif int xx = clamp(ix + u, 0, width - 1); int yy = clamp(iy + v, 0, height - 1); int pidx = getIdx(xx, yy, width); normalDepth = aovNormalDepth[pidx]; texclrMeshid = aovTexclrMeshid[pidx]; momentTemporalWeight = aovMomentTemporalWeight[pidx]; float3 sampleNml = make_float3(normalDepth.x, normalDepth.y, normalDepth.z); float sampleDepth = normalDepth.w; int sampleMeshId = (int)texclrMeshid.w; float3 moment = make_float3(momentTemporalWeight.x, momentTemporalWeight.y, momentTemporalWeight.z); moment /= moment.z; #if 0 float n = 1 - dot(sampleNml, centerNormal); float Wn = exp(-0.5f * n * n / (sigmaN * sigmaN)); float d = 1 - min(centerDepth, sampleDepth) / max(centerDepth, sampleDepth); float Wd = exp(-0.5f * d * d / (sigmaD * sigmaD)); float Ws = exp(-0.5f * (u * u + v * v) / (sigmaS * sigmaS)); #elif 0 float Wn = 1.0f; { float normalCloseness = dot(sampleNml, centerNormal); normalCloseness = normalCloseness * normalCloseness; normalCloseness = normalCloseness * normalCloseness; float normalError = (1.0f - normalCloseness); Wn = max((1.0f - normalError), 0.0f); } float Wd = max(0.0f, 1.0f - fabs(centerDepth - sampleDepth)); float Ws = 1.0f; { auto sampleViewPos = computeViewSpace(ix + u, iy + v, sampleDepth, width, height, &mtxC2V); // Change in position in camera space. auto dq = centerViewPos - sampleViewPos; // How far away is this point from the original sample in camera space? (Max value is unbounded). auto dist2 = dot(dq, dq); // How far off the expected plane (on the perpendicular) is this point? Max value is unbounded. float err = max(fabs(dot(dq, sampleNml)), abs(dot(dq, centerNormal))); Ws = (dist2 < 0.001f) ? 1.0 : pow(max(0.0, 1.0 - 2.0 * err / sqrt(dist2)), 2.0); } #else float3 sampleViewPos = computeViewSpace(ix + u, iy + v, sampleDepth, width, height, &mtxC2V); float Wn = C(centerNormal, sampleNml, 0.1f); float Ws = C(centerViewPos, sampleViewPos, 0.1f); float Wd = C(centerDepth, sampleDepth, 0.1f); #endif float Wm = centerMeshId == sampleMeshId ? 
1.0f : 0.0f; float W = Ws * Wn * Wd * Wm; sum += moment * W; weight += W; } } if (weight > 0.0f) { sum /= weight; } var = sum.x - sum.y * sum.y; } // TODO // The variance should not become negative, but... var = fabs(var); aovColorVariance[idx].w = var; surf2Dwrite( make_float4(var, var, var, 1), dst, ix * sizeof(float4), iy, hipBoundaryModeTrap); } namespace idaten { void SVGFPathTracing::onVarianceEstimation( hipSurfaceObject_t outputSurf, int width, int height) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); int curaov = getCurAovs(); varianceEstimation << <grid, block, 0, m_stream >> > ( //varianceEstimation << <1, 1 >> > ( m_tileDomain, outputSurf, m_aovNormalDepth[curaov].ptr(), m_aovMomentTemporalWeight[curaov].ptr(), m_aovColorVariance[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), m_mtxC2V, width, height); checkCudaKernel(varianceEstimation); } }
82b8ff9aeb7a62ec76d21ef2048a157cf63f38b5.cu
#include "svgf/svgf.h" #include "kernel/context.cuh" #include "kernel/light.cuh" #include "kernel/material.cuh" #include "kernel/intersect.cuh" #include "kernel/bvh.cuh" #include "kernel/StreamCompaction.h" #include "kernel/pt_common.h" #include "cuda/cudadefs.h" #include "cuda/helper_math.h" #include "cuda/cudautil.h" #include "cuda/cudamemory.h" #include "aten4idaten.h" inline __device__ float3 computeViewSpace( int ix, int iy, float centerDepth, int width, int height, const aten::mat4* mtxC2V) { // NOTE // Pview = (Xview, Yview, Zview, 1) // mtxV2C = W 0 0 0 // 0 H 0 0 // 0 0 A B // 0 0 -1 0 // mtxV2C * Pview = (Xclip, Yclip, Zclip, Wclip) = (Xclip, Yclip, Zclip, Zview) // Wclip = Zview = depth // Xscr = Xclip / Wclip = Xclip / Zview = Xclip / depth // Yscr = Yclip / Wclip = Yclip / Zview = Yclip / depth // // Xscr * depth = Xclip // Xview = mtxC2V * Xclip float2 uv = make_float2(ix + 0.5, iy + 0.5); uv /= make_float2(width - 1, height - 1); // [0, 1] uv = uv * 2.0f - 1.0f; // [0, 1] -> [-1, 1] aten::vec4 pos(uv.x, uv.y, 0, 0); // Screen-space -> Clip-space. pos.x *= centerDepth; pos.y *= centerDepth; // Clip-space -> View-space pos = mtxC2V->apply(pos); pos.z = -centerDepth; pos.w = 1.0; return make_float3(pos.x, pos.y, pos.z); } inline __device__ float C(float3 x1, float3 x2, float sigma) { float a = length(x1 - x2) / sigma; a *= a; return expf(-0.5f * a); } inline __device__ float C(float x1, float x2, float sigma) { float a = fabs(x1 - x2) / sigma; a *= a; return expf(-0.5f * a); } #define IS_IN_BOUND(x, a, b) ((a) <= (x) && (x) < (b)) __global__ void varianceEstimation( idaten::TileDomain tileDomain, cudaSurfaceObject_t dst, const float4* __restrict__ aovNormalDepth, float4* aovMomentTemporalWeight, float4* aovColorVariance, float4* aovTexclrMeshid, aten::mat4 mtxC2V, int width, int height) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } ix += tileDomain.x; iy += tileDomain.y; const int idx = getIdx(ix, iy, width); auto normalDepth = aovNormalDepth[idx]; auto texclrMeshid = aovTexclrMeshid[idx]; auto momentTemporalWeight = aovMomentTemporalWeight[idx]; float centerDepth = aovNormalDepth[idx].w; int centerMeshId = (int)texclrMeshid.w; if (centerMeshId < 0) { // 背景なので、分散はゼロ. aovMomentTemporalWeight[idx].x = 0; aovMomentTemporalWeight[idx].y = 0; aovMomentTemporalWeight[idx].z = 1; surf2Dwrite( make_float4(0), dst, ix * sizeof(float4), iy, cudaBoundaryModeTrap); } float3 centerViewPos = computeViewSpace(ix, iy, centerDepth, width, height, &mtxC2V); float3 centerMoment = make_float3(momentTemporalWeight.x, momentTemporalWeight.y, momentTemporalWeight.z); int frame = (int)centerMoment.z; centerMoment /= centerMoment.z; // 分散を計算. float var = centerMoment.x - centerMoment.y * centerMoment.y; if (frame < 4) { // 積算フレーム数が4未満 or Disoccludedされている. // 7x7birateral filterで輝度を計算. 
static const int radius = 3; static const float sigmaN = 0.005f; static const float sigmaD = 0.005f; static const float sigmaS = 0.965f; float3 centerNormal = make_float3(normalDepth.x, normalDepth.y, normalDepth.z); float3 sum = make_float3(0); float weight = 0.0f; #if 0 for (int v = -radius; v <= radius; v++) { for (int u = -radius; u <= radius; u++) { #else static const int offsetx[] = { -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, -3, -2, -1, 0, 1, 2, 3, }; static const int offsety[] = { -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, }; #pragma unroll for (int i = 0; i < 49; i++) { { int u = offsetx[i]; int v = offsety[i]; #endif int xx = clamp(ix + u, 0, width - 1); int yy = clamp(iy + v, 0, height - 1); int pidx = getIdx(xx, yy, width); normalDepth = aovNormalDepth[pidx]; texclrMeshid = aovTexclrMeshid[pidx]; momentTemporalWeight = aovMomentTemporalWeight[pidx]; float3 sampleNml = make_float3(normalDepth.x, normalDepth.y, normalDepth.z); float sampleDepth = normalDepth.w; int sampleMeshId = (int)texclrMeshid.w; float3 moment = make_float3(momentTemporalWeight.x, momentTemporalWeight.y, momentTemporalWeight.z); moment /= moment.z; #if 0 float n = 1 - dot(sampleNml, centerNormal); float Wn = exp(-0.5f * n * n / (sigmaN * sigmaN)); float d = 1 - min(centerDepth, sampleDepth) / max(centerDepth, sampleDepth); float Wd = exp(-0.5f * d * d / (sigmaD * sigmaD)); float Ws = exp(-0.5f * (u * u + v * v) / (sigmaS * sigmaS)); #elif 0 float Wn = 1.0f; { float normalCloseness = dot(sampleNml, centerNormal); normalCloseness = normalCloseness * normalCloseness; normalCloseness = normalCloseness * normalCloseness; float normalError = (1.0f - normalCloseness); Wn = max((1.0f - normalError), 0.0f); } float Wd = max(0.0f, 1.0f - fabs(centerDepth - sampleDepth)); float Ws = 1.0f; { auto sampleViewPos = computeViewSpace(ix + u, iy + v, sampleDepth, width, height, &mtxC2V); // Change in position in camera space. auto dq = centerViewPos - sampleViewPos; // How far away is this point from the original sample in camera space? (Max value is unbounded). auto dist2 = dot(dq, dq); // How far off the expected plane (on the perpendicular) is this point? Max value is unbounded. float err = max(fabs(dot(dq, sampleNml)), abs(dot(dq, centerNormal))); Ws = (dist2 < 0.001f) ? 1.0 : pow(max(0.0, 1.0 - 2.0 * err / sqrt(dist2)), 2.0); } #else float3 sampleViewPos = computeViewSpace(ix + u, iy + v, sampleDepth, width, height, &mtxC2V); float Wn = C(centerNormal, sampleNml, 0.1f); float Ws = C(centerViewPos, sampleViewPos, 0.1f); float Wd = C(centerDepth, sampleDepth, 0.1f); #endif float Wm = centerMeshId == sampleMeshId ? 
1.0f : 0.0f; float W = Ws * Wn * Wd * Wm; sum += moment * W; weight += W; } } if (weight > 0.0f) { sum /= weight; } var = sum.x - sum.y * sum.y; } // TODO // The variance should not become negative, but... var = fabs(var); aovColorVariance[idx].w = var; surf2Dwrite( make_float4(var, var, var, 1), dst, ix * sizeof(float4), iy, cudaBoundaryModeTrap); } namespace idaten { void SVGFPathTracing::onVarianceEstimation( cudaSurfaceObject_t outputSurf, int width, int height) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); int curaov = getCurAovs(); varianceEstimation << <grid, block, 0, m_stream >> > ( //varianceEstimation << <1, 1 >> > ( m_tileDomain, outputSurf, m_aovNormalDepth[curaov].ptr(), m_aovMomentTemporalWeight[curaov].ptr(), m_aovColorVariance[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), m_mtxC2V, width, height); checkCudaKernel(varianceEstimation); } }
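Note (editorial aid, not part of the file pair above): the varianceEstimation kernel estimates per-pixel luminance variance from accumulated moments and, when fewer than four frames have accumulated or the pixel is disoccluded, re-estimates it over a 7x7 neighborhood weighted by the edge-stopping function C(x1, x2, sigma) = expf(-0.5f * (|x1 - x2| / sigma)^2) applied to normal, view-space position, and depth. The sketch below restates only the moment part; it assumes the aov channels hold (sum of squared luminance, sum of luminance, sample count) in .x/.y/.z, which is what the subtraction centerMoment.x - centerMoment.y * centerMoment.y implies, and the struct and function names are illustrative only.

// Illustrative host-side restatement of the moment-based estimate in varianceEstimation().
struct AccumulatedMoments {
    float sumSq;  // assumed: accumulated luminance^2 (the .x channel)
    float sum;    // assumed: accumulated luminance   (the .y channel)
    float count;  // accumulated sample/frame count   (the .z channel)
};

static float estimateVariance(const AccumulatedMoments& m) {
    const float secondMoment = m.sumSq / m.count;          // E[L^2]
    const float firstMoment  = m.sum   / m.count;          // E[L]
    float var = secondMoment - firstMoment * firstMoment;  // Var[L] = E[L^2] - E[L]^2
    // The kernel clamps with fabs() because floating-point accumulation can make
    // this slightly negative.
    return var < 0.0f ? -var : var;
}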
814df8d7020b2248a90d258ea996fb953e6239e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #include "internal_shared.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/limits.hpp" namespace cv { namespace gpu { namespace device { namespace stereocsbp { /////////////////////////////////////////////////////////////// /////////////////////// load constants //////////////////////// /////////////////////////////////////////////////////////////// __constant__ int cndisp; __constant__ float cmax_data_term; __constant__ float cdata_weight; __constant__ float cmax_disc_term; __constant__ float cdisc_single_jump; __constant__ int cth; __constant__ size_t cimg_step; __constant__ size_t cmsg_step1; __constant__ size_t cmsg_step2; __constant__ size_t cdisp_step1; __constant__ size_t cdisp_step2; __constant__ uchar* cleft; __constant__ uchar* cright; __constant__ uchar* ctemp; void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th, const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& temp) { cudaSafeCall( hipMemcpyToSymbol(cndisp, &ndisp, sizeof(int)) ); cudaSafeCall( hipMemcpyToSymbol(cmax_data_term, &max_data_term, sizeof(float)) ); cudaSafeCall( hipMemcpyToSymbol(cdata_weight, &data_weight, sizeof(float)) ); cudaSafeCall( hipMemcpyToSymbol(cmax_disc_term, &max_disc_term, sizeof(float)) ); cudaSafeCall( hipMemcpyToSymbol(cdisc_single_jump, &disc_single_jump, sizeof(float)) ); cudaSafeCall( hipMemcpyToSymbol(cth, &min_disp_th, sizeof(int)) ); cudaSafeCall( hipMemcpyToSymbol(cimg_step, &left.step, sizeof(size_t)) ); cudaSafeCall( hipMemcpyToSymbol(cleft, &left.data, sizeof(left.data)) ); cudaSafeCall( hipMemcpyToSymbol(cright, &right.data, sizeof(right.data)) ); cudaSafeCall( hipMemcpyToSymbol(ctemp, &temp.data, sizeof(temp.data)) ); } /////////////////////////////////////////////////////////////// /////////////////////// init data cost //////////////////////// /////////////////////////////////////////////////////////////// template <int channels> struct DataCostPerPixel; template <> struct DataCostPerPixel<1> { static __device__ __forceinline__ float compute(const uchar* left, const uchar* right) { return fmin(cdata_weight * ::abs((int)*left - *right), cdata_weight * cmax_data_term); } }; template <> struct DataCostPerPixel<3> { static __device__ __forceinline__ float compute(const uchar* left, const uchar* right) { float tb = 0.114f * ::abs((int)left[0] - right[0]); float tg = 0.587f * ::abs((int)left[1] - right[1]); float tr = 0.299f * ::abs((int)left[2] - right[2]); return fmin(cdata_weight * (tr + tg + tb), cdata_weight * cmax_data_term); } }; template <> struct DataCostPerPixel<4> { static __device__ __forceinline__ float compute(const uchar* left, const uchar* right) { uchar4 l = *((const uchar4*)left); uchar4 r = *((const uchar4*)right); float tb = 0.114f * ::abs((int)l.x - r.x); float tg = 0.587f * ::abs((int)l.y - r.y); float tr = 0.299f * ::abs((int)l.z - r.z); return fmin(cdata_weight * (tr + tg + tb), cdata_weight * cmax_data_term); } }; template <typename T> __global__ void get_first_k_initial_global(T* data_cost_selected_, T *selected_disp_pyr, int h, int w, int nr_plane) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { T* selected_disparity = selected_disp_pyr + y * cmsg_step1 + x; T* data_cost_selected = data_cost_selected_ + y * cmsg_step1 + x; T* data_cost = (T*)ctemp + y * cmsg_step1 + x; for(int i = 0; i < nr_plane; i++) { T minimum = device::numeric_limits<T>::max(); int id = 
0; for(int d = 0; d < cndisp; d++) { T cur = data_cost[d * cdisp_step1]; if(cur < minimum) { minimum = cur; id = d; } } data_cost_selected[i * cdisp_step1] = minimum; selected_disparity[i * cdisp_step1] = id; data_cost [id * cdisp_step1] = numeric_limits<T>::max(); } } } template <typename T> __global__ void get_first_k_initial_local(T* data_cost_selected_, T* selected_disp_pyr, int h, int w, int nr_plane) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { T* selected_disparity = selected_disp_pyr + y * cmsg_step1 + x; T* data_cost_selected = data_cost_selected_ + y * cmsg_step1 + x; T* data_cost = (T*)ctemp + y * cmsg_step1 + x; int nr_local_minimum = 0; T prev = data_cost[0 * cdisp_step1]; T cur = data_cost[1 * cdisp_step1]; T next = data_cost[2 * cdisp_step1]; for (int d = 1; d < cndisp - 1 && nr_local_minimum < nr_plane; d++) { if (cur < prev && cur < next) { data_cost_selected[nr_local_minimum * cdisp_step1] = cur; selected_disparity[nr_local_minimum * cdisp_step1] = d; data_cost[d * cdisp_step1] = numeric_limits<T>::max(); nr_local_minimum++; } prev = cur; cur = next; next = data_cost[(d + 1) * cdisp_step1]; } for (int i = nr_local_minimum; i < nr_plane; i++) { T minimum = numeric_limits<T>::max(); int id = 0; for (int d = 0; d < cndisp; d++) { cur = data_cost[d * cdisp_step1]; if (cur < minimum) { minimum = cur; id = d; } } data_cost_selected[i * cdisp_step1] = minimum; selected_disparity[i * cdisp_step1] = id; data_cost[id * cdisp_step1] = numeric_limits<T>::max(); } } } template <typename T, int channels> __global__ void init_data_cost(int h, int w, int level) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { int y0 = y << level; int yt = (y + 1) << level; int x0 = x << level; int xt = (x + 1) << level; T* data_cost = (T*)ctemp + y * cmsg_step1 + x; for(int d = 0; d < cndisp; ++d) { float val = 0.0f; for(int yi = y0; yi < yt; yi++) { for(int xi = x0; xi < xt; xi++) { int xr = xi - d; if(d < cth || xr < 0) val += cdata_weight * cmax_data_term; else { const uchar* lle = cleft + yi * cimg_step + xi * channels; const uchar* lri = cright + yi * cimg_step + xr * channels; val += DataCostPerPixel<channels>::compute(lle, lri); } } } data_cost[cdisp_step1 * d] = saturate_cast<T>(val); } } } template <typename T, int winsz, int channels> __global__ void init_data_cost_reduce(int level, int rows, int cols, int h) { int x_out = blockIdx.x; int y_out = blockIdx.y % h; int d = (blockIdx.y / h) * blockDim.z + threadIdx.z; int tid = threadIdx.x; if (d < cndisp) { int x0 = x_out << level; int y0 = y_out << level; int len = ::min(y0 + winsz, rows) - y0; float val = 0.0f; if (x0 + tid < cols) { if (x0 + tid - d < 0 || d < cth) val = cdata_weight * cmax_data_term * len; else { const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid ); const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - d); for(int y = 0; y < len; ++y) { val += DataCostPerPixel<channels>::compute(lle, lri); lle += cimg_step; lri += cimg_step; } } } extern __shared__ float smem[]; float* dline = smem + winsz * threadIdx.z; dline[tid] = val; __syncthreads(); if (winsz >= 256) { if (tid < 128) { dline[tid] += dline[tid + 128]; } __syncthreads(); } if (winsz >= 128) { if (tid < 64) { dline[tid] += dline[tid + 64]; } __syncthreads(); } volatile float* vdline = smem + winsz * threadIdx.z; if (winsz >= 64) if (tid < 32) vdline[tid] += vdline[tid + 32]; if (winsz >= 32) if (tid < 16) 
vdline[tid] += vdline[tid + 16]; if (winsz >= 16) if (tid < 8) vdline[tid] += vdline[tid + 8]; if (winsz >= 8) if (tid < 4) vdline[tid] += vdline[tid + 4]; if (winsz >= 4) if (tid < 2) vdline[tid] += vdline[tid + 2]; if (winsz >= 2) if (tid < 1) vdline[tid] += vdline[tid + 1]; T* data_cost = (T*)ctemp + y_out * cmsg_step1 + x_out; if (tid == 0) data_cost[cdisp_step1 * d] = saturate_cast<T>(dline[0]); } } template <typename T> void init_data_cost_caller_(int /*rows*/, int /*cols*/, int h, int w, int level, int /*ndisp*/, int channels, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); switch (channels) { case 1:hipLaunchKernelGGL(( init_data_cost<T, 1>), dim3(grid), dim3(threads), 0, stream, h, w, level); break; case 3:hipLaunchKernelGGL(( init_data_cost<T, 3>), dim3(grid), dim3(threads), 0, stream, h, w, level); break; case 4:hipLaunchKernelGGL(( init_data_cost<T, 4>), dim3(grid), dim3(threads), 0, stream, h, w, level); break; default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "init_data_cost_caller_"); } } template <typename T, int winsz> void init_data_cost_reduce_caller_(int rows, int cols, int h, int w, int level, int ndisp, int channels, hipStream_t stream) { const int threadsNum = 256; const size_t smem_size = threadsNum * sizeof(float); dim3 threads(winsz, 1, threadsNum / winsz); dim3 grid(w, h, 1); grid.y *= divUp(ndisp, threads.z); switch (channels) { case 1:hipLaunchKernelGGL(( init_data_cost_reduce<T, winsz, 1>), dim3(grid), dim3(threads), smem_size, stream, level, rows, cols, h); break; case 3:hipLaunchKernelGGL(( init_data_cost_reduce<T, winsz, 3>), dim3(grid), dim3(threads), smem_size, stream, level, rows, cols, h); break; case 4:hipLaunchKernelGGL(( init_data_cost_reduce<T, winsz, 4>), dim3(grid), dim3(threads), smem_size, stream, level, rows, cols, h); break; default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "init_data_cost_reduce_caller_"); } } template<class T> void init_data_cost(int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, hipStream_t stream) { typedef void (*InitDataCostCaller)(int cols, int rows, int w, int h, int level, int ndisp, int channels, hipStream_t stream); static const InitDataCostCaller init_data_cost_callers[] = { init_data_cost_caller_<T>, init_data_cost_caller_<T>, init_data_cost_reduce_caller_<T, 4>, init_data_cost_reduce_caller_<T, 8>, init_data_cost_reduce_caller_<T, 16>, init_data_cost_reduce_caller_<T, 32>, init_data_cost_reduce_caller_<T, 64>, init_data_cost_reduce_caller_<T, 128>, init_data_cost_reduce_caller_<T, 256> }; size_t disp_step = msg_step * h; cudaSafeCall( hipMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) ); cudaSafeCall( hipMemcpyToSymbol(cmsg_step1, &msg_step, sizeof(size_t)) ); init_data_cost_callers[level](rows, cols, h, w, level, ndisp, channels, stream); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); if (use_local_init_data_cost == true) hipLaunchKernelGGL(( get_first_k_initial_local), dim3(grid), dim3(threads), 0, stream, data_cost_selected, disp_selected_pyr, h, w, nr_plane); else hipLaunchKernelGGL(( get_first_k_initial_global), dim3(grid), dim3(threads), 0, stream, data_cost_selected, disp_selected_pyr, h, w, nr_plane); 
cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void init_data_cost(int rows, int cols, short* disp_selected_pyr, short* data_cost_selected, size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, hipStream_t stream); template void init_data_cost(int rows, int cols, float* disp_selected_pyr, float* data_cost_selected, size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, hipStream_t stream); /////////////////////////////////////////////////////////////// ////////////////////// compute data cost ////////////////////// /////////////////////////////////////////////////////////////// template <typename T, int channels> __global__ void compute_data_cost(const T* selected_disp_pyr, T* data_cost_, int h, int w, int level, int nr_plane) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { int y0 = y << level; int yt = (y + 1) << level; int x0 = x << level; int xt = (x + 1) << level; const T* selected_disparity = selected_disp_pyr + y/2 * cmsg_step2 + x/2; T* data_cost = data_cost_ + y * cmsg_step1 + x; for(int d = 0; d < nr_plane; d++) { float val = 0.0f; for(int yi = y0; yi < yt; yi++) { for(int xi = x0; xi < xt; xi++) { int sel_disp = selected_disparity[d * cdisp_step2]; int xr = xi - sel_disp; if (xr < 0 || sel_disp < cth) val += cdata_weight * cmax_data_term; else { const uchar* left_x = cleft + yi * cimg_step + xi * channels; const uchar* right_x = cright + yi * cimg_step + xr * channels; val += DataCostPerPixel<channels>::compute(left_x, right_x); } } } data_cost[cdisp_step1 * d] = saturate_cast<T>(val); } } } template <typename T, int winsz, int channels> __global__ void compute_data_cost_reduce(const T* selected_disp_pyr, T* data_cost_, int level, int rows, int cols, int h, int nr_plane) { int x_out = blockIdx.x; int y_out = blockIdx.y % h; int d = (blockIdx.y / h) * blockDim.z + threadIdx.z; int tid = threadIdx.x; const T* selected_disparity = selected_disp_pyr + y_out/2 * cmsg_step2 + x_out/2; T* data_cost = data_cost_ + y_out * cmsg_step1 + x_out; if (d < nr_plane) { int sel_disp = selected_disparity[d * cdisp_step2]; int x0 = x_out << level; int y0 = y_out << level; int len = ::min(y0 + winsz, rows) - y0; float val = 0.0f; if (x0 + tid < cols) { if (x0 + tid - sel_disp < 0 || sel_disp < cth) val = cdata_weight * cmax_data_term * len; else { const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid ); const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - sel_disp); for(int y = 0; y < len; ++y) { val += DataCostPerPixel<channels>::compute(lle, lri); lle += cimg_step; lri += cimg_step; } } } extern __shared__ float smem[]; float* dline = smem + winsz * threadIdx.z; dline[tid] = val; __syncthreads(); if (winsz >= 256) { if (tid < 128) { dline[tid] += dline[tid + 128]; } __syncthreads(); } if (winsz >= 128) { if (tid < 64) { dline[tid] += dline[tid + 64]; } __syncthreads(); } volatile float* vdline = smem + winsz * threadIdx.z; if (winsz >= 64) if (tid < 32) vdline[tid] += vdline[tid + 32]; if (winsz >= 32) if (tid < 16) vdline[tid] += vdline[tid + 16]; if (winsz >= 16) if (tid < 8) vdline[tid] += vdline[tid + 8]; if (winsz >= 8) if (tid < 4) vdline[tid] += vdline[tid + 4]; if (winsz >= 4) if (tid < 2) vdline[tid] += vdline[tid + 2]; if (winsz >= 2) if (tid < 1) vdline[tid] += vdline[tid + 1]; if (tid == 0) data_cost[cdisp_step1 * d] = 
saturate_cast<T>(dline[0]); } } template <typename T> void compute_data_cost_caller_(const T* disp_selected_pyr, T* data_cost, int /*rows*/, int /*cols*/, int h, int w, int level, int nr_plane, int channels, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); switch(channels) { case 1:hipLaunchKernelGGL(( compute_data_cost<T, 1>), dim3(grid), dim3(threads), 0, stream, disp_selected_pyr, data_cost, h, w, level, nr_plane); break; case 3:hipLaunchKernelGGL(( compute_data_cost<T, 3>), dim3(grid), dim3(threads), 0, stream, disp_selected_pyr, data_cost, h, w, level, nr_plane); break; case 4:hipLaunchKernelGGL(( compute_data_cost<T, 4>), dim3(grid), dim3(threads), 0, stream, disp_selected_pyr, data_cost, h, w, level, nr_plane); break; default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "compute_data_cost_caller_"); } } template <typename T, int winsz> void compute_data_cost_reduce_caller_(const T* disp_selected_pyr, T* data_cost, int rows, int cols, int h, int w, int level, int nr_plane, int channels, hipStream_t stream) { const int threadsNum = 256; const size_t smem_size = threadsNum * sizeof(float); dim3 threads(winsz, 1, threadsNum / winsz); dim3 grid(w, h, 1); grid.y *= divUp(nr_plane, threads.z); switch (channels) { case 1:hipLaunchKernelGGL(( compute_data_cost_reduce<T, winsz, 1>), dim3(grid), dim3(threads), smem_size, stream, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break; case 3:hipLaunchKernelGGL(( compute_data_cost_reduce<T, winsz, 3>), dim3(grid), dim3(threads), smem_size, stream, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break; case 4:hipLaunchKernelGGL(( compute_data_cost_reduce<T, winsz, 4>), dim3(grid), dim3(threads), smem_size, stream, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break; default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "compute_data_cost_reduce_caller_"); } } template<class T> void compute_data_cost(const T* disp_selected_pyr, T* data_cost, size_t msg_step1, size_t msg_step2, int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, hipStream_t stream) { typedef void (*ComputeDataCostCaller)(const T* disp_selected_pyr, T* data_cost, int rows, int cols, int h, int w, int level, int nr_plane, int channels, hipStream_t stream); static const ComputeDataCostCaller callers[] = { compute_data_cost_caller_<T>, compute_data_cost_caller_<T>, compute_data_cost_reduce_caller_<T, 4>, compute_data_cost_reduce_caller_<T, 8>, compute_data_cost_reduce_caller_<T, 16>, compute_data_cost_reduce_caller_<T, 32>, compute_data_cost_reduce_caller_<T, 64>, compute_data_cost_reduce_caller_<T, 128>, compute_data_cost_reduce_caller_<T, 256> }; size_t disp_step1 = msg_step1 * h; size_t disp_step2 = msg_step2 * h2; cudaSafeCall( hipMemcpyToSymbol(cdisp_step1, &disp_step1, sizeof(size_t)) ); cudaSafeCall( hipMemcpyToSymbol(cdisp_step2, &disp_step2, sizeof(size_t)) ); cudaSafeCall( hipMemcpyToSymbol(cmsg_step1, &msg_step1, sizeof(size_t)) ); cudaSafeCall( hipMemcpyToSymbol(cmsg_step2, &msg_step2, sizeof(size_t)) ); callers[level](disp_selected_pyr, data_cost, rows, cols, h, w, level, nr_plane, channels, stream); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void compute_data_cost(const short* disp_selected_pyr, short* data_cost, size_t msg_step1, size_t msg_step2, int rows, int cols, int h, int w, int h2, int level, int nr_plane, int 
channels, hipStream_t stream); template void compute_data_cost(const float* disp_selected_pyr, float* data_cost, size_t msg_step1, size_t msg_step2, int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, hipStream_t stream); /////////////////////////////////////////////////////////////// //////////////////////// init message ///////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __device__ void get_first_k_element_increase(T* u_new, T* d_new, T* l_new, T* r_new, const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur, T* data_cost_selected, T* disparity_selected_new, T* data_cost_new, const T* data_cost_cur, const T* disparity_selected_cur, int nr_plane, int nr_plane2) { for(int i = 0; i < nr_plane; i++) { T minimum = numeric_limits<T>::max(); int id = 0; for(int j = 0; j < nr_plane2; j++) { T cur = data_cost_new[j * cdisp_step1]; if(cur < minimum) { minimum = cur; id = j; } } data_cost_selected[i * cdisp_step1] = data_cost_cur[id * cdisp_step1]; disparity_selected_new[i * cdisp_step1] = disparity_selected_cur[id * cdisp_step2]; u_new[i * cdisp_step1] = u_cur[id * cdisp_step2]; d_new[i * cdisp_step1] = d_cur[id * cdisp_step2]; l_new[i * cdisp_step1] = l_cur[id * cdisp_step2]; r_new[i * cdisp_step1] = r_cur[id * cdisp_step2]; data_cost_new[id * cdisp_step1] = numeric_limits<T>::max(); } } template <typename T> __global__ void init_message(T* u_new_, T* d_new_, T* l_new_, T* r_new_, const T* u_cur_, const T* d_cur_, const T* l_cur_, const T* r_cur_, T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, T* data_cost_selected_, const T* data_cost_, int h, int w, int nr_plane, int h2, int w2, int nr_plane2) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { const T* u_cur = u_cur_ + ::min(h2-1, y/2 + 1) * cmsg_step2 + x/2; const T* d_cur = d_cur_ + ::max(0, y/2 - 1) * cmsg_step2 + x/2; const T* l_cur = l_cur_ + y/2 * cmsg_step2 + ::min(w2-1, x/2 + 1); const T* r_cur = r_cur_ + y/2 * cmsg_step2 + ::max(0, x/2 - 1); T* data_cost_new = (T*)ctemp + y * cmsg_step1 + x; const T* disparity_selected_cur = selected_disp_pyr_cur + y/2 * cmsg_step2 + x/2; const T* data_cost = data_cost_ + y * cmsg_step1 + x; for(int d = 0; d < nr_plane2; d++) { int idx2 = d * cdisp_step2; T val = data_cost[d * cdisp_step1] + u_cur[idx2] + d_cur[idx2] + l_cur[idx2] + r_cur[idx2]; data_cost_new[d * cdisp_step1] = val; } T* data_cost_selected = data_cost_selected_ + y * cmsg_step1 + x; T* disparity_selected_new = selected_disp_pyr_new + y * cmsg_step1 + x; T* u_new = u_new_ + y * cmsg_step1 + x; T* d_new = d_new_ + y * cmsg_step1 + x; T* l_new = l_new_ + y * cmsg_step1 + x; T* r_new = r_new_ + y * cmsg_step1 + x; u_cur = u_cur_ + y/2 * cmsg_step2 + x/2; d_cur = d_cur_ + y/2 * cmsg_step2 + x/2; l_cur = l_cur_ + y/2 * cmsg_step2 + x/2; r_cur = r_cur_ + y/2 * cmsg_step2 + x/2; get_first_k_element_increase(u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur, data_cost_selected, disparity_selected_new, data_cost_new, data_cost, disparity_selected_cur, nr_plane, nr_plane2); } } template<class T> void init_message(T* u_new, T* d_new, T* l_new, T* r_new, const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur, T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, T* data_cost_selected, const T* data_cost, size_t msg_step1, size_t msg_step2, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, hipStream_t stream) { size_t disp_step1 = msg_step1 * h; size_t 
disp_step2 = msg_step2 * h2; cudaSafeCall( hipMemcpyToSymbol(cdisp_step1, &disp_step1, sizeof(size_t)) ); cudaSafeCall( hipMemcpyToSymbol(cdisp_step2, &disp_step2, sizeof(size_t)) ); cudaSafeCall( hipMemcpyToSymbol(cmsg_step1, &msg_step1, sizeof(size_t)) ); cudaSafeCall( hipMemcpyToSymbol(cmsg_step2, &msg_step2, sizeof(size_t)) ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); hipLaunchKernelGGL(( init_message), dim3(grid), dim3(threads), 0, stream, u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur, selected_disp_pyr_new, selected_disp_pyr_cur, data_cost_selected, data_cost, h, w, nr_plane, h2, w2, nr_plane2); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void init_message(short* u_new, short* d_new, short* l_new, short* r_new, const short* u_cur, const short* d_cur, const short* l_cur, const short* r_cur, short* selected_disp_pyr_new, const short* selected_disp_pyr_cur, short* data_cost_selected, const short* data_cost, size_t msg_step1, size_t msg_step2, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, hipStream_t stream); template void init_message(float* u_new, float* d_new, float* l_new, float* r_new, const float* u_cur, const float* d_cur, const float* l_cur, const float* r_cur, float* selected_disp_pyr_new, const float* selected_disp_pyr_cur, float* data_cost_selected, const float* data_cost, size_t msg_step1, size_t msg_step2, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, hipStream_t stream); /////////////////////////////////////////////////////////////// //////////////////// calc all iterations ///////////////////// /////////////////////////////////////////////////////////////// template <typename T> __device__ void message_per_pixel(const T* data, T* msg_dst, const T* msg1, const T* msg2, const T* msg3, const T* dst_disp, const T* src_disp, int nr_plane, T* temp) { T minimum = numeric_limits<T>::max(); for(int d = 0; d < nr_plane; d++) { int idx = d * cdisp_step1; T val = data[idx] + msg1[idx] + msg2[idx] + msg3[idx]; if(val < minimum) minimum = val; msg_dst[idx] = val; } float sum = 0; for(int d = 0; d < nr_plane; d++) { float cost_min = minimum + cmax_disc_term; T src_disp_reg = src_disp[d * cdisp_step1]; for(int d2 = 0; d2 < nr_plane; d2++) cost_min = fmin(cost_min, msg_dst[d2 * cdisp_step1] + cdisc_single_jump * ::abs(dst_disp[d2 * cdisp_step1] - src_disp_reg)); temp[d * cdisp_step1] = saturate_cast<T>(cost_min); sum += cost_min; } sum /= nr_plane; for(int d = 0; d < nr_plane; d++) msg_dst[d * cdisp_step1] = saturate_cast<T>(temp[d * cdisp_step1] - sum); } template <typename T> __global__ void compute_message(T* u_, T* d_, T* l_, T* r_, const T* data_cost_selected, const T* selected_disp_pyr_cur, int h, int w, int nr_plane, int i) { int y = blockIdx.y * blockDim.y + threadIdx.y; int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + i) & 1); if (y > 0 && y < h - 1 && x > 0 && x < w - 1) { const T* data = data_cost_selected + y * cmsg_step1 + x; T* u = u_ + y * cmsg_step1 + x; T* d = d_ + y * cmsg_step1 + x; T* l = l_ + y * cmsg_step1 + x; T* r = r_ + y * cmsg_step1 + x; const T* disp = selected_disp_pyr_cur + y * cmsg_step1 + x; T* temp = (T*)ctemp + y * cmsg_step1 + x; message_per_pixel(data, u, r - 1, u + cmsg_step1, l + 1, disp, disp - cmsg_step1, nr_plane, temp); message_per_pixel(data, d, d - cmsg_step1, r - 1, l + 1, disp, disp + cmsg_step1, nr_plane, temp); message_per_pixel(data, l, u + cmsg_step1, d - cmsg_step1, l + 1, 
disp, disp - 1, nr_plane, temp); message_per_pixel(data, r, u + cmsg_step1, d - cmsg_step1, r - 1, disp, disp + 1, nr_plane, temp); } } template<class T> void calc_all_iterations(T* u, T* d, T* l, T* r, const T* data_cost_selected, const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, hipStream_t stream) { size_t disp_step = msg_step * h; cudaSafeCall( hipMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) ); cudaSafeCall( hipMemcpyToSymbol(cmsg_step1, &msg_step, sizeof(size_t)) ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x << 1); grid.y = divUp(h, threads.y); for(int t = 0; t < iters; ++t) { hipLaunchKernelGGL(( compute_message), dim3(grid), dim3(threads), 0, stream, u, d, l, r, data_cost_selected, selected_disp_pyr_cur, h, w, nr_plane, t & 1); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } }; template void calc_all_iterations(short* u, short* d, short* l, short* r, const short* data_cost_selected, const short* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, hipStream_t stream); template void calc_all_iterations(float* u, float* d, float* l, float* r, const float* data_cost_selected, const float* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, hipStream_t stream); /////////////////////////////////////////////////////////////// /////////////////////////// output //////////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __global__ void compute_disp(const T* u_, const T* d_, const T* l_, const T* r_, const T* data_cost_selected, const T* disp_selected_pyr, short* disp, size_t res_step, int cols, int rows, int nr_plane) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y > 0 && y < rows - 1 && x > 0 && x < cols - 1) { const T* data = data_cost_selected + y * cmsg_step1 + x; const T* disp_selected = disp_selected_pyr + y * cmsg_step1 + x; const T* u = u_ + (y+1) * cmsg_step1 + (x+0); const T* d = d_ + (y-1) * cmsg_step1 + (x+0); const T* l = l_ + (y+0) * cmsg_step1 + (x+1); const T* r = r_ + (y+0) * cmsg_step1 + (x-1); int best = 0; T best_val = numeric_limits<T>::max(); for (int i = 0; i < nr_plane; ++i) { int idx = i * cdisp_step1; T val = data[idx]+ u[idx] + d[idx] + l[idx] + r[idx]; if (val < best_val) { best_val = val; best = saturate_cast<short>(disp_selected[idx]); } } disp[res_step * y + x] = best; } } template<class T> void compute_disp(const T* u, const T* d, const T* l, const T* r, const T* data_cost_selected, const T* disp_selected, size_t msg_step, const DevMem2D_<short>& disp, int nr_plane, hipStream_t stream) { size_t disp_step = disp.rows * msg_step; cudaSafeCall( hipMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) ); cudaSafeCall( hipMemcpyToSymbol(cmsg_step1, &msg_step, sizeof(size_t)) ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(disp.cols, threads.x); grid.y = divUp(disp.rows, threads.y); hipLaunchKernelGGL(( compute_disp), dim3(grid), dim3(threads), 0, stream, u, d, l, r, data_cost_selected, disp_selected, disp.data, disp.step / disp.elemSize(), disp.cols, disp.rows, nr_plane); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void compute_disp(const short* u, const short* d, const short* l, const short* r, const short* data_cost_selected, const short* disp_selected, size_t msg_step, const DevMem2D_<short>& disp, int 
nr_plane, hipStream_t stream); template void compute_disp(const float* u, const float* d, const float* l, const float* r, const float* data_cost_selected, const float* disp_selected, size_t msg_step, const DevMem2D_<short>& disp, int nr_plane, hipStream_t stream); } // namespace stereocsbp }}} // namespace cv { namespace gpu { namespace device {
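Throughout the file above, every buffer access goes through the constant-memory strides cmsg_step1/cmsg_step2 (row pitch) and cdisp_step1/cdisp_step2 (distance between disparity planes, set to msg_step * h before each launch). What follows is a minimal, hypothetical sketch of that plane-major indexing only; it is not part of the OpenCV sources, every name in it is invented, and it is written against the plain CUDA runtime (the hip* spelling used above is the direct analogue).

// Hypothetical sketch of the plane-major layout: element (x, y) of plane d
// lives at  y * msg_step + x + d * disp_step,  with disp_step = msg_step * h.
#include <cstdio>
#include <cuda_runtime.h>

// Invented stand-ins for cmsg_step1 / cdisp_step1 above.
__constant__ size_t c_msg_step;   // row pitch of the buffer, in elements
__constant__ size_t c_disp_step;  // stride between disparity planes, in elements

__global__ void fill_plane_major(float* buf, int h, int w, int nr_plane)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < w && y < h)
    {
        float* p = buf + y * c_msg_step + x;          // element (x, y) of plane 0
        for (int d = 0; d < nr_plane; ++d)
            p[d * c_disp_step] = (float)d;            // same (x, y) in plane d
    }
}

int main()
{
    const int h = 8, w = 8, nr_plane = 4;
    size_t msg_step  = w;                // contiguous rows for simplicity
    size_t disp_step = msg_step * h;     // same rule as disp_step1 = msg_step1 * h above

    float* d_buf = 0;
    cudaMalloc(&d_buf, disp_step * nr_plane * sizeof(float));
    cudaMemcpyToSymbol(c_msg_step,  &msg_step,  sizeof(size_t));
    cudaMemcpyToSymbol(c_disp_step, &disp_step, sizeof(size_t));

    dim3 threads(32, 8, 1);
    dim3 grid((w + threads.x - 1) / threads.x, (h + threads.y - 1) / threads.y, 1);
    fill_plane_major<<<grid, threads>>>(d_buf, h, w, nr_plane);
    cudaDeviceSynchronize();
    cudaFree(d_buf);
    return 0;
}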
814df8d7020b2248a90d258ea996fb953e6239e5.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #include "internal_shared.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/limits.hpp" namespace cv { namespace gpu { namespace device { namespace stereocsbp { /////////////////////////////////////////////////////////////// /////////////////////// load constants //////////////////////// /////////////////////////////////////////////////////////////// __constant__ int cndisp; __constant__ float cmax_data_term; __constant__ float cdata_weight; __constant__ float cmax_disc_term; __constant__ float cdisc_single_jump; __constant__ int cth; __constant__ size_t cimg_step; __constant__ size_t cmsg_step1; __constant__ size_t cmsg_step2; __constant__ size_t cdisp_step1; __constant__ size_t cdisp_step2; __constant__ uchar* cleft; __constant__ uchar* cright; __constant__ uchar* ctemp; void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th, const DevMem2Db& left, const DevMem2Db& right, const DevMem2Db& temp) { cudaSafeCall( cudaMemcpyToSymbol(cndisp, &ndisp, sizeof(int)) ); cudaSafeCall( cudaMemcpyToSymbol(cmax_data_term, &max_data_term, sizeof(float)) ); cudaSafeCall( cudaMemcpyToSymbol(cdata_weight, &data_weight, sizeof(float)) ); cudaSafeCall( cudaMemcpyToSymbol(cmax_disc_term, &max_disc_term, sizeof(float)) ); cudaSafeCall( cudaMemcpyToSymbol(cdisc_single_jump, &disc_single_jump, sizeof(float)) ); cudaSafeCall( cudaMemcpyToSymbol(cth, &min_disp_th, sizeof(int)) ); cudaSafeCall( cudaMemcpyToSymbol(cimg_step, &left.step, sizeof(size_t)) ); cudaSafeCall( cudaMemcpyToSymbol(cleft, &left.data, sizeof(left.data)) ); cudaSafeCall( cudaMemcpyToSymbol(cright, &right.data, sizeof(right.data)) ); cudaSafeCall( cudaMemcpyToSymbol(ctemp, &temp.data, sizeof(temp.data)) ); } /////////////////////////////////////////////////////////////// /////////////////////// init data cost //////////////////////// /////////////////////////////////////////////////////////////// template <int channels> struct DataCostPerPixel; template <> struct DataCostPerPixel<1> { static __device__ __forceinline__ float compute(const uchar* left, const uchar* right) { return fmin(cdata_weight * ::abs((int)*left - *right), cdata_weight * cmax_data_term); } }; template <> struct DataCostPerPixel<3> { static __device__ __forceinline__ float compute(const uchar* left, const uchar* right) { float tb = 0.114f * ::abs((int)left[0] - right[0]); float tg = 0.587f * ::abs((int)left[1] - right[1]); float tr = 0.299f * ::abs((int)left[2] - right[2]); return fmin(cdata_weight * (tr + tg + tb), cdata_weight * cmax_data_term); } }; template <> struct DataCostPerPixel<4> { static __device__ __forceinline__ float compute(const uchar* left, const uchar* right) { uchar4 l = *((const uchar4*)left); uchar4 r = *((const uchar4*)right); float tb = 0.114f * ::abs((int)l.x - r.x); float tg = 0.587f * ::abs((int)l.y - r.y); float tr = 0.299f * ::abs((int)l.z - r.z); return fmin(cdata_weight * (tr + tg + tb), cdata_weight * cmax_data_term); } }; template <typename T> __global__ void get_first_k_initial_global(T* data_cost_selected_, T *selected_disp_pyr, int h, int w, int nr_plane) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { T* selected_disparity = selected_disp_pyr + y * cmsg_step1 + x; T* data_cost_selected = data_cost_selected_ + y * cmsg_step1 + x; T* data_cost = (T*)ctemp + y * cmsg_step1 + x; for(int i = 0; i < nr_plane; i++) { T minimum = device::numeric_limits<T>::max(); 
int id = 0; for(int d = 0; d < cndisp; d++) { T cur = data_cost[d * cdisp_step1]; if(cur < minimum) { minimum = cur; id = d; } } data_cost_selected[i * cdisp_step1] = minimum; selected_disparity[i * cdisp_step1] = id; data_cost [id * cdisp_step1] = numeric_limits<T>::max(); } } } template <typename T> __global__ void get_first_k_initial_local(T* data_cost_selected_, T* selected_disp_pyr, int h, int w, int nr_plane) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { T* selected_disparity = selected_disp_pyr + y * cmsg_step1 + x; T* data_cost_selected = data_cost_selected_ + y * cmsg_step1 + x; T* data_cost = (T*)ctemp + y * cmsg_step1 + x; int nr_local_minimum = 0; T prev = data_cost[0 * cdisp_step1]; T cur = data_cost[1 * cdisp_step1]; T next = data_cost[2 * cdisp_step1]; for (int d = 1; d < cndisp - 1 && nr_local_minimum < nr_plane; d++) { if (cur < prev && cur < next) { data_cost_selected[nr_local_minimum * cdisp_step1] = cur; selected_disparity[nr_local_minimum * cdisp_step1] = d; data_cost[d * cdisp_step1] = numeric_limits<T>::max(); nr_local_minimum++; } prev = cur; cur = next; next = data_cost[(d + 1) * cdisp_step1]; } for (int i = nr_local_minimum; i < nr_plane; i++) { T minimum = numeric_limits<T>::max(); int id = 0; for (int d = 0; d < cndisp; d++) { cur = data_cost[d * cdisp_step1]; if (cur < minimum) { minimum = cur; id = d; } } data_cost_selected[i * cdisp_step1] = minimum; selected_disparity[i * cdisp_step1] = id; data_cost[id * cdisp_step1] = numeric_limits<T>::max(); } } } template <typename T, int channels> __global__ void init_data_cost(int h, int w, int level) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { int y0 = y << level; int yt = (y + 1) << level; int x0 = x << level; int xt = (x + 1) << level; T* data_cost = (T*)ctemp + y * cmsg_step1 + x; for(int d = 0; d < cndisp; ++d) { float val = 0.0f; for(int yi = y0; yi < yt; yi++) { for(int xi = x0; xi < xt; xi++) { int xr = xi - d; if(d < cth || xr < 0) val += cdata_weight * cmax_data_term; else { const uchar* lle = cleft + yi * cimg_step + xi * channels; const uchar* lri = cright + yi * cimg_step + xr * channels; val += DataCostPerPixel<channels>::compute(lle, lri); } } } data_cost[cdisp_step1 * d] = saturate_cast<T>(val); } } } template <typename T, int winsz, int channels> __global__ void init_data_cost_reduce(int level, int rows, int cols, int h) { int x_out = blockIdx.x; int y_out = blockIdx.y % h; int d = (blockIdx.y / h) * blockDim.z + threadIdx.z; int tid = threadIdx.x; if (d < cndisp) { int x0 = x_out << level; int y0 = y_out << level; int len = ::min(y0 + winsz, rows) - y0; float val = 0.0f; if (x0 + tid < cols) { if (x0 + tid - d < 0 || d < cth) val = cdata_weight * cmax_data_term * len; else { const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid ); const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - d); for(int y = 0; y < len; ++y) { val += DataCostPerPixel<channels>::compute(lle, lri); lle += cimg_step; lri += cimg_step; } } } extern __shared__ float smem[]; float* dline = smem + winsz * threadIdx.z; dline[tid] = val; __syncthreads(); if (winsz >= 256) { if (tid < 128) { dline[tid] += dline[tid + 128]; } __syncthreads(); } if (winsz >= 128) { if (tid < 64) { dline[tid] += dline[tid + 64]; } __syncthreads(); } volatile float* vdline = smem + winsz * threadIdx.z; if (winsz >= 64) if (tid < 32) vdline[tid] += vdline[tid + 32]; if (winsz >= 32) if 
(tid < 16) vdline[tid] += vdline[tid + 16]; if (winsz >= 16) if (tid < 8) vdline[tid] += vdline[tid + 8]; if (winsz >= 8) if (tid < 4) vdline[tid] += vdline[tid + 4]; if (winsz >= 4) if (tid < 2) vdline[tid] += vdline[tid + 2]; if (winsz >= 2) if (tid < 1) vdline[tid] += vdline[tid + 1]; T* data_cost = (T*)ctemp + y_out * cmsg_step1 + x_out; if (tid == 0) data_cost[cdisp_step1 * d] = saturate_cast<T>(dline[0]); } } template <typename T> void init_data_cost_caller_(int /*rows*/, int /*cols*/, int h, int w, int level, int /*ndisp*/, int channels, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); switch (channels) { case 1: init_data_cost<T, 1><<<grid, threads, 0, stream>>>(h, w, level); break; case 3: init_data_cost<T, 3><<<grid, threads, 0, stream>>>(h, w, level); break; case 4: init_data_cost<T, 4><<<grid, threads, 0, stream>>>(h, w, level); break; default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "init_data_cost_caller_"); } } template <typename T, int winsz> void init_data_cost_reduce_caller_(int rows, int cols, int h, int w, int level, int ndisp, int channels, cudaStream_t stream) { const int threadsNum = 256; const size_t smem_size = threadsNum * sizeof(float); dim3 threads(winsz, 1, threadsNum / winsz); dim3 grid(w, h, 1); grid.y *= divUp(ndisp, threads.z); switch (channels) { case 1: init_data_cost_reduce<T, winsz, 1><<<grid, threads, smem_size, stream>>>(level, rows, cols, h); break; case 3: init_data_cost_reduce<T, winsz, 3><<<grid, threads, smem_size, stream>>>(level, rows, cols, h); break; case 4: init_data_cost_reduce<T, winsz, 4><<<grid, threads, smem_size, stream>>>(level, rows, cols, h); break; default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "init_data_cost_reduce_caller_"); } } template<class T> void init_data_cost(int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream) { typedef void (*InitDataCostCaller)(int cols, int rows, int w, int h, int level, int ndisp, int channels, cudaStream_t stream); static const InitDataCostCaller init_data_cost_callers[] = { init_data_cost_caller_<T>, init_data_cost_caller_<T>, init_data_cost_reduce_caller_<T, 4>, init_data_cost_reduce_caller_<T, 8>, init_data_cost_reduce_caller_<T, 16>, init_data_cost_reduce_caller_<T, 32>, init_data_cost_reduce_caller_<T, 64>, init_data_cost_reduce_caller_<T, 128>, init_data_cost_reduce_caller_<T, 256> }; size_t disp_step = msg_step * h; cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) ); cudaSafeCall( cudaMemcpyToSymbol(cmsg_step1, &msg_step, sizeof(size_t)) ); init_data_cost_callers[level](rows, cols, h, w, level, ndisp, channels, stream); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); if (use_local_init_data_cost == true) get_first_k_initial_local<<<grid, threads, 0, stream>>> (data_cost_selected, disp_selected_pyr, h, w, nr_plane); else get_first_k_initial_global<<<grid, threads, 0, stream>>>(data_cost_selected, disp_selected_pyr, h, w, nr_plane); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void init_data_cost(int rows, int cols, short* disp_selected_pyr, short* data_cost_selected, size_t msg_step, int h, 
int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream); template void init_data_cost(int rows, int cols, float* disp_selected_pyr, float* data_cost_selected, size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream); /////////////////////////////////////////////////////////////// ////////////////////// compute data cost ////////////////////// /////////////////////////////////////////////////////////////// template <typename T, int channels> __global__ void compute_data_cost(const T* selected_disp_pyr, T* data_cost_, int h, int w, int level, int nr_plane) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { int y0 = y << level; int yt = (y + 1) << level; int x0 = x << level; int xt = (x + 1) << level; const T* selected_disparity = selected_disp_pyr + y/2 * cmsg_step2 + x/2; T* data_cost = data_cost_ + y * cmsg_step1 + x; for(int d = 0; d < nr_plane; d++) { float val = 0.0f; for(int yi = y0; yi < yt; yi++) { for(int xi = x0; xi < xt; xi++) { int sel_disp = selected_disparity[d * cdisp_step2]; int xr = xi - sel_disp; if (xr < 0 || sel_disp < cth) val += cdata_weight * cmax_data_term; else { const uchar* left_x = cleft + yi * cimg_step + xi * channels; const uchar* right_x = cright + yi * cimg_step + xr * channels; val += DataCostPerPixel<channels>::compute(left_x, right_x); } } } data_cost[cdisp_step1 * d] = saturate_cast<T>(val); } } } template <typename T, int winsz, int channels> __global__ void compute_data_cost_reduce(const T* selected_disp_pyr, T* data_cost_, int level, int rows, int cols, int h, int nr_plane) { int x_out = blockIdx.x; int y_out = blockIdx.y % h; int d = (blockIdx.y / h) * blockDim.z + threadIdx.z; int tid = threadIdx.x; const T* selected_disparity = selected_disp_pyr + y_out/2 * cmsg_step2 + x_out/2; T* data_cost = data_cost_ + y_out * cmsg_step1 + x_out; if (d < nr_plane) { int sel_disp = selected_disparity[d * cdisp_step2]; int x0 = x_out << level; int y0 = y_out << level; int len = ::min(y0 + winsz, rows) - y0; float val = 0.0f; if (x0 + tid < cols) { if (x0 + tid - sel_disp < 0 || sel_disp < cth) val = cdata_weight * cmax_data_term * len; else { const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid ); const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - sel_disp); for(int y = 0; y < len; ++y) { val += DataCostPerPixel<channels>::compute(lle, lri); lle += cimg_step; lri += cimg_step; } } } extern __shared__ float smem[]; float* dline = smem + winsz * threadIdx.z; dline[tid] = val; __syncthreads(); if (winsz >= 256) { if (tid < 128) { dline[tid] += dline[tid + 128]; } __syncthreads(); } if (winsz >= 128) { if (tid < 64) { dline[tid] += dline[tid + 64]; } __syncthreads(); } volatile float* vdline = smem + winsz * threadIdx.z; if (winsz >= 64) if (tid < 32) vdline[tid] += vdline[tid + 32]; if (winsz >= 32) if (tid < 16) vdline[tid] += vdline[tid + 16]; if (winsz >= 16) if (tid < 8) vdline[tid] += vdline[tid + 8]; if (winsz >= 8) if (tid < 4) vdline[tid] += vdline[tid + 4]; if (winsz >= 4) if (tid < 2) vdline[tid] += vdline[tid + 2]; if (winsz >= 2) if (tid < 1) vdline[tid] += vdline[tid + 1]; if (tid == 0) data_cost[cdisp_step1 * d] = saturate_cast<T>(dline[0]); } } template <typename T> void compute_data_cost_caller_(const T* disp_selected_pyr, T* data_cost, int /*rows*/, int /*cols*/, int h, int w, int level, int nr_plane, int channels, cudaStream_t 
stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); switch(channels) { case 1: compute_data_cost<T, 1><<<grid, threads, 0, stream>>>(disp_selected_pyr, data_cost, h, w, level, nr_plane); break; case 3: compute_data_cost<T, 3><<<grid, threads, 0, stream>>>(disp_selected_pyr, data_cost, h, w, level, nr_plane); break; case 4: compute_data_cost<T, 4><<<grid, threads, 0, stream>>>(disp_selected_pyr, data_cost, h, w, level, nr_plane); break; default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "compute_data_cost_caller_"); } } template <typename T, int winsz> void compute_data_cost_reduce_caller_(const T* disp_selected_pyr, T* data_cost, int rows, int cols, int h, int w, int level, int nr_plane, int channels, cudaStream_t stream) { const int threadsNum = 256; const size_t smem_size = threadsNum * sizeof(float); dim3 threads(winsz, 1, threadsNum / winsz); dim3 grid(w, h, 1); grid.y *= divUp(nr_plane, threads.z); switch (channels) { case 1: compute_data_cost_reduce<T, winsz, 1><<<grid, threads, smem_size, stream>>>(disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break; case 3: compute_data_cost_reduce<T, winsz, 3><<<grid, threads, smem_size, stream>>>(disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break; case 4: compute_data_cost_reduce<T, winsz, 4><<<grid, threads, smem_size, stream>>>(disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane); break; default: cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "compute_data_cost_reduce_caller_"); } } template<class T> void compute_data_cost(const T* disp_selected_pyr, T* data_cost, size_t msg_step1, size_t msg_step2, int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream) { typedef void (*ComputeDataCostCaller)(const T* disp_selected_pyr, T* data_cost, int rows, int cols, int h, int w, int level, int nr_plane, int channels, cudaStream_t stream); static const ComputeDataCostCaller callers[] = { compute_data_cost_caller_<T>, compute_data_cost_caller_<T>, compute_data_cost_reduce_caller_<T, 4>, compute_data_cost_reduce_caller_<T, 8>, compute_data_cost_reduce_caller_<T, 16>, compute_data_cost_reduce_caller_<T, 32>, compute_data_cost_reduce_caller_<T, 64>, compute_data_cost_reduce_caller_<T, 128>, compute_data_cost_reduce_caller_<T, 256> }; size_t disp_step1 = msg_step1 * h; size_t disp_step2 = msg_step2 * h2; cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step1, sizeof(size_t)) ); cudaSafeCall( cudaMemcpyToSymbol(cdisp_step2, &disp_step2, sizeof(size_t)) ); cudaSafeCall( cudaMemcpyToSymbol(cmsg_step1, &msg_step1, sizeof(size_t)) ); cudaSafeCall( cudaMemcpyToSymbol(cmsg_step2, &msg_step2, sizeof(size_t)) ); callers[level](disp_selected_pyr, data_cost, rows, cols, h, w, level, nr_plane, channels, stream); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void compute_data_cost(const short* disp_selected_pyr, short* data_cost, size_t msg_step1, size_t msg_step2, int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream); template void compute_data_cost(const float* disp_selected_pyr, float* data_cost, size_t msg_step1, size_t msg_step2, int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream); /////////////////////////////////////////////////////////////// //////////////////////// init message ///////////////////////// 
/////////////////////////////////////////////////////////////// template <typename T> __device__ void get_first_k_element_increase(T* u_new, T* d_new, T* l_new, T* r_new, const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur, T* data_cost_selected, T* disparity_selected_new, T* data_cost_new, const T* data_cost_cur, const T* disparity_selected_cur, int nr_plane, int nr_plane2) { for(int i = 0; i < nr_plane; i++) { T minimum = numeric_limits<T>::max(); int id = 0; for(int j = 0; j < nr_plane2; j++) { T cur = data_cost_new[j * cdisp_step1]; if(cur < minimum) { minimum = cur; id = j; } } data_cost_selected[i * cdisp_step1] = data_cost_cur[id * cdisp_step1]; disparity_selected_new[i * cdisp_step1] = disparity_selected_cur[id * cdisp_step2]; u_new[i * cdisp_step1] = u_cur[id * cdisp_step2]; d_new[i * cdisp_step1] = d_cur[id * cdisp_step2]; l_new[i * cdisp_step1] = l_cur[id * cdisp_step2]; r_new[i * cdisp_step1] = r_cur[id * cdisp_step2]; data_cost_new[id * cdisp_step1] = numeric_limits<T>::max(); } } template <typename T> __global__ void init_message(T* u_new_, T* d_new_, T* l_new_, T* r_new_, const T* u_cur_, const T* d_cur_, const T* l_cur_, const T* r_cur_, T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, T* data_cost_selected_, const T* data_cost_, int h, int w, int nr_plane, int h2, int w2, int nr_plane2) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { const T* u_cur = u_cur_ + ::min(h2-1, y/2 + 1) * cmsg_step2 + x/2; const T* d_cur = d_cur_ + ::max(0, y/2 - 1) * cmsg_step2 + x/2; const T* l_cur = l_cur_ + y/2 * cmsg_step2 + ::min(w2-1, x/2 + 1); const T* r_cur = r_cur_ + y/2 * cmsg_step2 + ::max(0, x/2 - 1); T* data_cost_new = (T*)ctemp + y * cmsg_step1 + x; const T* disparity_selected_cur = selected_disp_pyr_cur + y/2 * cmsg_step2 + x/2; const T* data_cost = data_cost_ + y * cmsg_step1 + x; for(int d = 0; d < nr_plane2; d++) { int idx2 = d * cdisp_step2; T val = data_cost[d * cdisp_step1] + u_cur[idx2] + d_cur[idx2] + l_cur[idx2] + r_cur[idx2]; data_cost_new[d * cdisp_step1] = val; } T* data_cost_selected = data_cost_selected_ + y * cmsg_step1 + x; T* disparity_selected_new = selected_disp_pyr_new + y * cmsg_step1 + x; T* u_new = u_new_ + y * cmsg_step1 + x; T* d_new = d_new_ + y * cmsg_step1 + x; T* l_new = l_new_ + y * cmsg_step1 + x; T* r_new = r_new_ + y * cmsg_step1 + x; u_cur = u_cur_ + y/2 * cmsg_step2 + x/2; d_cur = d_cur_ + y/2 * cmsg_step2 + x/2; l_cur = l_cur_ + y/2 * cmsg_step2 + x/2; r_cur = r_cur_ + y/2 * cmsg_step2 + x/2; get_first_k_element_increase(u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur, data_cost_selected, disparity_selected_new, data_cost_new, data_cost, disparity_selected_cur, nr_plane, nr_plane2); } } template<class T> void init_message(T* u_new, T* d_new, T* l_new, T* r_new, const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur, T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, T* data_cost_selected, const T* data_cost, size_t msg_step1, size_t msg_step2, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream) { size_t disp_step1 = msg_step1 * h; size_t disp_step2 = msg_step2 * h2; cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step1, sizeof(size_t)) ); cudaSafeCall( cudaMemcpyToSymbol(cdisp_step2, &disp_step2, sizeof(size_t)) ); cudaSafeCall( cudaMemcpyToSymbol(cmsg_step1, &msg_step1, sizeof(size_t)) ); cudaSafeCall( cudaMemcpyToSymbol(cmsg_step2, &msg_step2, sizeof(size_t)) ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 
1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); init_message<<<grid, threads, 0, stream>>>(u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur, selected_disp_pyr_new, selected_disp_pyr_cur, data_cost_selected, data_cost, h, w, nr_plane, h2, w2, nr_plane2); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void init_message(short* u_new, short* d_new, short* l_new, short* r_new, const short* u_cur, const short* d_cur, const short* l_cur, const short* r_cur, short* selected_disp_pyr_new, const short* selected_disp_pyr_cur, short* data_cost_selected, const short* data_cost, size_t msg_step1, size_t msg_step2, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream); template void init_message(float* u_new, float* d_new, float* l_new, float* r_new, const float* u_cur, const float* d_cur, const float* l_cur, const float* r_cur, float* selected_disp_pyr_new, const float* selected_disp_pyr_cur, float* data_cost_selected, const float* data_cost, size_t msg_step1, size_t msg_step2, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream); /////////////////////////////////////////////////////////////// //////////////////// calc all iterations ///////////////////// /////////////////////////////////////////////////////////////// template <typename T> __device__ void message_per_pixel(const T* data, T* msg_dst, const T* msg1, const T* msg2, const T* msg3, const T* dst_disp, const T* src_disp, int nr_plane, T* temp) { T minimum = numeric_limits<T>::max(); for(int d = 0; d < nr_plane; d++) { int idx = d * cdisp_step1; T val = data[idx] + msg1[idx] + msg2[idx] + msg3[idx]; if(val < minimum) minimum = val; msg_dst[idx] = val; } float sum = 0; for(int d = 0; d < nr_plane; d++) { float cost_min = minimum + cmax_disc_term; T src_disp_reg = src_disp[d * cdisp_step1]; for(int d2 = 0; d2 < nr_plane; d2++) cost_min = fmin(cost_min, msg_dst[d2 * cdisp_step1] + cdisc_single_jump * ::abs(dst_disp[d2 * cdisp_step1] - src_disp_reg)); temp[d * cdisp_step1] = saturate_cast<T>(cost_min); sum += cost_min; } sum /= nr_plane; for(int d = 0; d < nr_plane; d++) msg_dst[d * cdisp_step1] = saturate_cast<T>(temp[d * cdisp_step1] - sum); } template <typename T> __global__ void compute_message(T* u_, T* d_, T* l_, T* r_, const T* data_cost_selected, const T* selected_disp_pyr_cur, int h, int w, int nr_plane, int i) { int y = blockIdx.y * blockDim.y + threadIdx.y; int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + i) & 1); if (y > 0 && y < h - 1 && x > 0 && x < w - 1) { const T* data = data_cost_selected + y * cmsg_step1 + x; T* u = u_ + y * cmsg_step1 + x; T* d = d_ + y * cmsg_step1 + x; T* l = l_ + y * cmsg_step1 + x; T* r = r_ + y * cmsg_step1 + x; const T* disp = selected_disp_pyr_cur + y * cmsg_step1 + x; T* temp = (T*)ctemp + y * cmsg_step1 + x; message_per_pixel(data, u, r - 1, u + cmsg_step1, l + 1, disp, disp - cmsg_step1, nr_plane, temp); message_per_pixel(data, d, d - cmsg_step1, r - 1, l + 1, disp, disp + cmsg_step1, nr_plane, temp); message_per_pixel(data, l, u + cmsg_step1, d - cmsg_step1, l + 1, disp, disp - 1, nr_plane, temp); message_per_pixel(data, r, u + cmsg_step1, d - cmsg_step1, r - 1, disp, disp + 1, nr_plane, temp); } } template<class T> void calc_all_iterations(T* u, T* d, T* l, T* r, const T* data_cost_selected, const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, cudaStream_t stream) { size_t disp_step = msg_step * h; cudaSafeCall( 
cudaMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) ); cudaSafeCall( cudaMemcpyToSymbol(cmsg_step1, &msg_step, sizeof(size_t)) ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x << 1); grid.y = divUp(h, threads.y); for(int t = 0; t < iters; ++t) { compute_message<<<grid, threads, 0, stream>>>(u, d, l, r, data_cost_selected, selected_disp_pyr_cur, h, w, nr_plane, t & 1); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } }; template void calc_all_iterations(short* u, short* d, short* l, short* r, const short* data_cost_selected, const short* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, cudaStream_t stream); template void calc_all_iterations(float* u, float* d, float* l, float* r, const float* data_cost_selected, const float* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, cudaStream_t stream); /////////////////////////////////////////////////////////////// /////////////////////////// output //////////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __global__ void compute_disp(const T* u_, const T* d_, const T* l_, const T* r_, const T* data_cost_selected, const T* disp_selected_pyr, short* disp, size_t res_step, int cols, int rows, int nr_plane) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y > 0 && y < rows - 1 && x > 0 && x < cols - 1) { const T* data = data_cost_selected + y * cmsg_step1 + x; const T* disp_selected = disp_selected_pyr + y * cmsg_step1 + x; const T* u = u_ + (y+1) * cmsg_step1 + (x+0); const T* d = d_ + (y-1) * cmsg_step1 + (x+0); const T* l = l_ + (y+0) * cmsg_step1 + (x+1); const T* r = r_ + (y+0) * cmsg_step1 + (x-1); int best = 0; T best_val = numeric_limits<T>::max(); for (int i = 0; i < nr_plane; ++i) { int idx = i * cdisp_step1; T val = data[idx]+ u[idx] + d[idx] + l[idx] + r[idx]; if (val < best_val) { best_val = val; best = saturate_cast<short>(disp_selected[idx]); } } disp[res_step * y + x] = best; } } template<class T> void compute_disp(const T* u, const T* d, const T* l, const T* r, const T* data_cost_selected, const T* disp_selected, size_t msg_step, const DevMem2D_<short>& disp, int nr_plane, cudaStream_t stream) { size_t disp_step = disp.rows * msg_step; cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step, sizeof(size_t)) ); cudaSafeCall( cudaMemcpyToSymbol(cmsg_step1, &msg_step, sizeof(size_t)) ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(disp.cols, threads.x); grid.y = divUp(disp.rows, threads.y); compute_disp<<<grid, threads, 0, stream>>>(u, d, l, r, data_cost_selected, disp_selected, disp.data, disp.step / disp.elemSize(), disp.cols, disp.rows, nr_plane); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void compute_disp(const short* u, const short* d, const short* l, const short* r, const short* data_cost_selected, const short* disp_selected, size_t msg_step, const DevMem2D_<short>& disp, int nr_plane, cudaStream_t stream); template void compute_disp(const float* u, const float* d, const float* l, const float* r, const float* data_cost_selected, const float* disp_selected, size_t msg_step, const DevMem2D_<short>& disp, int nr_plane, cudaStream_t stream); } // namespace stereocsbp }}} // namespace cv { namespace gpu { namespace device {
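The compute_message kernel in the file above sweeps the grid in a red-black (checkerboard) order: for inner parity i = t & 1, a thread with linear column index k updates x = 2*k + ((y + i) & 1), and the grid is sized with divUp(w, threads.x << 1) so each pass covers half the columns of each row. The host-only sketch below is hypothetical and merely reproduces that index arithmetic (the interior-only boundary test of the kernel is omitted) so the two-pass coverage can be checked by eye.

// Hypothetical host-only demo of the checkerboard column selection.
#include <cstdio>

int main()
{
    const int w = 8, h = 4;
    for (int i = 0; i < 2; ++i)                    // i = t & 1 in calc_all_iterations
    {
        std::printf("parity %d:\n", i);
        for (int y = 0; y < h; ++y)
        {
            // k plays the role of blockIdx.x * blockDim.x + threadIdx.x
            for (int k = 0; 2 * k + ((y + i) & 1) < w; ++k)
                std::printf("%2d ", 2 * k + ((y + i) & 1));  // columns touched on row y
            std::printf("\n");
        }
    }
    return 0;
}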
b287cd1fa7a62df478010608614d8d470876f7b2.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cudaMatrixScaleAndAddFunctions.h>

// Scales B in place, element by element, inside a single thread;
// the C and D parameters are accepted but not used by this kernel body.
__global__ void scaleAndAddKernel(int size, float scale, float *B, float *C, float *D)
{
  for (int i = 0; i < size; i++)
  {
    B[i] = scale * B[i];
    printf("%f ", B[i]);
  }
  printf("\n");
}
b287cd1fa7a62df478010608614d8d470876f7b2.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include <cudaMatrixScaleAndAddFunctions.h>

// Scales B in place, element by element, inside a single thread;
// the C and D parameters are accepted but not used by this kernel body.
__global__ void scaleAndAddKernel(int size, float scale, float *B, float *C, float *D)
{
  for (int i = 0; i < size; i++)
  {
    B[i] = scale * B[i];
    printf("%f ", B[i]);
  }
  printf("\n");
}
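A possible host-side use of the kernel in the pair above, shown only as a sketch: it assumes it is appended to the same .cu translation unit so the kernel definition is visible, and it skips error checking. Because the kernel walks the whole array serially inside a single thread, a one-thread launch is used; the C and D parameters are never touched by the kernel body, so nulls are passed. The helper name scale_demo is invented.

// Hypothetical usage sketch; not part of the original file.
#include <cuda_runtime.h>
#include <cstdio>

int scale_demo()
{
    const int n = 4;
    const float scale = 2.0f;
    float hB[n] = {1.f, 2.f, 3.f, 4.f};

    float* dB = 0;
    cudaMalloc(&dB, n * sizeof(float));
    cudaMemcpy(dB, hB, n * sizeof(float), cudaMemcpyHostToDevice);

    // One thread walks the whole array; C and D are unused, so pass null.
    scaleAndAddKernel<<<1, 1>>>(n, scale, dB, NULL, NULL);
    cudaDeviceSynchronize();

    cudaMemcpy(hB, dB, n * sizeof(float), cudaMemcpyDeviceToHost);
    std::printf("hB[0] = %f (expected %f)\n", hB[0], 1.f * scale);

    cudaFree(dB);
    return 0;
}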
5396bdd69fed073dbc27e749463fc073d5938aff.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h"
#include "HeterogeneousCore/CUDAUtilities/interface/exitSansCUDADevices.h"

__global__ void testIt(int one){ assert(one == 1); }

int main(int argc, char* argv[]) {
  exitSansCUDADevices();

  hipLaunchKernelGGL(( testIt), dim3(1),dim3(1), 0, 0, argc);
  hipDeviceSynchronize();

  return (argc == 1) ? EXIT_SUCCESS : EXIT_FAILURE;
}
5396bdd69fed073dbc27e749463fc073d5938aff.cu
#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h" #include "HeterogeneousCore/CUDAUtilities/interface/exitSansCUDADevices.h" __global__ void testIt(int one){ assert(one == 1); } int main(int argc, char* argv[]) { exitSansCUDADevices(); testIt<<<1,1>>>(argc); cudaDeviceSynchronize(); return (argc == 1) ? EXIT_SUCCESS : EXIT_FAILURE; }
cc4a09df7c1e74486496f6e5256ef00585ef39a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TPB 16 __global__ void ldc_D3Q15_LBGK_ts(float * fOut,const float * fIn, const int * snl, const int * lnl, const float u_bc,const float omega, const int Nx, const int Ny, const int Nz){ int X=threadIdx.x+blockIdx.x*blockDim.x; int Y=threadIdx.y+blockIdx.y*blockDim.y; int Z=threadIdx.z+blockIdx.z*blockDim.z; if((X<Nx)&&(Y<Ny)&&(Z<Nz)){ int tid=X+Y*Nx+Z*Nx*Ny; float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14; float cu; //load the data into registers f0=fIn[tid]; f1=fIn[Nx*Ny*Nz+tid]; f2=fIn[2*Nx*Ny*Nz+tid]; f3=fIn[3*Nx*Ny*Nz+tid]; f4=fIn[4*Nx*Ny*Nz+tid]; f5=fIn[5*Nx*Ny*Nz+tid]; f6=fIn[6*Nx*Ny*Nz+tid]; f7=fIn[7*Nx*Ny*Nz+tid]; f8=fIn[8*Nx*Ny*Nz+tid]; f9=fIn[9*Nx*Ny*Nz+tid]; f10=fIn[10*Nx*Ny*Nz+tid]; f11=fIn[11*Nx*Ny*Nz+tid]; f12=fIn[12*Nx*Ny*Nz+tid]; f13=fIn[13*Nx*Ny*Nz+tid]; f14=fIn[14*Nx*Ny*Nz+tid]; //compute density float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14; float ux=f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; ux/=rho; float uy=f3-f4+f7+f8-f9-f10+f11+f12-f13-f14; uy/=rho; float uz=f5-f6+f7+f8+f9+f10-f11-f12-f13-f14; uz/=rho; //if it's a lid node, update //if(lnl[tid]==1){ if((X==0)&&(!((Y==0)||(Y==(Ny-1))||(Z==0)||(Z==(Nz-1))))){ //speed 1 ex=1 ey=ez=0. w=1./9. cu=3.*(1.)*(-ux); f1+=(1./9.)*rho*cu; //speed 2 ex=-1 ey=ez=0. w=1./9. cu=3.*(-1.)*(-ux); f2+=(1./9.)*rho*cu; //speed 3 ey=1; ex=ez=0; w=1./9. cu=3.*(1.)*(u_bc-uy); f3+=(1./9.)*rho*cu; //speed 4 ey=-1; ex=ez=0; w=1./9. cu=3.*(-1.)*(u_bc-uy); f4+=(1./9.)*rho*cu; //speed 5 ex=ey=0; ez=1; w=1./9. cu=3.*(1.)*(-uz); f5+=(1./9.)*rho*cu; //speed 6 ex=ey=0; ez=-1; w=1./9. cu=3.*(-1.)*(-uz); f6+=(1./9.)*rho*cu; //speed 7 ex=ey=ez=1; w=1./72. cu=3.*((1.)*-ux+(1.)*(u_bc-uy)+(1.)*-uz); f7+=(1./72.)*rho*cu; //speed 8 ex=-1 ey=ez=1; w=1./72. cu=3.*((-1.)*-ux+(1.)*(u_bc-uy)+(1.)*-uz); f8+=(1./72.)*rho*cu; //speed 9 ex=1 ey=-1 ez=1 cu=3.0*((1.)*-ux+(-1.)*(u_bc-uy)+(1.)*-uz); f9+=(1./72.)*rho*cu; //speed 10 ex=-1 ey=-1 ez=1 cu=3.0*((-1.)*-ux+(-1.)*(u_bc-uy)+(1.)*-uz); f10+=(1./72.)*rho*cu; //speed 11 ex=1 ey=1 ez=-1 cu=3.0*((1.)*-ux +(1.)*(u_bc-uy)+(-1.)*-uz); f11+=(1./72.)*rho*cu; //speed 12 ex=-1 ey=1 ez=-1 cu=3.0*((-1.)*-ux+(1.)*(u_bc-uy)+(-1.)*-uz); f12+=(1./72.)*rho*cu; //speed 13 ex=1 ey=-1 ez=-1 w=1./72. cu=3.0*((1.)*-ux+(-1.)*(u_bc-uy)+(-1.)*-uz); f13+=(1./72.)*rho*cu; //speed 14 ex=ey=ez=-1 w=1./72. cu=3.0*((-1.)*-ux + (-1.)*(u_bc-uy) +(-1.)*-uz); f14+=(1./72.)*rho*cu; ux=0.; uy=u_bc; uz=0.; }//if(lnl[tid]==1)... //if(snl[tid]==1){ if(((Y==0)||(Y==(Ny-1))||(Z==0)||(Z==(Nz-1))||(X==(Nx-1)))){ // 1--2 cu=f2; f2=f1; f1=cu; //3--4 cu=f4; f4=f3; f3=cu; //5--6 cu=f6; f6=f5; f5=cu; //7--14 cu=f14; f14=f7; f7=cu; //8--13 cu=f13; f13=f8; f8=cu; //9--12 cu=f12; f12=f9; f9=cu; //10--11 cu=f11; f11=f10; f10=cu; }else{ //relax //speed 0 ex=ey=ez=0 w=2./9. float fEq; fEq=rho*(2./9.)*(1.-1.5*(ux*ux+uy*uy+uz*uz)); f0=f0-omega*(f0-fEq); //speed 1 ex=1 ey=ez=0 w=1./9. cu=3.*(1.*ux); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f1=f1-omega*(f1-fEq); //speed 2 ex=-1 ey=ez=0 w=1./9. cu=3.*((-1.)*ux); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f2=f2-omega*(f2-fEq); //speed 3 ex=0 ey=1 ez=0 w=1./9. cu=3.*(1.*uy); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f3=f3-omega*(f3-fEq); //speed 4 ex=0 ey=-1 ez=0 w=1./9. cu=3.*(-1.*uy); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f4=f4-omega*(f4-fEq); //speed 5 ex=ey=0 ez=1 w=1./9. 
cu=3.*(1.*uz); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f5=f5-omega*(f5-fEq); //speed 6 ex=ey=0 ez=-1 w=1./9. cu=3.*(-1.*uz); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f6=f6-omega*(f6-fEq); //speed 7 ex=ey=ez=1 w=1./72. cu=3.*(ux+uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f7=f7-omega*(f7-fEq); //speed 8 ex=-1 ey=ez=1 w=1./72. cu=3.*(-ux+uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f8=f8-omega*(f8-fEq); //speed 9 ex=1 ey=-1 ez=1 w=1./72. cu=3.*(ux-uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f9=f9-omega*(f9-fEq); //speed 10 ex=-1 ey=-1 ez=1 w=1/72 cu=3.*(-ux-uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f10=f10-omega*(f10-fEq); //speed 11 ex=1 ey=1 ez=-1 w=1/72 cu=3.*(ux+uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f11=f11-omega*(f11-fEq); //speed 12 ex=-1 ey=1 ez=-1 w=1/72 cu=3.*(-ux+uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f12=f12-omega*(f12-fEq); //speed 13 ex=1 ey=ez=-1 w=1/72 cu=3.*(ux-uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f13=f13-omega*(f13-fEq); //speed 14 ex=ey=ez=-1 w=1/72 cu=3.*(-ux-uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f14=f14-omega*(f14-fEq); }//if/else snl //now, everybody streams... int X_t, Y_t, Z_t; int tid_t; //speed 0 ex=ey=ez=0 fOut[tid]=f0; //speed 1 ex=1 ey=ez=0 X_t=X+1; Y_t=Y; Z_t=Z; if(X_t==Nx) X_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[Nx*Ny*Nz+tid_t]=f1; //speed 2 ex=-1 ey=ez=0; X_t=X-1; Y_t=Y; Z_t=Z; if(X_t<0) X_t=(Nx-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[2*Nx*Ny*Nz+tid_t]=f2; //speed 3 ex=0 ey=1 ez=0 X_t=X; Y_t=Y+1; Z_t=Z; if(Y_t==Ny) Y_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[3*Nx*Ny*Nz+tid_t]=f3; //speed 4 ex=0 ey=-1 ez=0 X_t=X; Y_t=Y-1; Z_t=Z; if(Y_t<0) Y_t=(Ny-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[4*Nx*Ny*Nz+tid_t]=f4; //speed 5 ex=ey=0 ez=1 X_t=X; Y_t=Y; Z_t=Z+1; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[5*Nx*Ny*Nz+tid_t]=f5; //speed 6 ex=ey=0 ez=-1 X_t=X; Y_t=Y; Z_t=Z-1; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[6*Nx*Ny*Nz+tid_t]=f6; //speed 7 ex=ey=ez=1 X_t=X+1; Y_t=Y+1; Z_t=Z+1; if(X_t==Nx) X_t=0; if(Y_t==Ny) Y_t=0; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[7*Nx*Ny*Nz+tid_t]=f7; //speed 8 ex=-1 ey=1 ez=1 X_t=X-1; Y_t=Y+1; Z_t=Z+1; if(X_t<0) X_t=(Nx-1); if(Y_t==Ny) Y_t=0; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[8*Nx*Ny*Nz+tid_t]=f8; //speed 9 ex=1 ey=-1 ez=1 X_t=X+1; Y_t=Y-1; Z_t=Z+1; if(X_t==Nx) X_t=0; if(Y_t<0) Y_t=(Ny-1); if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[9*Nx*Ny*Nz+tid_t]=f9; //speed 10 ex=-1 ey=-1 ez=1 X_t=X-1; Y_t=Y-1; Z_t=Z+1; if(X_t<0) X_t=(Nx-1); if(Y_t<0) Y_t=(Ny-1); if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[10*Nx*Ny*Nz+tid_t]=f10; //speed 11 ex=1 ey=1 ez=-1 X_t=X+1; Y_t=Y+1; Z_t=Z-1; if(X_t==Nx) X_t=0; if(Y_t==Ny) Y_t=0; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[11*Nx*Ny*Nz+tid_t]=f11; //speed 12 ex=-1 ey=1 ez=-1 X_t=X-1; Y_t=Y+1; Z_t=Z-1; if(X_t<0) X_t=(Nx-1); if(Y_t==Ny) Y_t=0; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[12*Nx*Ny*Nz+tid_t]=f12; //speed 13 ex=1 ey=-1 ez=-1 X_t=X+1; Y_t=Y-1; Z_t=Z-1; if(X_t==Nx) X_t=0; if(Y_t<0) Y_t=(Ny-1); if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[13*Nx*Ny*Nz+tid_t]=f13; //speed 14 ex=ey=ez=-1 X_t=X-1; Y_t=Y-1; Z_t=Z-1; if(X_t<0) X_t=(Nx-1); if(Y_t<0) Y_t=(Ny-1); if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[14*Nx*Ny*Nz+tid_t]=f14; 
}//if(X<Nx... } void ldc_D3Q15_LBGK_ts_cuda(float * fOut, const float * fIn, const int * snl, const int * lnl, const float u_bc, const float omega, const int Nx, const int Ny, const int Nz){ dim3 BLOCKS(TPB,TPB,1); dim3 GRIDS((Nx+TPB-1)/TPB,(Ny+TPB-1)/TPB,Nz); hipLaunchKernelGGL(( ldc_D3Q15_LBGK_ts), dim3(GRIDS),dim3(BLOCKS), 0, 0, fOut,fIn,snl,lnl,u_bc, omega,Nx,Ny,Nz); } __global__ void ldc_D3Q15_LBGK_tsT(float * fOut,const float * fIn, const int * snl, const int * lnl, const float u_bc,const float omega, const int Nx, const int Ny, const int Nz){ int X=threadIdx.x+blockIdx.x*blockDim.x; int Y=threadIdx.y+blockIdx.y*blockDim.y; int Z=threadIdx.z+blockIdx.z*blockDim.z; if((X<Nx)&&(Y<Ny)&&(Z<Nz)){ int tid=X+Y*Nx+Z*Nx*Ny; //3 mul, 3 add float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14; float cu; //load the data into registers // f0=fIn[tid]; f1=fIn[Nx*Ny*Nz+tid]; // f2=fIn[2*Nx*Ny*Nz+tid]; f3=fIn[3*Nx*Ny*Nz+tid]; // f4=fIn[4*Nx*Ny*Nz+tid]; f5=fIn[5*Nx*Ny*Nz+tid]; // f6=fIn[6*Nx*Ny*Nz+tid]; f7=fIn[7*Nx*Ny*Nz+tid]; // f8=fIn[8*Nx*Ny*Nz+tid]; f9=fIn[9*Nx*Ny*Nz+tid]; // f10=fIn[10*Nx*Ny*Nz+tid]; f11=fIn[11*Nx*Ny*Nz+tid]; // f12=fIn[12*Nx*Ny*Nz+tid]; f13=fIn[13*Nx*Ny*Nz+tid]; // f14=fIn[14*Nx*Ny*Nz+tid]; f0=fIn[tid*15]; f1=fIn[tid*15+1]; f2=fIn[tid*15+2]; f3=fIn[tid*15+3]; f4=fIn[tid*15+4]; f5=fIn[tid*15+5]; f6=fIn[tid*15+6]; f7=fIn[tid*15+7]; f8=fIn[tid*15+8]; f9=fIn[tid*15+9]; f10=fIn[tid*15+10]; f11=fIn[tid*15+11]; f12=fIn[tid*15+12]; f13=fIn[tid*15+13]; f14=fIn[tid*15+14]; //compute density float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14;//13 add float ux=f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; ux/=rho; //9 add, 1 mul float uy=f3-f4+f7+f8-f9-f10+f11+f12-f13-f14; uy/=rho;//9 add, 1 mul float uz=f5-f6+f7+f8+f9+f10-f11-f12-f13-f14; uz/=rho;//9 add, 1 mul //if it's a lid node, update // if(lnl[tid]==1){ if((X==0)&&(!((Y==0)||(Y==(Ny-1))||(Z==0)||(Z==(Nz-1))))){ //speed 1 ex=1 ey=ez=0. w=1./9. //6 mul, 1 add cu=3.*(1.)*(-ux); f1+=(1./9.)*rho*cu; //speed 2 ex=-1 ey=ez=0. w=1./9. //6 mul, 1 add cu=3.*(-1.)*(-ux); f2+=(1./9.)*rho*cu; //speed 3 ey=1; ex=ez=0; w=1./9. //6 mul, 2 add cu=3.*(1.)*(u_bc-uy); f3+=(1./9.)*rho*cu; //speed 4 ey=-1; ex=ez=0; w=1./9. //6 mul, 2 add cu=3.*(-1.)*(u_bc-uy); f4+=(1./9.)*rho*cu; //speed 5 ex=ey=0; ez=1; w=1./9. //6 mul, 2 add cu=3.*(1.)*(-uz); f5+=(1./9.)*rho*cu; //speed 6 ex=ey=0; ez=-1; w=1./9. //6 mul, 1 add cu=3.*(-1.)*(-uz); f6+=(1./9.)*rho*cu; //speed 7 ex=ey=ez=1; w=1./72. cu=3.*((1.)*-ux+(1.)*(u_bc-uy)+(1.)*-uz); //9 mul, 4 add f7+=(1./72.)*rho*cu; //speed 8 ex=-1 ey=ez=1; w=1./72. cu=3.*((-1.)*-ux+(1.)*(u_bc-uy)+(1.)*-uz); //9 mul, 4 add f8+=(1./72.)*rho*cu; //speed 9 ex=1 ey=-1 ez=1 cu=3.0*((1.)*-ux+(-1.)*(u_bc-uy)+(1.)*-uz);//9 mul, 4 add f9+=(1./72.)*rho*cu; //speed 10 ex=-1 ey=-1 ez=1 cu=3.0*((-1.)*-ux+(-1.)*(u_bc-uy)+(1.)*-uz); //9 mul, 4 add f10+=(1./72.)*rho*cu; //speed 11 ex=1 ey=1 ez=-1 cu=3.0*((1.)*-ux +(1.)*(u_bc-uy)+(-1.)*-uz); //9 mul, 4 add f11+=(1./72.)*rho*cu; //speed 12 ex=-1 ey=1 ez=-1 cu=3.0*((-1.)*-ux+(1.)*(u_bc-uy)+(-1.)*-uz);// 9 mul, 4 add f12+=(1./72.)*rho*cu; //speed 13 ex=1 ey=-1 ez=-1 w=1./72. cu=3.0*((1.)*-ux+(-1.)*(u_bc-uy)+(-1.)*-uz);//9 mul, 4 add f13+=(1./72.)*rho*cu; //speed 14 ex=ey=ez=-1 w=1./72. cu=3.0*((-1.)*-ux + (-1.)*(u_bc-uy) +(-1.)*-uz); //9 mul, 4 add f14+=(1./72.)*rho*cu; ux=0.; uy=u_bc; uz=0.; }//if(lnl[tid]==1)... 
//if(snl[tid]==1){ if(((Y==0)||(Y==(Ny-1))||(Z==0)||(Z==(Nz-1))||(X==(Nx-1)))){ // 1--2 cu=f2; f2=f1; f1=cu; //3--4 cu=f4; f4=f3; f3=cu; //5--6 cu=f6; f6=f5; f5=cu; //7--14 cu=f14; f14=f7; f7=cu; //8--13 cu=f13; f13=f8; f8=cu; //9--12 cu=f12; f12=f9; f9=cu; //10--11 cu=f11; f11=f10; f10=cu; }else{ //relax //speed 0 ex=ey=ez=0 w=2./9. float fEq; fEq=rho*(2./9.)*(1.-1.5*(ux*ux+uy*uy+uz*uz)); //est 10 mul, 5 add per speed f0=f0-omega*(f0-fEq); //speed 1 ex=1 ey=ez=0 w=1./9. cu=3.*(1.*ux); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f1=f1-omega*(f1-fEq); //speed 2 ex=-1 ey=ez=0 w=1./9. cu=3.*((-1.)*ux); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f2=f2-omega*(f2-fEq); //speed 3 ex=0 ey=1 ez=0 w=1./9. cu=3.*(1.*uy); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f3=f3-omega*(f3-fEq); //speed 4 ex=0 ey=-1 ez=0 w=1./9. cu=3.*(-1.*uy); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f4=f4-omega*(f4-fEq); //speed 5 ex=ey=0 ez=1 w=1./9. cu=3.*(1.*uz); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f5=f5-omega*(f5-fEq); //speed 6 ex=ey=0 ez=-1 w=1./9. cu=3.*(-1.*uz); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f6=f6-omega*(f6-fEq); //speed 7 ex=ey=ez=1 w=1./72. cu=3.*(ux+uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f7=f7-omega*(f7-fEq); //speed 8 ex=-1 ey=ez=1 w=1./72. cu=3.*(-ux+uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f8=f8-omega*(f8-fEq); //speed 9 ex=1 ey=-1 ez=1 w=1./72. cu=3.*(ux-uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f9=f9-omega*(f9-fEq); //speed 10 ex=-1 ey=-1 ez=1 w=1/72 cu=3.*(-ux-uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f10=f10-omega*(f10-fEq); //speed 11 ex=1 ey=1 ez=-1 w=1/72 cu=3.*(ux+uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f11=f11-omega*(f11-fEq); //speed 12 ex=-1 ey=1 ez=-1 w=1/72 cu=3.*(-ux+uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f12=f12-omega*(f12-fEq); //speed 13 ex=1 ey=ez=-1 w=1/72 cu=3.*(ux-uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f13=f13-omega*(f13-fEq); //speed 14 ex=ey=ez=-1 w=1/72 cu=3.*(-ux-uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f14=f14-omega*(f14-fEq); }//if/else snl //now, everybody streams... 
int X_t, Y_t, Z_t; int tid_t; //speed 0 ex=ey=ez=0 //fOut[tid]=f0; fOut[tid*15]=f0; //speed 1 ex=1 ey=ez=0 // est 5 mul, 3 add per speed X_t=X+1; Y_t=Y; Z_t=Z; if(X_t==Nx) X_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; //fOut[Nx*Ny*Nz+tid_t]=f1; fOut[tid_t*15+1]=f1; //speed 2 ex=-1 ey=ez=0; X_t=X-1; Y_t=Y; Z_t=Z; if(X_t<0) X_t=(Nx-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[2*Nx*Ny*Nz+tid_t]=f2; fOut[tid_t*15+2]=f2; //speed 3 ex=0 ey=1 ez=0 X_t=X; Y_t=Y+1; Z_t=Z; if(Y_t==Ny) Y_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[3*Nx*Ny*Nz+tid_t]=f3; fOut[tid_t*15+3]=f3; //speed 4 ex=0 ey=-1 ez=0 X_t=X; Y_t=Y-1; Z_t=Z; if(Y_t<0) Y_t=(Ny-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; //fOut[4*Nx*Ny*Nz+tid_t]=f4; fOut[tid_t*15+4]=f4; //speed 5 ex=ey=0 ez=1 X_t=X; Y_t=Y; Z_t=Z+1; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[5*Nx*Ny*Nz+tid_t]=f5; fOut[tid_t*15+5]=f5; //speed 6 ex=ey=0 ez=-1 X_t=X; Y_t=Y; Z_t=Z-1; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[6*Nx*Ny*Nz+tid_t]=f6; fOut[tid_t*15+6]=f6; //speed 7 ex=ey=ez=1 X_t=X+1; Y_t=Y+1; Z_t=Z+1; if(X_t==Nx) X_t=0; if(Y_t==Ny) Y_t=0; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[7*Nx*Ny*Nz+tid_t]=f7; fOut[tid_t*15+7]=f7; //speed 8 ex=-1 ey=1 ez=1 X_t=X-1; Y_t=Y+1; Z_t=Z+1; if(X_t<0) X_t=(Nx-1); if(Y_t==Ny) Y_t=0; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[8*Nx*Ny*Nz+tid_t]=f8; fOut[tid_t*15+8]=f8; //speed 9 ex=1 ey=-1 ez=1 X_t=X+1; Y_t=Y-1; Z_t=Z+1; if(X_t==Nx) X_t=0; if(Y_t<0) Y_t=(Ny-1); if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[9*Nx*Ny*Nz+tid_t]=f9; fOut[tid_t*15+9]=f9; //speed 10 ex=-1 ey=-1 ez=1 X_t=X-1; Y_t=Y-1; Z_t=Z+1; if(X_t<0) X_t=(Nx-1); if(Y_t<0) Y_t=(Ny-1); if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[10*Nx*Ny*Nz+tid_t]=f10; fOut[tid_t*15+10]=f10; //speed 11 ex=1 ey=1 ez=-1 X_t=X+1; Y_t=Y+1; Z_t=Z-1; if(X_t==Nx) X_t=0; if(Y_t==Ny) Y_t=0; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; //fOut[11*Nx*Ny*Nz+tid_t]=f11; fOut[tid_t*15+11]=f11; //speed 12 ex=-1 ey=1 ez=-1 X_t=X-1; Y_t=Y+1; Z_t=Z-1; if(X_t<0) X_t=(Nx-1); if(Y_t==Ny) Y_t=0; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[12*Nx*Ny*Nz+tid_t]=f12; fOut[tid_t*15+12]=f12; //speed 13 ex=1 ey=-1 ez=-1 X_t=X+1; Y_t=Y-1; Z_t=Z-1; if(X_t==Nx) X_t=0; if(Y_t<0) Y_t=(Ny-1); if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; //fOut[13*Nx*Ny*Nz+tid_t]=f13; fOut[tid_t*15+13]=f13; //speed 14 ex=ey=ez=-1 X_t=X-1; Y_t=Y-1; Z_t=Z-1; if(X_t<0) X_t=(Nx-1); if(Y_t<0) Y_t=(Ny-1); if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; //fOut[14*Nx*Ny*Nz+tid_t]=f14; fOut[tid_t*15+14]=f14; }//if(X<Nx... } void ldc_D3Q15_LBGK_ts_cudaT(float * fOut, const float * fIn, const int * snl, const int * lnl, const float u_bc, const float omega, const int Nx, const int Ny, const int Nz){ dim3 BLOCKS(TPB,TPB,1); dim3 GRIDS((Nx+TPB-1)/TPB,(Ny+TPB-1)/TPB,Nz); hipLaunchKernelGGL(( ldc_D3Q15_LBGK_tsT), dim3(GRIDS),dim3(BLOCKS), 0, 0, fOut,fIn,snl,lnl,u_bc, omega,Nx,Ny,Nz); }
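The two kernels in the file above differ mainly in how the fifteen distribution values per lattice node are laid out: ldc_D3Q15_LBGK_ts addresses speed s of node tid at s*Nx*Ny*Nz + tid (structure-of-arrays), while ldc_D3Q15_LBGK_tsT addresses it at tid*15 + s (array-of-structures). The host-only sketch below is hypothetical and just computes both index forms for one node, so the two layouts can be compared directly.

// Hypothetical host-only comparison of the two D3Q15 storage layouts.
#include <cstdio>

int main()
{
    const int Nx = 4, Ny = 3, Nz = 2, nSpeeds = 15;
    const int X = 1, Y = 2, Z = 1, s = 7;       // arbitrary node and speed
    const int tid = X + Y * Nx + Z * Nx * Ny;   // same flattening as the kernels

    const long soa_index = (long)s * Nx * Ny * Nz + tid;  // layout read by ..._ts
    const long aos_index = (long)tid * nSpeeds + s;       // layout read by ..._tsT

    std::printf("node tid=%d, speed %d -> SoA index %ld, AoS index %ld\n",
                tid, s, soa_index, aos_index);
    return 0;
}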
cc4a09df7c1e74486496f6e5256ef00585ef39a4.cu
#define TPB 16 __global__ void ldc_D3Q15_LBGK_ts(float * fOut,const float * fIn, const int * snl, const int * lnl, const float u_bc,const float omega, const int Nx, const int Ny, const int Nz){ int X=threadIdx.x+blockIdx.x*blockDim.x; int Y=threadIdx.y+blockIdx.y*blockDim.y; int Z=threadIdx.z+blockIdx.z*blockDim.z; if((X<Nx)&&(Y<Ny)&&(Z<Nz)){ int tid=X+Y*Nx+Z*Nx*Ny; float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14; float cu; //load the data into registers f0=fIn[tid]; f1=fIn[Nx*Ny*Nz+tid]; f2=fIn[2*Nx*Ny*Nz+tid]; f3=fIn[3*Nx*Ny*Nz+tid]; f4=fIn[4*Nx*Ny*Nz+tid]; f5=fIn[5*Nx*Ny*Nz+tid]; f6=fIn[6*Nx*Ny*Nz+tid]; f7=fIn[7*Nx*Ny*Nz+tid]; f8=fIn[8*Nx*Ny*Nz+tid]; f9=fIn[9*Nx*Ny*Nz+tid]; f10=fIn[10*Nx*Ny*Nz+tid]; f11=fIn[11*Nx*Ny*Nz+tid]; f12=fIn[12*Nx*Ny*Nz+tid]; f13=fIn[13*Nx*Ny*Nz+tid]; f14=fIn[14*Nx*Ny*Nz+tid]; //compute density float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14; float ux=f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; ux/=rho; float uy=f3-f4+f7+f8-f9-f10+f11+f12-f13-f14; uy/=rho; float uz=f5-f6+f7+f8+f9+f10-f11-f12-f13-f14; uz/=rho; //if it's a lid node, update //if(lnl[tid]==1){ if((X==0)&&(!((Y==0)||(Y==(Ny-1))||(Z==0)||(Z==(Nz-1))))){ //speed 1 ex=1 ey=ez=0. w=1./9. cu=3.*(1.)*(-ux); f1+=(1./9.)*rho*cu; //speed 2 ex=-1 ey=ez=0. w=1./9. cu=3.*(-1.)*(-ux); f2+=(1./9.)*rho*cu; //speed 3 ey=1; ex=ez=0; w=1./9. cu=3.*(1.)*(u_bc-uy); f3+=(1./9.)*rho*cu; //speed 4 ey=-1; ex=ez=0; w=1./9. cu=3.*(-1.)*(u_bc-uy); f4+=(1./9.)*rho*cu; //speed 5 ex=ey=0; ez=1; w=1./9. cu=3.*(1.)*(-uz); f5+=(1./9.)*rho*cu; //speed 6 ex=ey=0; ez=-1; w=1./9. cu=3.*(-1.)*(-uz); f6+=(1./9.)*rho*cu; //speed 7 ex=ey=ez=1; w=1./72. cu=3.*((1.)*-ux+(1.)*(u_bc-uy)+(1.)*-uz); f7+=(1./72.)*rho*cu; //speed 8 ex=-1 ey=ez=1; w=1./72. cu=3.*((-1.)*-ux+(1.)*(u_bc-uy)+(1.)*-uz); f8+=(1./72.)*rho*cu; //speed 9 ex=1 ey=-1 ez=1 cu=3.0*((1.)*-ux+(-1.)*(u_bc-uy)+(1.)*-uz); f9+=(1./72.)*rho*cu; //speed 10 ex=-1 ey=-1 ez=1 cu=3.0*((-1.)*-ux+(-1.)*(u_bc-uy)+(1.)*-uz); f10+=(1./72.)*rho*cu; //speed 11 ex=1 ey=1 ez=-1 cu=3.0*((1.)*-ux +(1.)*(u_bc-uy)+(-1.)*-uz); f11+=(1./72.)*rho*cu; //speed 12 ex=-1 ey=1 ez=-1 cu=3.0*((-1.)*-ux+(1.)*(u_bc-uy)+(-1.)*-uz); f12+=(1./72.)*rho*cu; //speed 13 ex=1 ey=-1 ez=-1 w=1./72. cu=3.0*((1.)*-ux+(-1.)*(u_bc-uy)+(-1.)*-uz); f13+=(1./72.)*rho*cu; //speed 14 ex=ey=ez=-1 w=1./72. cu=3.0*((-1.)*-ux + (-1.)*(u_bc-uy) +(-1.)*-uz); f14+=(1./72.)*rho*cu; ux=0.; uy=u_bc; uz=0.; }//if(lnl[tid]==1)... //if(snl[tid]==1){ if(((Y==0)||(Y==(Ny-1))||(Z==0)||(Z==(Nz-1))||(X==(Nx-1)))){ // 1--2 cu=f2; f2=f1; f1=cu; //3--4 cu=f4; f4=f3; f3=cu; //5--6 cu=f6; f6=f5; f5=cu; //7--14 cu=f14; f14=f7; f7=cu; //8--13 cu=f13; f13=f8; f8=cu; //9--12 cu=f12; f12=f9; f9=cu; //10--11 cu=f11; f11=f10; f10=cu; }else{ //relax //speed 0 ex=ey=ez=0 w=2./9. float fEq; fEq=rho*(2./9.)*(1.-1.5*(ux*ux+uy*uy+uz*uz)); f0=f0-omega*(f0-fEq); //speed 1 ex=1 ey=ez=0 w=1./9. cu=3.*(1.*ux); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f1=f1-omega*(f1-fEq); //speed 2 ex=-1 ey=ez=0 w=1./9. cu=3.*((-1.)*ux); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f2=f2-omega*(f2-fEq); //speed 3 ex=0 ey=1 ez=0 w=1./9. cu=3.*(1.*uy); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f3=f3-omega*(f3-fEq); //speed 4 ex=0 ey=-1 ez=0 w=1./9. cu=3.*(-1.*uy); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f4=f4-omega*(f4-fEq); //speed 5 ex=ey=0 ez=1 w=1./9. cu=3.*(1.*uz); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f5=f5-omega*(f5-fEq); //speed 6 ex=ey=0 ez=-1 w=1./9. 
cu=3.*(-1.*uz); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f6=f6-omega*(f6-fEq); //speed 7 ex=ey=ez=1 w=1./72. cu=3.*(ux+uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f7=f7-omega*(f7-fEq); //speed 8 ex=-1 ey=ez=1 w=1./72. cu=3.*(-ux+uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f8=f8-omega*(f8-fEq); //speed 9 ex=1 ey=-1 ez=1 w=1./72. cu=3.*(ux-uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f9=f9-omega*(f9-fEq); //speed 10 ex=-1 ey=-1 ez=1 w=1/72 cu=3.*(-ux-uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f10=f10-omega*(f10-fEq); //speed 11 ex=1 ey=1 ez=-1 w=1/72 cu=3.*(ux+uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f11=f11-omega*(f11-fEq); //speed 12 ex=-1 ey=1 ez=-1 w=1/72 cu=3.*(-ux+uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f12=f12-omega*(f12-fEq); //speed 13 ex=1 ey=ez=-1 w=1/72 cu=3.*(ux-uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f13=f13-omega*(f13-fEq); //speed 14 ex=ey=ez=-1 w=1/72 cu=3.*(-ux-uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f14=f14-omega*(f14-fEq); }//if/else snl //now, everybody streams... int X_t, Y_t, Z_t; int tid_t; //speed 0 ex=ey=ez=0 fOut[tid]=f0; //speed 1 ex=1 ey=ez=0 X_t=X+1; Y_t=Y; Z_t=Z; if(X_t==Nx) X_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[Nx*Ny*Nz+tid_t]=f1; //speed 2 ex=-1 ey=ez=0; X_t=X-1; Y_t=Y; Z_t=Z; if(X_t<0) X_t=(Nx-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[2*Nx*Ny*Nz+tid_t]=f2; //speed 3 ex=0 ey=1 ez=0 X_t=X; Y_t=Y+1; Z_t=Z; if(Y_t==Ny) Y_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[3*Nx*Ny*Nz+tid_t]=f3; //speed 4 ex=0 ey=-1 ez=0 X_t=X; Y_t=Y-1; Z_t=Z; if(Y_t<0) Y_t=(Ny-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[4*Nx*Ny*Nz+tid_t]=f4; //speed 5 ex=ey=0 ez=1 X_t=X; Y_t=Y; Z_t=Z+1; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[5*Nx*Ny*Nz+tid_t]=f5; //speed 6 ex=ey=0 ez=-1 X_t=X; Y_t=Y; Z_t=Z-1; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[6*Nx*Ny*Nz+tid_t]=f6; //speed 7 ex=ey=ez=1 X_t=X+1; Y_t=Y+1; Z_t=Z+1; if(X_t==Nx) X_t=0; if(Y_t==Ny) Y_t=0; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[7*Nx*Ny*Nz+tid_t]=f7; //speed 8 ex=-1 ey=1 ez=1 X_t=X-1; Y_t=Y+1; Z_t=Z+1; if(X_t<0) X_t=(Nx-1); if(Y_t==Ny) Y_t=0; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[8*Nx*Ny*Nz+tid_t]=f8; //speed 9 ex=1 ey=-1 ez=1 X_t=X+1; Y_t=Y-1; Z_t=Z+1; if(X_t==Nx) X_t=0; if(Y_t<0) Y_t=(Ny-1); if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[9*Nx*Ny*Nz+tid_t]=f9; //speed 10 ex=-1 ey=-1 ez=1 X_t=X-1; Y_t=Y-1; Z_t=Z+1; if(X_t<0) X_t=(Nx-1); if(Y_t<0) Y_t=(Ny-1); if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[10*Nx*Ny*Nz+tid_t]=f10; //speed 11 ex=1 ey=1 ez=-1 X_t=X+1; Y_t=Y+1; Z_t=Z-1; if(X_t==Nx) X_t=0; if(Y_t==Ny) Y_t=0; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[11*Nx*Ny*Nz+tid_t]=f11; //speed 12 ex=-1 ey=1 ez=-1 X_t=X-1; Y_t=Y+1; Z_t=Z-1; if(X_t<0) X_t=(Nx-1); if(Y_t==Ny) Y_t=0; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[12*Nx*Ny*Nz+tid_t]=f12; //speed 13 ex=1 ey=-1 ez=-1 X_t=X+1; Y_t=Y-1; Z_t=Z-1; if(X_t==Nx) X_t=0; if(Y_t<0) Y_t=(Ny-1); if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[13*Nx*Ny*Nz+tid_t]=f13; //speed 14 ex=ey=ez=-1 X_t=X-1; Y_t=Y-1; Z_t=Z-1; if(X_t<0) X_t=(Nx-1); if(Y_t<0) Y_t=(Ny-1); if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; fOut[14*Nx*Ny*Nz+tid_t]=f14; }//if(X<Nx... 
} void ldc_D3Q15_LBGK_ts_cuda(float * fOut, const float * fIn, const int * snl, const int * lnl, const float u_bc, const float omega, const int Nx, const int Ny, const int Nz){ dim3 BLOCKS(TPB,TPB,1); dim3 GRIDS((Nx+TPB-1)/TPB,(Ny+TPB-1)/TPB,Nz); ldc_D3Q15_LBGK_ts<<<GRIDS,BLOCKS>>>(fOut,fIn,snl,lnl,u_bc, omega,Nx,Ny,Nz); } __global__ void ldc_D3Q15_LBGK_tsT(float * fOut,const float * fIn, const int * snl, const int * lnl, const float u_bc,const float omega, const int Nx, const int Ny, const int Nz){ int X=threadIdx.x+blockIdx.x*blockDim.x; int Y=threadIdx.y+blockIdx.y*blockDim.y; int Z=threadIdx.z+blockIdx.z*blockDim.z; if((X<Nx)&&(Y<Ny)&&(Z<Nz)){ int tid=X+Y*Nx+Z*Nx*Ny; //3 mul, 3 add float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14; float cu; //load the data into registers // f0=fIn[tid]; f1=fIn[Nx*Ny*Nz+tid]; // f2=fIn[2*Nx*Ny*Nz+tid]; f3=fIn[3*Nx*Ny*Nz+tid]; // f4=fIn[4*Nx*Ny*Nz+tid]; f5=fIn[5*Nx*Ny*Nz+tid]; // f6=fIn[6*Nx*Ny*Nz+tid]; f7=fIn[7*Nx*Ny*Nz+tid]; // f8=fIn[8*Nx*Ny*Nz+tid]; f9=fIn[9*Nx*Ny*Nz+tid]; // f10=fIn[10*Nx*Ny*Nz+tid]; f11=fIn[11*Nx*Ny*Nz+tid]; // f12=fIn[12*Nx*Ny*Nz+tid]; f13=fIn[13*Nx*Ny*Nz+tid]; // f14=fIn[14*Nx*Ny*Nz+tid]; f0=fIn[tid*15]; f1=fIn[tid*15+1]; f2=fIn[tid*15+2]; f3=fIn[tid*15+3]; f4=fIn[tid*15+4]; f5=fIn[tid*15+5]; f6=fIn[tid*15+6]; f7=fIn[tid*15+7]; f8=fIn[tid*15+8]; f9=fIn[tid*15+9]; f10=fIn[tid*15+10]; f11=fIn[tid*15+11]; f12=fIn[tid*15+12]; f13=fIn[tid*15+13]; f14=fIn[tid*15+14]; //compute density float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14;//13 add float ux=f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; ux/=rho; //9 add, 1 mul float uy=f3-f4+f7+f8-f9-f10+f11+f12-f13-f14; uy/=rho;//9 add, 1 mul float uz=f5-f6+f7+f8+f9+f10-f11-f12-f13-f14; uz/=rho;//9 add, 1 mul //if it's a lid node, update // if(lnl[tid]==1){ if((X==0)&&(!((Y==0)||(Y==(Ny-1))||(Z==0)||(Z==(Nz-1))))){ //speed 1 ex=1 ey=ez=0. w=1./9. //6 mul, 1 add cu=3.*(1.)*(-ux); f1+=(1./9.)*rho*cu; //speed 2 ex=-1 ey=ez=0. w=1./9. //6 mul, 1 add cu=3.*(-1.)*(-ux); f2+=(1./9.)*rho*cu; //speed 3 ey=1; ex=ez=0; w=1./9. //6 mul, 2 add cu=3.*(1.)*(u_bc-uy); f3+=(1./9.)*rho*cu; //speed 4 ey=-1; ex=ez=0; w=1./9. //6 mul, 2 add cu=3.*(-1.)*(u_bc-uy); f4+=(1./9.)*rho*cu; //speed 5 ex=ey=0; ez=1; w=1./9. //6 mul, 2 add cu=3.*(1.)*(-uz); f5+=(1./9.)*rho*cu; //speed 6 ex=ey=0; ez=-1; w=1./9. //6 mul, 1 add cu=3.*(-1.)*(-uz); f6+=(1./9.)*rho*cu; //speed 7 ex=ey=ez=1; w=1./72. cu=3.*((1.)*-ux+(1.)*(u_bc-uy)+(1.)*-uz); //9 mul, 4 add f7+=(1./72.)*rho*cu; //speed 8 ex=-1 ey=ez=1; w=1./72. cu=3.*((-1.)*-ux+(1.)*(u_bc-uy)+(1.)*-uz); //9 mul, 4 add f8+=(1./72.)*rho*cu; //speed 9 ex=1 ey=-1 ez=1 cu=3.0*((1.)*-ux+(-1.)*(u_bc-uy)+(1.)*-uz);//9 mul, 4 add f9+=(1./72.)*rho*cu; //speed 10 ex=-1 ey=-1 ez=1 cu=3.0*((-1.)*-ux+(-1.)*(u_bc-uy)+(1.)*-uz); //9 mul, 4 add f10+=(1./72.)*rho*cu; //speed 11 ex=1 ey=1 ez=-1 cu=3.0*((1.)*-ux +(1.)*(u_bc-uy)+(-1.)*-uz); //9 mul, 4 add f11+=(1./72.)*rho*cu; //speed 12 ex=-1 ey=1 ez=-1 cu=3.0*((-1.)*-ux+(1.)*(u_bc-uy)+(-1.)*-uz);// 9 mul, 4 add f12+=(1./72.)*rho*cu; //speed 13 ex=1 ey=-1 ez=-1 w=1./72. cu=3.0*((1.)*-ux+(-1.)*(u_bc-uy)+(-1.)*-uz);//9 mul, 4 add f13+=(1./72.)*rho*cu; //speed 14 ex=ey=ez=-1 w=1./72. cu=3.0*((-1.)*-ux + (-1.)*(u_bc-uy) +(-1.)*-uz); //9 mul, 4 add f14+=(1./72.)*rho*cu; ux=0.; uy=u_bc; uz=0.; }//if(lnl[tid]==1)... 
//if(snl[tid]==1){ if(((Y==0)||(Y==(Ny-1))||(Z==0)||(Z==(Nz-1))||(X==(Nx-1)))){ // 1--2 cu=f2; f2=f1; f1=cu; //3--4 cu=f4; f4=f3; f3=cu; //5--6 cu=f6; f6=f5; f5=cu; //7--14 cu=f14; f14=f7; f7=cu; //8--13 cu=f13; f13=f8; f8=cu; //9--12 cu=f12; f12=f9; f9=cu; //10--11 cu=f11; f11=f10; f10=cu; }else{ //relax //speed 0 ex=ey=ez=0 w=2./9. float fEq; fEq=rho*(2./9.)*(1.-1.5*(ux*ux+uy*uy+uz*uz)); //est 10 mul, 5 add per speed f0=f0-omega*(f0-fEq); //speed 1 ex=1 ey=ez=0 w=1./9. cu=3.*(1.*ux); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f1=f1-omega*(f1-fEq); //speed 2 ex=-1 ey=ez=0 w=1./9. cu=3.*((-1.)*ux); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f2=f2-omega*(f2-fEq); //speed 3 ex=0 ey=1 ez=0 w=1./9. cu=3.*(1.*uy); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f3=f3-omega*(f3-fEq); //speed 4 ex=0 ey=-1 ez=0 w=1./9. cu=3.*(-1.*uy); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f4=f4-omega*(f4-fEq); //speed 5 ex=ey=0 ez=1 w=1./9. cu=3.*(1.*uz); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f5=f5-omega*(f5-fEq); //speed 6 ex=ey=0 ez=-1 w=1./9. cu=3.*(-1.*uz); fEq=rho*(1./9.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f6=f6-omega*(f6-fEq); //speed 7 ex=ey=ez=1 w=1./72. cu=3.*(ux+uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f7=f7-omega*(f7-fEq); //speed 8 ex=-1 ey=ez=1 w=1./72. cu=3.*(-ux+uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f8=f8-omega*(f8-fEq); //speed 9 ex=1 ey=-1 ez=1 w=1./72. cu=3.*(ux-uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f9=f9-omega*(f9-fEq); //speed 10 ex=-1 ey=-1 ez=1 w=1/72 cu=3.*(-ux-uy+uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f10=f10-omega*(f10-fEq); //speed 11 ex=1 ey=1 ez=-1 w=1/72 cu=3.*(ux+uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f11=f11-omega*(f11-fEq); //speed 12 ex=-1 ey=1 ez=-1 w=1/72 cu=3.*(-ux+uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f12=f12-omega*(f12-fEq); //speed 13 ex=1 ey=ez=-1 w=1/72 cu=3.*(ux-uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f13=f13-omega*(f13-fEq); //speed 14 ex=ey=ez=-1 w=1/72 cu=3.*(-ux-uy-uz); fEq=rho*(1./72.)*(1.+cu+0.5*(cu*cu)- 1.5*(ux*ux+uy*uy+uz*uz)); f14=f14-omega*(f14-fEq); }//if/else snl //now, everybody streams... 
int X_t, Y_t, Z_t; int tid_t; //speed 0 ex=ey=ez=0 //fOut[tid]=f0; fOut[tid*15]=f0; //speed 1 ex=1 ey=ez=0 // est 5 mul, 3 add per speed X_t=X+1; Y_t=Y; Z_t=Z; if(X_t==Nx) X_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; //fOut[Nx*Ny*Nz+tid_t]=f1; fOut[tid_t*15+1]=f1; //speed 2 ex=-1 ey=ez=0; X_t=X-1; Y_t=Y; Z_t=Z; if(X_t<0) X_t=(Nx-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[2*Nx*Ny*Nz+tid_t]=f2; fOut[tid_t*15+2]=f2; //speed 3 ex=0 ey=1 ez=0 X_t=X; Y_t=Y+1; Z_t=Z; if(Y_t==Ny) Y_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[3*Nx*Ny*Nz+tid_t]=f3; fOut[tid_t*15+3]=f3; //speed 4 ex=0 ey=-1 ez=0 X_t=X; Y_t=Y-1; Z_t=Z; if(Y_t<0) Y_t=(Ny-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; //fOut[4*Nx*Ny*Nz+tid_t]=f4; fOut[tid_t*15+4]=f4; //speed 5 ex=ey=0 ez=1 X_t=X; Y_t=Y; Z_t=Z+1; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[5*Nx*Ny*Nz+tid_t]=f5; fOut[tid_t*15+5]=f5; //speed 6 ex=ey=0 ez=-1 X_t=X; Y_t=Y; Z_t=Z-1; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[6*Nx*Ny*Nz+tid_t]=f6; fOut[tid_t*15+6]=f6; //speed 7 ex=ey=ez=1 X_t=X+1; Y_t=Y+1; Z_t=Z+1; if(X_t==Nx) X_t=0; if(Y_t==Ny) Y_t=0; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[7*Nx*Ny*Nz+tid_t]=f7; fOut[tid_t*15+7]=f7; //speed 8 ex=-1 ey=1 ez=1 X_t=X-1; Y_t=Y+1; Z_t=Z+1; if(X_t<0) X_t=(Nx-1); if(Y_t==Ny) Y_t=0; if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[8*Nx*Ny*Nz+tid_t]=f8; fOut[tid_t*15+8]=f8; //speed 9 ex=1 ey=-1 ez=1 X_t=X+1; Y_t=Y-1; Z_t=Z+1; if(X_t==Nx) X_t=0; if(Y_t<0) Y_t=(Ny-1); if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[9*Nx*Ny*Nz+tid_t]=f9; fOut[tid_t*15+9]=f9; //speed 10 ex=-1 ey=-1 ez=1 X_t=X-1; Y_t=Y-1; Z_t=Z+1; if(X_t<0) X_t=(Nx-1); if(Y_t<0) Y_t=(Ny-1); if(Z_t==Nz) Z_t=0; tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[10*Nx*Ny*Nz+tid_t]=f10; fOut[tid_t*15+10]=f10; //speed 11 ex=1 ey=1 ez=-1 X_t=X+1; Y_t=Y+1; Z_t=Z-1; if(X_t==Nx) X_t=0; if(Y_t==Ny) Y_t=0; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; //fOut[11*Nx*Ny*Nz+tid_t]=f11; fOut[tid_t*15+11]=f11; //speed 12 ex=-1 ey=1 ez=-1 X_t=X-1; Y_t=Y+1; Z_t=Z-1; if(X_t<0) X_t=(Nx-1); if(Y_t==Ny) Y_t=0; if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; // fOut[12*Nx*Ny*Nz+tid_t]=f12; fOut[tid_t*15+12]=f12; //speed 13 ex=1 ey=-1 ez=-1 X_t=X+1; Y_t=Y-1; Z_t=Z-1; if(X_t==Nx) X_t=0; if(Y_t<0) Y_t=(Ny-1); if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; //fOut[13*Nx*Ny*Nz+tid_t]=f13; fOut[tid_t*15+13]=f13; //speed 14 ex=ey=ez=-1 X_t=X-1; Y_t=Y-1; Z_t=Z-1; if(X_t<0) X_t=(Nx-1); if(Y_t<0) Y_t=(Ny-1); if(Z_t<0) Z_t=(Nz-1); tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny; //fOut[14*Nx*Ny*Nz+tid_t]=f14; fOut[tid_t*15+14]=f14; }//if(X<Nx... } void ldc_D3Q15_LBGK_ts_cudaT(float * fOut, const float * fIn, const int * snl, const int * lnl, const float u_bc, const float omega, const int Nx, const int Ny, const int Nz){ dim3 BLOCKS(TPB,TPB,1); dim3 GRIDS((Nx+TPB-1)/TPB,(Ny+TPB-1)/TPB,Nz); ldc_D3Q15_LBGK_tsT<<<GRIDS,BLOCKS>>>(fOut,fIn,snl,lnl,u_bc, omega,Nx,Ny,Nz); }
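/*
 * Example (not part of the original file): the two kernels in this file differ only in
 * how the 15 distribution values of a lattice node are laid out in memory --
 * ldc_D3Q15_LBGK_ts reads speed q of node tid at fIn[q*Nx*Ny*Nz + tid]
 * (structure-of-arrays), while ldc_D3Q15_LBGK_tsT reads it at fIn[tid*15 + q]
 * (array-of-structures). The host helper below is a hypothetical converter between
 * the two layouts; its name and argument order are illustration assumptions only.
 */
#include <cstddef>

static inline size_t soa_idx(int q, size_t tid, size_t nNodes) { return (size_t)q * nNodes + tid; }
static inline size_t aos_idx(int q, size_t tid)                { return tid * 15 + (size_t)q; }

// Repack a D3Q15 distribution array from SoA ordering (used by the "ts" kernel)
// to AoS ordering (used by the "tsT" kernel).
void d3q15_soa_to_aos(const float *f_soa, float *f_aos, int Nx, int Ny, int Nz) {
    const size_t nNodes = (size_t)Nx * Ny * Nz;
    for (size_t tid = 0; tid < nNodes; ++tid)
        for (int q = 0; q < 15; ++q)
            f_aos[aos_idx(q, tid)] = f_soa[soa_idx(q, tid, nNodes)];
}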
5105c410a6b5dee7adcc0f9bd0e322cccd48c4b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Decode_GPU.cuh" #include <cmath> #include <iostream> #include <fstream> #include <string> #include <sstream> #include <assert.h> __device__ void BubleSort_GPU(float a[], int n, int index[]) { int i, j; float x; for (i = 0; i < n; i++) { for (j = 1; j < n - i; j++) { if (a[j - 1] < a[j]) { x = a[j]; a[j] = a[j - 1]; a[j - 1] = x; x = index[j]; index[j] = index[j - 1]; index[j - 1] = x; } } } } __device__ int SortLLRVector_GPU(int GF, float *Entr_v2c, int *index) { BubleSort_GPU(Entr_v2c, GF, index); return 1; } __device__ int DecideLLRVector_GPU(float *LLR, int GF) { float max = 0; int alpha_i; for (int q = 0; q < GF - 1; q++) { if (LLR[q] > max) { max = LLR[q]; alpha_i = q + 1; } } if (max <= 0) { return 0; } else { return alpha_i; } } __device__ void d_BubleSort_GPU(float a[], int n, int index[]) { int i, j; float x; for (i = 0; i < n; i++) { for (j = 1; j < n - i; j++) { if (a[j - 1] > a[j]) { x = a[j]; a[j] = a[j - 1]; a[j - 1] = x; x = index[j]; index[j] = index[j - 1]; index[j - 1] = x; } } } } __device__ int d_SortLLRVector_GPU(int GF, float *Entr_v2c, int *index) { d_BubleSort_GPU(Entr_v2c, GF, index); return 1; } __device__ int d_DecideLLRVector_GPU(float *LLR, int GF) { float min = DBL_MAX; int alpha_i; for (int q = 0; q < GFQ; q++) { if (LLR[q] < min) { min = LLR[q]; alpha_i = q; } } return alpha_i; } __device__ int GetCombCount(int n, int m) { long int i; long int a, b, c, s; // s = a/(b*c) a = b = c = 1; for (i = 1; i <= n; i++) a *= i; for (i = 1; i <= m; i++) b *= i; for (i = 1; i <= n - m; i++) c *= i; s = a / (b * c); return s; } __device__ void swap(int &a, int &b) { int temp = a; a = b; b = temp; } __device__ int GFAdd_GPU(int ele1, int ele2, const unsigned *TableAdd_GPU) { return ele1 ^ ele2; } __device__ int GFMultiply_GPU(int ele1, int ele2, const unsigned *TableMultiply_GPU) { return TableMultiply_GPU[GFQ * ele1 + ele2]; } __device__ int GFInverse_GPU(int ele, const unsigned *TableInverse_GPU) { if (ele == 0) { printf("Div 0 Error!\n"); } return TableInverse_GPU[ele]; } int Decoding_EMS_GPU(const LDPCCode *H, VN *Variablenode, CN *Checknode, int EMS_Nm, int EMS_Nc, int *DecodeOutput, const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, const int *Variablenode_weight, const int *Checknode_weight, const int *Variablenode_linkCNs, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, int &iter_number) { hipError_t cudaStatus; // int *sort_Entr_v2c_temp = (int *)malloc(H->Variablenode_num * maxdv * GFQ * sizeof(int)); // memset(sort_Entr_v2c_temp, 0, H->Variablenode_num * maxdv * GFQ * sizeof(int)); int *sort_Entr_v2c; hipMalloc((void **)&sort_Entr_v2c, H->Variablenode_num * maxdv * GFQ * sizeof(int)); float *sort_L_v2c_temp = (float *)malloc(H->Variablenode_num * maxdv * GFQ * sizeof(float)); memset(sort_L_v2c_temp, 0, H->Variablenode_num * maxdv * GFQ * sizeof(float)); float *sort_L_v2c; hipMalloc((void **)&sort_L_v2c, H->Variablenode_num * maxdv * GFQ * sizeof(float)); float *Checknode_L_c2v_temp = (float *)malloc(H->Checknode_num * maxdc * GFQ * sizeof(float)); memset(Checknode_L_c2v_temp, 0, H->Checknode_num * maxdc * GFQ * sizeof(float)); float *Checknode_L_c2v; hipMalloc((void **)&Checknode_L_c2v, H->Checknode_num * maxdc * GFQ * sizeof(float)); // int *index = (int *)malloc((GFQ) * sizeof(int)); float *L_ch_temp = (float *)malloc(H->Variablenode_num * (GFQ - 1) * sizeof(float)); memset(L_ch_temp, 0, H->Variablenode_num * (GFQ - 1) * 
sizeof(float)); float *L_ch; hipMalloc((void **)&L_ch, H->Variablenode_num * (GFQ - 1) * sizeof(float)); float *LLR_temp = (float *)malloc(H->Variablenode_num * (GFQ - 1) * sizeof(float)); float *LLR; hipMalloc((void **)&LLR, H->Variablenode_num * (GFQ - 1) * sizeof(float)); for (int col = 0; col < H->Variablenode_num; col++) { for (int d = 0; d < Variablenode[col].weight; d++) { Variablenode[col].L_ch[GFQ - 1] = 0; for (int q = 0; q < GFQ; q++) { sort_L_v2c_temp[col * maxdv * GFQ + d * GFQ + q] = Variablenode[col].L_ch[q]; Variablenode[col].sort_L_v2c[d][q] = Variablenode[col].L_ch[q]; } } for (int q = 0; q < GFQ; q++) { L_ch_temp[col * (GFQ - 1) + q] = Variablenode[col].L_ch[q]; } } cudaStatus = hipMemcpy(L_ch, L_ch_temp, H->Variablenode_num * (GFQ - 1) * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("Cannot copy L_ch\n"); exit(0); } cudaStatus = hipMemcpy(sort_L_v2c, sort_L_v2c_temp, H->Variablenode_num * maxdv * GFQ * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("Cannot copy sort_L_v2c\n"); exit(0); } cudaStatus = hipMemcpy(Checknode_L_c2v, Checknode_L_c2v_temp, H->Checknode_num * maxdc * GFQ * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("Cannot copy Checknode_L_c2v\n"); exit(0); } for (int row = 0; row < H->Checknode_num; row++) { for (int d = 0; d < Checknode[row].weight; d++) { memset(Checknode[row].L_c2v[d], 0, (GFQ - 1) * sizeof(float)); } } iter_number = 0; bool decode_correct = true; int *DecodeOutput_GPU; hipMalloc((void **)&DecodeOutput_GPU, H->Variablenode_num * sizeof(int)); while (iter_number < maxIT) { iter_number++; hipLaunchKernelGGL(( Variablenode_EMS), dim3(H->Variablenode_num), dim3(maxdv), 0, 0, (const int *)Variablenode_weight, (const int *)Variablenode_linkCNs, sort_Entr_v2c, sort_L_v2c, Checknode_L_c2v, (const float *)L_ch, LLR, DecodeOutput_GPU, H->Variablenode_num); cudaStatus = hipMemcpy(DecodeOutput, DecodeOutput_GPU, H->Variablenode_num * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { printf("Cannot copy DecodeOutput\n"); exit(0); } // for (int i = 0; i < H->Variablenode_num; i++) // { // for (int q = 0; q < GFQ - 1; q++) // { // Variablenode[i].LLR[q] = LLR_temp[i * (GFQ - 1) + q]; // } // DecodeOutput[i] = DecideLLRVector(Variablenode[i].LLR, GFQ); // } decode_correct = true; int sum_temp = 0; for (int row = 0; row < H->Checknode_num; row++) { for (int i = 0; i < Checknode[row].weight; i++) { sum_temp = GFAdd(sum_temp, GFMultiply(DecodeOutput[Checknode[row].linkVNs[i]], Checknode[row].linkVNs_GF[i])); } if (sum_temp) { decode_correct = false; break; } } if (decode_correct) { hipFree(sort_Entr_v2c); hipFree(sort_L_v2c); hipFree(Checknode_L_c2v); hipFree(LLR); hipFree(L_ch); hipFree(DecodeOutput_GPU); free(L_ch_temp); free(LLR_temp); // free(index); // free(sort_Entr_v2c_temp); free(sort_L_v2c_temp); free(Checknode_L_c2v_temp); iter_number--; return 1; } // message from var to check // for (int col = 0; col < H->Variablenode_num; col++) // { // for (int dv = 0; dv < Variablenode[col].weight; dv++) // { // for (int q = 0; q < GFQ - 1; q++) // { // Variablenode[col].sort_L_v2c[dv][q] = Variablenode[col].LLR[q] - Checknode[Variablenode[col].linkCNs[dv]].L_c2v[index_in_CN(Variablenode, col, dv, Checknode)][q]; // } // Variablenode[col].sort_L_v2c[dv][GFQ - 1] = 0; // } // } // for (int col = 0; col < H->Variablenode_num; col++) // { // for (int dv = 0; dv < Variablenode[col].weight; dv++) // { // for (int i = 0; i < GFQ - 1; i++) // { // 
index[i] = i + 1; // } // index[GFQ - 1] = 0; // SortLLRVector(GFQ, Variablenode[col].sort_L_v2c[dv], index); // for (int i = 0; i < GFQ; i++) // { // Variablenode[col].sort_Entr_v2c[dv][i] = index[i]; // sort_Entr_v2c_temp[col * maxdv * GFQ + dv * GFQ + i] = index[i]; // sort_L_v2c_temp[col * maxdv * GFQ + dv * GFQ + i] = Variablenode[col].sort_L_v2c[dv][i]; // } // } // } hipLaunchKernelGGL(( Variablenode_Update_EMS), dim3(H->Variablenode_num), dim3(maxdv), 0, 0, (const int *)Variablenode_weight, (const int *)Variablenode_linkCNs, sort_Entr_v2c, sort_L_v2c, Checknode_L_c2v, (const float *)L_ch, LLR, H->Variablenode_num); // cudaStatus = hipMemcpy(sort_Entr_v2c, sort_Entr_v2c_temp, H->Variablenode_num * maxdv * GFQ * sizeof(int), hipMemcpyHostToDevice); // if (cudaStatus != hipSuccess) // { // printf("Cannot copy sort_Entr_v2c\n"); // exit(0); // } // cudaStatus = hipMemcpy(sort_L_v2c, sort_L_v2c_temp, H->Variablenode_num * maxdv * GFQ * sizeof(float), hipMemcpyHostToDevice); // if (cudaStatus != hipSuccess) // { // printf("Cannot copy sort_L_v2c\n"); // exit(0); // } // // message from check to var hipLaunchKernelGGL(( Checknode_EMS), dim3(H->Checknode_num), dim3(maxdc), 0, 0, (const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, EMS_Nm, EMS_Nc, (const int *)Checknode_weight, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_Entr_v2c, sort_L_v2c, Checknode_L_c2v, H->Checknode_num); // Checknode_EMS<<<1, 1>>>((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, EMS_Nm, EMS_Nc, (const int *)Checknode_weight, (const int *)Variablenode_linkCNs, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_Entr_v2c, sort_L_v2c, Checknode_L_c2v, H->Checknode_num); // cudaStatus = hipMemcpy(Checknode_L_c2v_temp, Checknode_L_c2v, H->Checknode_num * maxdc * GFQ * sizeof(float), hipMemcpyDeviceToHost); // if (cudaStatus != hipSuccess) // { // printf("Cannot copy Checknode_L_c2v D2V\n"); // exit(0); // } // for (int i = 0; i < H->Checknode_num; i++) // { // for (int j = 0; j < Checknode[i].weight; j++) // { // for (int q = 0; q < GFQ - 1; q++) // { // Checknode[i].L_c2v[j][q] = Checknode_L_c2v_temp[i * maxdc * GFQ + j * GFQ + q]; // } // } // } } hipFree(sort_Entr_v2c); hipFree(sort_L_v2c); hipFree(Checknode_L_c2v); hipFree(LLR); hipFree(L_ch); free(L_ch_temp); free(LLR_temp); hipFree(DecodeOutput_GPU); // free(index); // free(sort_Entr_v2c_temp); free(sort_L_v2c_temp); free(Checknode_L_c2v_temp); return 0; } __global__ void Variablenode_EMS(const int *Variablenode_weight, const int *Variablenode_linkCNs, int *sort_Entr_v2c, float *sort_L_v2c, float *Checknode_L_c2v, const float *L_ch, float *LLR, int *DecodeOutput, int Variablenode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; if (offset < Variablenode_num * maxdv) { int d = offset % maxdv; offset = offset / maxdv; if (d < Variablenode_weight[offset]) { if (d == 0) { for (int q = 0; q < GFQ - 1; q++) { LLR[offset * (GFQ - 1) + q] = L_ch[offset * (GFQ - 1) + q]; } } __syncthreads(); // for (int d = 0; d < Variablenode_weight[offset]; d++) // { for (int q = 0; q < GFQ - 1; q++) { atomicAdd(&LLR[offset * (GFQ - 1) + q], Checknode_L_c2v[Variablenode_linkCNs[offset * maxdv + d] + q]); } // } __syncthreads(); if (d == 0) { DecodeOutput[offset] = DecideLLRVector_GPU(LLR + offset * (GFQ - 1), GFQ); } } } } __global__ void Variablenode_Update_EMS(const int *Variablenode_weight, const int *Variablenode_linkCNs, int *sort_Entr_v2c, float *sort_L_v2c, float *Checknode_L_c2v, 
const float *L_ch, float *LLR, int Variablenode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; int *index = (int *)malloc(GFQ * sizeof(int)); if (offset < Variablenode_num * maxdv) { int dv = offset % maxdv; offset = offset / maxdv; if (dv < Variablenode_weight[offset]) { // for (int dv = 0; dv < Variablenode_weight[offset]; dv++) // { for (int q = 0; q < GFQ - 1; q++) { sort_L_v2c[offset * maxdv * GFQ + dv * GFQ + q] = LLR[offset * (GFQ - 1) + q] - Checknode_L_c2v[Variablenode_linkCNs[offset * maxdv + dv] + q]; } sort_L_v2c[offset * maxdv * GFQ + dv * GFQ + GFQ - 1] = 0; // } // for (int dv = 0; dv < Variablenode_weight[offset]; dv++) // { for (int i = 0; i < GFQ - 1; i++) { index[i] = i + 1; } index[GFQ - 1] = 0; SortLLRVector_GPU(GFQ, sort_L_v2c + offset * maxdv * GFQ + dv * GFQ, index); for (int i = 0; i < GFQ; i++) { sort_Entr_v2c[offset * maxdv * GFQ + dv * GFQ + i] = index[i]; } // } } } free(index); } /* Checknode_weight: the weight (degree) of each check node. L_c2v: Q messages per edge, (number of check nodes)*Q entries in total. Variblenode_linkCNs: maximum weight dv; each group of dv elements holds the indices of the dv check nodes connected to one variable node. Checknode_linkVNS: maximum weight dc; each group of dc elements holds the indices of the dc variable nodes connected to one check node. Checknode_linkVNS_GF: maximum weight dc; each group of dc elements holds the GF values on the edges to those dc variable nodes. sort_Entr_v2c: per variable node of weight dv, q entries for each of the dv edges, times the number of variable nodes, i.e. [variable node count][variable node weight][q]. sort_L_v2c: the LLRs corresponding to sort_Entr_v2c. Checknode_L_c2v: per check node of weight dc, q entries for each of the dc edges, times the number of check nodes, i.e. [check node count][check node weight][q] */ __global__ void Checknode_EMS(const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, int EMS_Nm, int EMS_Nc, const int *Checknode_weight, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, int *sort_Entr_v2c, float *sort_L_v2c, float *Checknode_L_c2v, int Checknode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; if (offset < Checknode_num * maxdc) { float EMS_L_c2v[GFQ]; int dc = offset % maxdc; offset = offset / maxdc; if (dc < Checknode_weight[offset]) { // reset the sum store vector to the minimum for (int q = 0; q < GFQ; q++) { EMS_L_c2v[q] = -DBL_MAX; } // recursively and exhaustively int sumNonele; float sumNonLLR; // conf(q, 1) sumNonele = 0; sumNonLLR = 0; // ConstructConf_GPU((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, GFQ, 1, sumNonele, sumNonLLR, diff, 0, dc, Checknode_weight[offset] - 1, offset, EMS_L_c2v, (const int *)Variblenode_linkCNs, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_Entr_v2c, sort_L_v2c); for (int i = 0; i < Checknode_weight[offset]; i++) { if (i == dc) { continue; } sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); sumNonLLR = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i]]; } if (sumNonLLR > EMS_L_c2v[sumNonele]) { EMS_L_c2v[sumNonele] = sumNonLLR; } int sumNonele_all_max = sumNonele; float sumNonLLR_all_max = sumNonLLR; for (int i = 0; i < Checknode_weight[offset]; i++) { if (i == dc) { continue; } sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele_all_max, TableAdd_GPU); sumNonLLR = sumNonLLR_all_max - sort_L_v2c[Checknode_linkVNs[offset * maxdc + i]]; for (int k = 1; k < GFQ; k++) { int sumNonele1 = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i] + k], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); float sumNonLLR1 = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i] + k]; if (sumNonLLR1 > EMS_L_c2v[sumNonele1]) { EMS_L_c2v[sumNonele1] = sumNonLLR1; } } } // conf(nm, nc) // sumNonele = 0; // sumNonLLR = 0; // diff = 0; // ConstructConf_GPU((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, EMS_Nm,
EMS_Nc, sumNonele, sumNonLLR, diff, 0, dc, Checknode_weight[offset] - 1, offset, EMS_L_c2v, (const int *)Variblenode_linkCNs, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_Entr_v2c, sort_L_v2c); int *bit = new int[Checknode_weight[offset] - 1]; int EMS_Nc_temp; if (EMS_Nc == maxdc - 1) { EMS_Nc_temp = Checknode_weight[offset] - 1; } else { EMS_Nc_temp = EMS_Nc; } for (int choose_n = 2; choose_n <= EMS_Nc_temp; choose_n++) { for (int k = 0; k < Checknode_weight[offset] - 1; k++) { if (k < choose_n) bit[k] = 1; else bit[k] = 0; } int i, j, beg, end; int len = Checknode_weight[offset] - 1; int N = GetCombCount(Checknode_weight[offset] - 1, choose_n); //C(n,count) C(5,3) int *conf_index = (int *)malloc(choose_n * sizeof(int)); memset(conf_index, 0, (choose_n) * sizeof(int)); int flag = 0; while (!flag) { sumNonele = 0; sumNonLLR = 0; for (int i = 0; i < choose_n; i++) { conf_index[i] += 1; // move confset[i] to smaller one if (i == choose_n - 1 && conf_index[i] == EMS_Nm) { // reaches end flag = 1; break; } else if (conf_index[i] >= EMS_Nm) { conf_index[i] = 0; // continue to modify next VN } else { break; // don't modify next VN } } if (!flag) { int k = 0; int t = 0; for (int i = 0; i < Checknode_weight[offset]; i++) { if (i == dc) { continue; } if (bit[t] == 1) { sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i] + conf_index[k]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); sumNonLLR = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i] + conf_index[k]]; k++; } else { sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); sumNonLLR = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i]]; } t++; } if (sumNonLLR > EMS_L_c2v[sumNonele]) { EMS_L_c2v[sumNonele] = sumNonLLR; } } } for (j = 1; j < N; j++) { for (i = len - 1; i > 0; i--) { if (bit[i] == 0 && bit[i - 1] == 1) { swap(bit[i], bit[i - 1]); //from index: [i to len-1] , make all bit 1 in the right beg = i; end = len - 1; while (1) { while (bit[beg] == 1) { beg++; if (beg >= len) break; } while (bit[end] == 0) { end--; if (end < i) break; } if (beg < end) swap(bit[beg], bit[end]); else break; } //end of "while" break; } //end of "if" } flag = 0; memset(conf_index, 0, (choose_n) * sizeof(int)); while (!flag) { sumNonele = 0; sumNonLLR = 0; for (int i = 0; i < choose_n; i++) { conf_index[i] += 1; // move confset[i] to smaller one if (i == choose_n - 1 && conf_index[i] == EMS_Nm) { // reaches end flag = 1; break; } else if (conf_index[i] >= EMS_Nm) { conf_index[i] = 0; // continue to modify next VN } else { break; // don't modify next VN } } if (!flag) { int k = 0; int t = 0; for (int i = 0; i < Checknode_weight[offset]; i++) { if (i == dc) { continue; } if (bit[t] == 1) { sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i] + conf_index[k]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); sumNonLLR = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i] + conf_index[k]]; k++; } else { sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); sumNonLLR = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i]]; } t++; } if (sumNonLLR > EMS_L_c2v[sumNonele]) { EMS_L_c2v[sumNonele] = sumNonLLR; 
} } } } free(conf_index); } free(bit); // calculate each c2v LLR int v = 0; Checknode_L_c2v[offset * maxdc * GFQ + dc * GFQ + GFQ - 1] = 0; for (int k = 1; k < GFQ; k++) { v = GFMultiply_GPU(k, Checknode_linkVNs_GF[offset * maxdc + dc], TableMultiply_GPU); Checknode_L_c2v[offset * maxdc * GFQ + dc * GFQ + k - 1] = (EMS_L_c2v[v] - EMS_L_c2v[0]) / 1.2; } } else { for (int k = 0; k < GFQ; k++) { Checknode_L_c2v[offset * maxdc * GFQ + dc * GFQ + k] = 0; } } } } int Decoding_TMM_GPU(const LDPCCode *H, VN *Variablenode, CN *Checknode, int EMS_Nm, int EMS_Nc, int *DecodeOutput, const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, const unsigned *TableInverse_GPU, const int *Variablenode_weight, const int *Checknode_weight, const int *Variablenode_linkCNs, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, int &iter_number) { hipError_t cudaStatus; float *sort_L_v2c_temp = (float *)malloc(H->Variablenode_num * maxdv * GFQ * sizeof(float)); memset(sort_L_v2c_temp, 0, H->Variablenode_num * maxdv * GFQ * sizeof(float)); float *sort_L_v2c; hipMalloc((void **)&sort_L_v2c, H->Variablenode_num * maxdv * GFQ * sizeof(float)); float *Checknode_L_c2v_temp = (float *)malloc(H->Checknode_num * maxdc * GFQ * sizeof(float)); memset(Checknode_L_c2v_temp, 0, H->Checknode_num * maxdc * GFQ * sizeof(float)); float *Checknode_L_c2v; hipMalloc((void **)&Checknode_L_c2v, H->Checknode_num * maxdc * GFQ * sizeof(float)); float *LLR_temp = (float *)malloc(H->Variablenode_num * GFQ * sizeof(float)); memset(LLR_temp, 0, H->Variablenode_num * GFQ * sizeof(float)); float *LLR; hipMalloc((void **)&LLR, H->Variablenode_num * GFQ * sizeof(float)); float max = -DBL_MAX; for (int col = 0; col < H->Variablenode_num; col++) { max = -DBL_MAX; for (int q = 0; q < GFQ - 1; q++) { if (Variablenode[col].L_ch[q] > max) { max = Variablenode[col].L_ch[q]; } } for (int d = 0; d < Variablenode[col].weight; d++) { for (int q = 0; q < GFQ; q++) { if (q == 0) { Variablenode[col].sort_L_v2c[d][q] = max; Variablenode[col].LLR[q] = max; sort_L_v2c_temp[col * maxdv * GFQ + d * GFQ + q] = max; LLR_temp[col * GFQ + q] = max; } else { sort_L_v2c_temp[col * maxdv * GFQ + d * GFQ + q] = max - Variablenode[col].L_ch[q - 1]; Variablenode[col].sort_L_v2c[d][q] = max - Variablenode[col].L_ch[q - 1]; Variablenode[col].LLR[q] = max - Variablenode[col].L_ch[q - 1]; LLR_temp[col * GFQ + q] = max - Variablenode[col].L_ch[q - 1]; } } } } cudaStatus = hipMemcpy(LLR, LLR_temp, H->Variablenode_num * GFQ * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("Cannot copy LLR\n"); exit(0); } cudaStatus = hipMemcpy(sort_L_v2c, sort_L_v2c_temp, H->Variablenode_num * maxdv * GFQ * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("Cannot copy sort_L_v2c\n"); exit(0); } cudaStatus = hipMemcpy(Checknode_L_c2v, Checknode_L_c2v_temp, H->Checknode_num * maxdc * GFQ * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("Cannot copy Checknode_L_c2v\n"); exit(0); } for (int row = 0; row < H->Checknode_num; row++) { for (int d = 0; d < Checknode[row].weight; d++) { memset(Checknode[row].L_c2v[d], 0, GFQ * sizeof(float)); } } iter_number = 0; bool decode_correct = true; int *DecodeOutput_GPU; hipMalloc((void **)&DecodeOutput_GPU, H->Variablenode_num * sizeof(int)); while (iter_number < maxIT) { iter_number++; hipLaunchKernelGGL(( Variablenode_TMM), dim3(H->Variablenode_num), dim3(maxdv), 0, 0, (const int *)Variablenode_weight, (const int *)Variablenode_linkCNs, sort_L_v2c, 
Checknode_L_c2v, LLR, DecodeOutput_GPU, H->Variablenode_num); cudaStatus = hipMemcpy(DecodeOutput, DecodeOutput_GPU, H->Variablenode_num * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { printf("Cannot copy DecodeOutput\n"); exit(0); } decode_correct = true; int sum_temp = 0; for (int row = 0; row < H->Checknode_num; row++) { for (int i = 0; i < Checknode[row].weight; i++) { sum_temp = GFAdd(sum_temp, GFMultiply(DecodeOutput[Checknode[row].linkVNs[i]], Checknode[row].linkVNs_GF[i])); } if (sum_temp) { decode_correct = false; break; } } if (decode_correct) { hipFree(sort_L_v2c); hipFree(Checknode_L_c2v); hipFree(LLR); hipFree(DecodeOutput_GPU); free(LLR_temp); free(sort_L_v2c_temp); free(Checknode_L_c2v_temp); iter_number--; return 1; } hipLaunchKernelGGL(( Variablenode_Update_TMM), dim3(H->Variablenode_num), dim3(maxdv), 0, 0, (const int *)Variablenode_weight, (const int *)Variablenode_linkCNs, sort_L_v2c, Checknode_L_c2v, LLR, H->Variablenode_num); hipLaunchKernelGGL(( Checknode_TMM), dim3(H->Checknode_num), dim3(1), 0, 0, (const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, (const unsigned *)TableInverse_GPU, (const int *)Checknode_weight, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_L_v2c, Checknode_L_c2v, H->Checknode_num); } hipFree(sort_L_v2c); hipFree(Checknode_L_c2v); hipFree(LLR); free(LLR_temp); hipFree(DecodeOutput_GPU); free(sort_L_v2c_temp); free(Checknode_L_c2v_temp); return 0; } __global__ void Variablenode_TMM(const int *Variablenode_weight, const int *Variablenode_linkCNs, float *sort_L_v2c, float *Checknode_L_c2v, float *LLR, int *DecodeOutput, int Variablenode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; if (offset < Variablenode_num * maxdv) { int d = offset % maxdv; offset = offset / maxdv; if (d < Variablenode_weight[offset]) { // for (int d = 0; d < Variablenode_weight[offset]; d++) // { for (int q = 0; q < GFQ; q++) { atomicAdd(&LLR[offset * GFQ + q], Checknode_L_c2v[Variablenode_linkCNs[offset * maxdv + d] + q]); } // } } __syncthreads(); if (d == 0) { DecodeOutput[offset] = d_DecideLLRVector_GPU(LLR + offset * GFQ, GFQ); } } } __global__ void Variablenode_Update_TMM(const int *Variablenode_weight, const int *Variablenode_linkCNs, float *sort_L_v2c, float *Checknode_L_c2v, float *LLR, int Variablenode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; if (offset < Variablenode_num * maxdv) { int dv = offset % maxdv; offset = offset / maxdv; if (dv < Variablenode_weight[offset]) { // for (int dv = 0; dv < Variablenode_weight[offset]; dv++) // { for (int q = 0; q < GFQ; q++) { sort_L_v2c[offset * maxdv * GFQ + dv * GFQ + q] = LLR[offset * GFQ + q] - Checknode_L_c2v[Variablenode_linkCNs[offset * maxdv + dv] + q]; } // } } } } __global__ void Checknode_TMM(const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, const unsigned *TableInverse_GPU, const int *Checknode_weight, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, float *sort_L_v2c, float *Checknode_L_c2v, int Checknode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; if (offset < Checknode_num) { int *TMM_Zn = (int *)malloc(maxdc * sizeof(int)); float *TMM_deltaU = (float *)malloc(maxdc * GFQ * sizeof(float)); float *TMM_Min1 = (float *)malloc(GFQ * sizeof(float)); float *TMM_Min2 = (float *)malloc(GFQ * sizeof(float)); int *TMM_Min1_Col = (int *)malloc(GFQ * sizeof(int)); float *TMM_I = (float *)malloc(GFQ * sizeof(float)); int *TMM_Path = (int *)malloc(GFQ * 2 * 
sizeof(int)); float *TMM_E = (float *)malloc(GFQ * sizeof(float)); float *TMM_Lc2p = (float *)malloc(GFQ * sizeof(float)); int TMM_Syndrome = 0; d_TMM_Get_Zn_GPU((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, (const unsigned *)TableInverse_GPU, (const int *)Checknode_weight, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_L_v2c, Checknode_L_c2v, TMM_Zn, offset, TMM_Syndrome); d_TMM_Get_deltaU_GPU((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, (const unsigned *)TableInverse_GPU, (const int *)Checknode_weight, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_L_v2c, Checknode_L_c2v, TMM_Zn, TMM_deltaU, offset); TMM_Get_Min_GPU(Checknode_weight, TMM_Zn, TMM_deltaU, TMM_Min1, TMM_Min2, TMM_Min1_Col, offset); TMM_ConstructConf_GPU((const unsigned *)TableInverse_GPU, TMM_deltaU, TMM_Min1, TMM_Min2, TMM_Min1_Col, TMM_I, TMM_Path, TMM_E); for (int dc = 0; dc < Checknode_weight[offset]; dc++) { // choose to output TMM_Lc2p[0] = 0; for (int eta = 1; eta < GFQ; eta++) { if (dc != TMM_Path[eta * 2 + 0] && dc != TMM_Path[eta * 2 + 1]) { TMM_Lc2p[eta] = TMM_I[eta]; } else { TMM_Lc2p[eta] = TMM_E[eta]; } } int h_inverse = GFInverse_GPU(Checknode_linkVNs_GF[offset * maxdc + dc], TableInverse_GPU); int beta_syn = GFAdd_GPU(TMM_Syndrome, TMM_Zn[dc], TableAdd_GPU); double L0 = TMM_Lc2p[beta_syn]; for (int eta = 0; eta < GFQ; eta++) { int beta = GFMultiply_GPU(h_inverse, GFAdd_GPU(eta, beta_syn, TableAdd_GPU), TableMultiply_GPU); Checknode_L_c2v[offset * maxdc * GFQ + dc * GFQ + beta] = (TMM_Lc2p[eta]) * 0.8; } } free(TMM_Zn); free(TMM_deltaU); free(TMM_Min1); free(TMM_Min2); free(TMM_Min1_Col); free(TMM_I); free(TMM_Path); free(TMM_E); free(TMM_Lc2p); } } __device__ int d_TMM_Get_Zn_GPU(const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, const unsigned *TableInverse_GPU, const int *Checknode_weight, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, float *sort_L_v2c, float *Checknode_L_c2v, int *TMM_Zn, int row, int &TMM_Syndrome) { TMM_Syndrome = 0; for (int dc = 0; dc < Checknode_weight[row]; dc++) { double min = DBL_MAX; int min_ele = 0; for (int q = 0; q < GFQ; q++) { if (sort_L_v2c[Checknode_linkVNs[row * maxdc + dc] + q] < min) { min = sort_L_v2c[Checknode_linkVNs[row * maxdc + dc] + q]; min_ele = GFMultiply_GPU(q, Checknode_linkVNs_GF[row * maxdc + dc], TableMultiply_GPU); } } TMM_Zn[dc] = min_ele; TMM_Syndrome = GFAdd_GPU(TMM_Syndrome, min_ele, TableAdd_GPU); } return 0; } __device__ int d_TMM_Get_deltaU_GPU(const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, const unsigned *TableInverse_GPU, const int *Checknode_weight, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, float *sort_L_v2c, float *Checknode_L_c2v, int *TMM_Zn, float *TMM_deltaU, int row) { for (int dc = 0; dc < Checknode_weight[row]; dc++) { int h_inverse = GFInverse_GPU(Checknode_linkVNs_GF[row * maxdc + dc], TableInverse_GPU); int beta_p = GFMultiply_GPU(h_inverse, TMM_Zn[dc], TableMultiply_GPU); float min = sort_L_v2c[Checknode_linkVNs[row * maxdc + dc] + beta_p]; for (int x = 0; x < GFQ; x++) { int eta = GFAdd_GPU(x, TMM_Zn[dc], TableAdd_GPU); TMM_deltaU[dc * GFQ + eta] = sort_L_v2c[Checknode_linkVNs[row * maxdc + dc] + GFMultiply_GPU(h_inverse, x, TableMultiply_GPU)] - min; } } return 0; } __device__ int TMM_Get_Min_GPU(const int *Checknode_weight, int *TMM_Zn, float *TMM_deltaU, float *TMM_Min1, float *TMM_Min2, int *TMM_Min1_Col, int row) { // sort for (int q = 0; q < GFQ; q++) { // clear 
TMM_Min1[q] = DBL_MAX; TMM_Min2[q] = DBL_MAX; // search min and submin for (int dc = 0; dc < Checknode_weight[row]; dc++) { if (TMM_deltaU[dc * GFQ + q] < TMM_Min1[q]) { TMM_Min2[q] = TMM_Min1[q]; TMM_Min1[q] = TMM_deltaU[dc * GFQ + q]; TMM_Min1_Col[q] = dc; } else if (TMM_deltaU[dc * GFQ + q] < TMM_Min2[q]) { TMM_Min2[q] = TMM_deltaU[dc * GFQ + q]; } } } return 0; } __device__ int TMM_ConstructConf_GPU(const unsigned *TableAdd_GPU, float *TMM_deltaU, float *TMM_Min1, float *TMM_Min2, int *TMM_Min1_Col, float *TMM_I, int *TMM_Path, float *TMM_E) { // dQ[0] TMM_I[0] = 0; TMM_Path[0] = TMM_Path[1] = -1; TMM_E[0] = 0; double deviation1, deviation2; for (int i = 1; i < GFQ; i++) { // 1 deviation TMM_I[i] = TMM_deltaU[TMM_Min1_Col[i] * GFQ + i]; TMM_Path[i * 2 + 0] = TMM_Path[i * 2 + 1] = TMM_Min1_Col[i]; TMM_E[i] = TMM_Min2[i]; // 2 deviation for (int j = 0; j < GFQ; j++) { if (j != i) { int k = GFAdd_GPU(i, j, TableAdd_GPU); if (TMM_Min1_Col[j] != TMM_Min1_Col[k]) // { deviation1 = TMM_deltaU[TMM_Min1_Col[j] * GFQ + j]; deviation2 = TMM_deltaU[TMM_Min1_Col[k] * GFQ + k]; if (deviation1 > deviation2 && deviation1 < TMM_I[i]) { TMM_I[i] = deviation1; TMM_Path[i * 2 + 0] = TMM_Min1_Col[j]; TMM_Path[i * 2 + 1] = TMM_Min1_Col[k]; TMM_E[i] = TMM_Min1[i]; } else if (deviation1 < deviation2 && deviation2 < TMM_I[i]) { TMM_I[i] = deviation2; TMM_Path[i * 2 + 0] = TMM_Min1_Col[j]; TMM_Path[i * 2 + 1] = TMM_Min1_Col[k]; TMM_E[i] = TMM_Min1[i]; } } } } } return 0; }
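/*
 * Example (not part of the original file): the decoder above consumes precomputed GF(q)
 * lookup tables (TableMultiply_GPU, TableInverse_GPU, and TableAdd_GPU, although
 * GFAdd_GPU actually computes the XOR directly), but their construction is not shown
 * in this file. Below is a minimal host-side sketch for GF(2^m), assuming the caller
 * supplies the field size q = 2^m and a primitive polynomial (e.g. 0x13, i.e. x^4+x+1,
 * for GF(16)); the multiplication table uses the same row-major q*a + b indexing as
 * GFMultiply_GPU. Function names are illustration assumptions only.
 */
#include <cstddef>
#include <vector>

static unsigned gf2m_mul(unsigned a, unsigned b, unsigned primPoly, unsigned q) {
    unsigned r = 0;
    while (b) {
        if (b & 1u) r ^= a;       // carry-less multiply: add a when this bit of b is set
        b >>= 1;
        a <<= 1;
        if (a & q) a ^= primPoly; // reduce modulo the primitive polynomial (q = 2^m marks bit m)
    }
    return r;
}

void build_gf_tables(unsigned q, unsigned primPoly,
                     std::vector<unsigned> &tableAdd,
                     std::vector<unsigned> &tableMul,
                     std::vector<unsigned> &tableInv) {
    tableAdd.assign((size_t)q * q, 0u);
    tableMul.assign((size_t)q * q, 0u);
    tableInv.assign(q, 0u);
    for (unsigned a = 0; a < q; ++a) {
        for (unsigned b = 0; b < q; ++b) {
            tableAdd[(size_t)q * a + b] = a ^ b;                    // addition in GF(2^m) is XOR
            tableMul[(size_t)q * a + b] = gf2m_mul(a, b, primPoly, q);
            if (tableMul[(size_t)q * a + b] == 1u) tableInv[a] = b; // b is the inverse of a (a != 0)
        }
    }
}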
5105c410a6b5dee7adcc0f9bd0e322cccd48c4b2.cu
#include "Decode_GPU.cuh" #include <cmath> #include <iostream> #include <fstream> #include <string> #include <sstream> #include <assert.h> __device__ void BubleSort_GPU(float a[], int n, int index[]) { int i, j; float x; for (i = 0; i < n; i++) { for (j = 1; j < n - i; j++) { if (a[j - 1] < a[j]) { x = a[j]; a[j] = a[j - 1]; a[j - 1] = x; x = index[j]; index[j] = index[j - 1]; index[j - 1] = x; } } } } __device__ int SortLLRVector_GPU(int GF, float *Entr_v2c, int *index) { BubleSort_GPU(Entr_v2c, GF, index); return 1; } __device__ int DecideLLRVector_GPU(float *LLR, int GF) { float max = 0; int alpha_i; for (int q = 0; q < GF - 1; q++) { if (LLR[q] > max) { max = LLR[q]; alpha_i = q + 1; } } if (max <= 0) { return 0; } else { return alpha_i; } } __device__ void d_BubleSort_GPU(float a[], int n, int index[]) { int i, j; float x; for (i = 0; i < n; i++) { for (j = 1; j < n - i; j++) { if (a[j - 1] > a[j]) { x = a[j]; a[j] = a[j - 1]; a[j - 1] = x; x = index[j]; index[j] = index[j - 1]; index[j - 1] = x; } } } } __device__ int d_SortLLRVector_GPU(int GF, float *Entr_v2c, int *index) { d_BubleSort_GPU(Entr_v2c, GF, index); return 1; } __device__ int d_DecideLLRVector_GPU(float *LLR, int GF) { float min = DBL_MAX; int alpha_i; for (int q = 0; q < GFQ; q++) { if (LLR[q] < min) { min = LLR[q]; alpha_i = q; } } return alpha_i; } __device__ int GetCombCount(int n, int m) { long int i; long int a, b, c, s; // s = a/(b*c) a = b = c = 1; for (i = 1; i <= n; i++) a *= i; for (i = 1; i <= m; i++) b *= i; for (i = 1; i <= n - m; i++) c *= i; s = a / (b * c); return s; } __device__ void swap(int &a, int &b) { int temp = a; a = b; b = temp; } __device__ int GFAdd_GPU(int ele1, int ele2, const unsigned *TableAdd_GPU) { return ele1 ^ ele2; } __device__ int GFMultiply_GPU(int ele1, int ele2, const unsigned *TableMultiply_GPU) { return TableMultiply_GPU[GFQ * ele1 + ele2]; } __device__ int GFInverse_GPU(int ele, const unsigned *TableInverse_GPU) { if (ele == 0) { printf("Div 0 Error!\n"); } return TableInverse_GPU[ele]; } int Decoding_EMS_GPU(const LDPCCode *H, VN *Variablenode, CN *Checknode, int EMS_Nm, int EMS_Nc, int *DecodeOutput, const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, const int *Variablenode_weight, const int *Checknode_weight, const int *Variablenode_linkCNs, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, int &iter_number) { cudaError_t cudaStatus; // int *sort_Entr_v2c_temp = (int *)malloc(H->Variablenode_num * maxdv * GFQ * sizeof(int)); // memset(sort_Entr_v2c_temp, 0, H->Variablenode_num * maxdv * GFQ * sizeof(int)); int *sort_Entr_v2c; cudaMalloc((void **)&sort_Entr_v2c, H->Variablenode_num * maxdv * GFQ * sizeof(int)); float *sort_L_v2c_temp = (float *)malloc(H->Variablenode_num * maxdv * GFQ * sizeof(float)); memset(sort_L_v2c_temp, 0, H->Variablenode_num * maxdv * GFQ * sizeof(float)); float *sort_L_v2c; cudaMalloc((void **)&sort_L_v2c, H->Variablenode_num * maxdv * GFQ * sizeof(float)); float *Checknode_L_c2v_temp = (float *)malloc(H->Checknode_num * maxdc * GFQ * sizeof(float)); memset(Checknode_L_c2v_temp, 0, H->Checknode_num * maxdc * GFQ * sizeof(float)); float *Checknode_L_c2v; cudaMalloc((void **)&Checknode_L_c2v, H->Checknode_num * maxdc * GFQ * sizeof(float)); // int *index = (int *)malloc((GFQ) * sizeof(int)); float *L_ch_temp = (float *)malloc(H->Variablenode_num * (GFQ - 1) * sizeof(float)); memset(L_ch_temp, 0, H->Variablenode_num * (GFQ - 1) * sizeof(float)); float *L_ch; cudaMalloc((void **)&L_ch, H->Variablenode_num * (GFQ - 1) * 
sizeof(float)); float *LLR_temp = (float *)malloc(H->Variablenode_num * (GFQ - 1) * sizeof(float)); float *LLR; cudaMalloc((void **)&LLR, H->Variablenode_num * (GFQ - 1) * sizeof(float)); for (int col = 0; col < H->Variablenode_num; col++) { for (int d = 0; d < Variablenode[col].weight; d++) { Variablenode[col].L_ch[GFQ - 1] = 0; for (int q = 0; q < GFQ; q++) { sort_L_v2c_temp[col * maxdv * GFQ + d * GFQ + q] = Variablenode[col].L_ch[q]; Variablenode[col].sort_L_v2c[d][q] = Variablenode[col].L_ch[q]; } } for (int q = 0; q < GFQ; q++) { L_ch_temp[col * (GFQ - 1) + q] = Variablenode[col].L_ch[q]; } } cudaStatus = cudaMemcpy(L_ch, L_ch_temp, H->Variablenode_num * (GFQ - 1) * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("Cannot copy L_ch\n"); exit(0); } cudaStatus = cudaMemcpy(sort_L_v2c, sort_L_v2c_temp, H->Variablenode_num * maxdv * GFQ * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("Cannot copy sort_L_v2c\n"); exit(0); } cudaStatus = cudaMemcpy(Checknode_L_c2v, Checknode_L_c2v_temp, H->Checknode_num * maxdc * GFQ * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("Cannot copy Checknode_L_c2v\n"); exit(0); } for (int row = 0; row < H->Checknode_num; row++) { for (int d = 0; d < Checknode[row].weight; d++) { memset(Checknode[row].L_c2v[d], 0, (GFQ - 1) * sizeof(float)); } } iter_number = 0; bool decode_correct = true; int *DecodeOutput_GPU; cudaMalloc((void **)&DecodeOutput_GPU, H->Variablenode_num * sizeof(int)); while (iter_number < maxIT) { iter_number++; Variablenode_EMS<<<H->Variablenode_num, maxdv>>>((const int *)Variablenode_weight, (const int *)Variablenode_linkCNs, sort_Entr_v2c, sort_L_v2c, Checknode_L_c2v, (const float *)L_ch, LLR, DecodeOutput_GPU, H->Variablenode_num); cudaStatus = cudaMemcpy(DecodeOutput, DecodeOutput_GPU, H->Variablenode_num * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { printf("Cannot copy DecodeOutput\n"); exit(0); } // for (int i = 0; i < H->Variablenode_num; i++) // { // for (int q = 0; q < GFQ - 1; q++) // { // Variablenode[i].LLR[q] = LLR_temp[i * (GFQ - 1) + q]; // } // DecodeOutput[i] = DecideLLRVector(Variablenode[i].LLR, GFQ); // } decode_correct = true; int sum_temp = 0; for (int row = 0; row < H->Checknode_num; row++) { for (int i = 0; i < Checknode[row].weight; i++) { sum_temp = GFAdd(sum_temp, GFMultiply(DecodeOutput[Checknode[row].linkVNs[i]], Checknode[row].linkVNs_GF[i])); } if (sum_temp) { decode_correct = false; break; } } if (decode_correct) { cudaFree(sort_Entr_v2c); cudaFree(sort_L_v2c); cudaFree(Checknode_L_c2v); cudaFree(LLR); cudaFree(L_ch); cudaFree(DecodeOutput_GPU); free(L_ch_temp); free(LLR_temp); // free(index); // free(sort_Entr_v2c_temp); free(sort_L_v2c_temp); free(Checknode_L_c2v_temp); iter_number--; return 1; } // message from var to check // for (int col = 0; col < H->Variablenode_num; col++) // { // for (int dv = 0; dv < Variablenode[col].weight; dv++) // { // for (int q = 0; q < GFQ - 1; q++) // { // Variablenode[col].sort_L_v2c[dv][q] = Variablenode[col].LLR[q] - Checknode[Variablenode[col].linkCNs[dv]].L_c2v[index_in_CN(Variablenode, col, dv, Checknode)][q]; // } // Variablenode[col].sort_L_v2c[dv][GFQ - 1] = 0; // } // } // for (int col = 0; col < H->Variablenode_num; col++) // { // for (int dv = 0; dv < Variablenode[col].weight; dv++) // { // for (int i = 0; i < GFQ - 1; i++) // { // index[i] = i + 1; // } // index[GFQ - 1] = 0; // SortLLRVector(GFQ, Variablenode[col].sort_L_v2c[dv], index); // 
for (int i = 0; i < GFQ; i++) // { // Variablenode[col].sort_Entr_v2c[dv][i] = index[i]; // sort_Entr_v2c_temp[col * maxdv * GFQ + dv * GFQ + i] = index[i]; // sort_L_v2c_temp[col * maxdv * GFQ + dv * GFQ + i] = Variablenode[col].sort_L_v2c[dv][i]; // } // } // } Variablenode_Update_EMS<<<H->Variablenode_num, maxdv>>>((const int *)Variablenode_weight, (const int *)Variablenode_linkCNs, sort_Entr_v2c, sort_L_v2c, Checknode_L_c2v, (const float *)L_ch, LLR, H->Variablenode_num); // cudaStatus = cudaMemcpy(sort_Entr_v2c, sort_Entr_v2c_temp, H->Variablenode_num * maxdv * GFQ * sizeof(int), cudaMemcpyHostToDevice); // if (cudaStatus != cudaSuccess) // { // printf("Cannot copy sort_Entr_v2c\n"); // exit(0); // } // cudaStatus = cudaMemcpy(sort_L_v2c, sort_L_v2c_temp, H->Variablenode_num * maxdv * GFQ * sizeof(float), cudaMemcpyHostToDevice); // if (cudaStatus != cudaSuccess) // { // printf("Cannot copy sort_L_v2c\n"); // exit(0); // } // // message from check to var Checknode_EMS<<<H->Checknode_num, maxdc>>>((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, EMS_Nm, EMS_Nc, (const int *)Checknode_weight, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_Entr_v2c, sort_L_v2c, Checknode_L_c2v, H->Checknode_num); // Checknode_EMS<<<1, 1>>>((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, EMS_Nm, EMS_Nc, (const int *)Checknode_weight, (const int *)Variablenode_linkCNs, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_Entr_v2c, sort_L_v2c, Checknode_L_c2v, H->Checknode_num); // cudaStatus = cudaMemcpy(Checknode_L_c2v_temp, Checknode_L_c2v, H->Checknode_num * maxdc * GFQ * sizeof(float), cudaMemcpyDeviceToHost); // if (cudaStatus != cudaSuccess) // { // printf("Cannot copy Checknode_L_c2v D2V\n"); // exit(0); // } // for (int i = 0; i < H->Checknode_num; i++) // { // for (int j = 0; j < Checknode[i].weight; j++) // { // for (int q = 0; q < GFQ - 1; q++) // { // Checknode[i].L_c2v[j][q] = Checknode_L_c2v_temp[i * maxdc * GFQ + j * GFQ + q]; // } // } // } } cudaFree(sort_Entr_v2c); cudaFree(sort_L_v2c); cudaFree(Checknode_L_c2v); cudaFree(LLR); cudaFree(L_ch); free(L_ch_temp); free(LLR_temp); cudaFree(DecodeOutput_GPU); // free(index); // free(sort_Entr_v2c_temp); free(sort_L_v2c_temp); free(Checknode_L_c2v_temp); return 0; } __global__ void Variablenode_EMS(const int *Variablenode_weight, const int *Variablenode_linkCNs, int *sort_Entr_v2c, float *sort_L_v2c, float *Checknode_L_c2v, const float *L_ch, float *LLR, int *DecodeOutput, int Variablenode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; if (offset < Variablenode_num * maxdv) { int d = offset % maxdv; offset = offset / maxdv; if (d < Variablenode_weight[offset]) { if (d == 0) { for (int q = 0; q < GFQ - 1; q++) { LLR[offset * (GFQ - 1) + q] = L_ch[offset * (GFQ - 1) + q]; } } __syncthreads(); // for (int d = 0; d < Variablenode_weight[offset]; d++) // { for (int q = 0; q < GFQ - 1; q++) { atomicAdd(&LLR[offset * (GFQ - 1) + q], Checknode_L_c2v[Variablenode_linkCNs[offset * maxdv + d] + q]); } // } __syncthreads(); if (d == 0) { DecodeOutput[offset] = DecideLLRVector_GPU(LLR + offset * (GFQ - 1), GFQ); } } } } __global__ void Variablenode_Update_EMS(const int *Variablenode_weight, const int *Variablenode_linkCNs, int *sort_Entr_v2c, float *sort_L_v2c, float *Checknode_L_c2v, const float *L_ch, float *LLR, int Variablenode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; int *index = (int *)malloc(GFQ * sizeof(int)); if (offset 
< Variablenode_num * maxdv) { int dv = offset % maxdv; offset = offset / maxdv; if (dv < Variablenode_weight[offset]) { // for (int dv = 0; dv < Variablenode_weight[offset]; dv++) // { for (int q = 0; q < GFQ - 1; q++) { sort_L_v2c[offset * maxdv * GFQ + dv * GFQ + q] = LLR[offset * (GFQ - 1) + q] - Checknode_L_c2v[Variablenode_linkCNs[offset * maxdv + dv] + q]; } sort_L_v2c[offset * maxdv * GFQ + dv * GFQ + GFQ - 1] = 0; // } // for (int dv = 0; dv < Variablenode_weight[offset]; dv++) // { for (int i = 0; i < GFQ - 1; i++) { index[i] = i + 1; } index[GFQ - 1] = 0; SortLLRVector_GPU(GFQ, sort_L_v2c + offset * maxdv * GFQ + dv * GFQ, index); for (int i = 0; i < GFQ; i++) { sort_Entr_v2c[offset * maxdv * GFQ + dv * GFQ + i] = index[i]; } // } } } free(index); } /* Checknode_weight: the weight (degree) of each check node. L_c2v: Q messages per edge, (number of check nodes)*Q entries in total. Variblenode_linkCNs: maximum weight dv; each group of dv elements holds the indices of the dv check nodes connected to one variable node. Checknode_linkVNS: maximum weight dc; each group of dc elements holds the indices of the dc variable nodes connected to one check node. Checknode_linkVNS_GF: maximum weight dc; each group of dc elements holds the GF values on the edges to those dc variable nodes. sort_Entr_v2c: per variable node of weight dv, q entries for each of the dv edges, times the number of variable nodes, i.e. [variable node count][variable node weight][q]. sort_L_v2c: the LLRs corresponding to sort_Entr_v2c. Checknode_L_c2v: per check node of weight dc, q entries for each of the dc edges, times the number of check nodes, i.e. [check node count][check node weight][q] */ __global__ void Checknode_EMS(const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, int EMS_Nm, int EMS_Nc, const int *Checknode_weight, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, int *sort_Entr_v2c, float *sort_L_v2c, float *Checknode_L_c2v, int Checknode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; if (offset < Checknode_num * maxdc) { float EMS_L_c2v[GFQ]; int dc = offset % maxdc; offset = offset / maxdc; if (dc < Checknode_weight[offset]) { // reset the sum store vector to the minimum for (int q = 0; q < GFQ; q++) { EMS_L_c2v[q] = -DBL_MAX; } // recursively and exhaustively int sumNonele; float sumNonLLR; // conf(q, 1) sumNonele = 0; sumNonLLR = 0; // ConstructConf_GPU((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, GFQ, 1, sumNonele, sumNonLLR, diff, 0, dc, Checknode_weight[offset] - 1, offset, EMS_L_c2v, (const int *)Variblenode_linkCNs, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_Entr_v2c, sort_L_v2c); for (int i = 0; i < Checknode_weight[offset]; i++) { if (i == dc) { continue; } sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); sumNonLLR = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i]]; } if (sumNonLLR > EMS_L_c2v[sumNonele]) { EMS_L_c2v[sumNonele] = sumNonLLR; } int sumNonele_all_max = sumNonele; float sumNonLLR_all_max = sumNonLLR; for (int i = 0; i < Checknode_weight[offset]; i++) { if (i == dc) { continue; } sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele_all_max, TableAdd_GPU); sumNonLLR = sumNonLLR_all_max - sort_L_v2c[Checknode_linkVNs[offset * maxdc + i]]; for (int k = 1; k < GFQ; k++) { int sumNonele1 = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i] + k], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); float sumNonLLR1 = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i] + k]; if (sumNonLLR1 > EMS_L_c2v[sumNonele1]) { EMS_L_c2v[sumNonele1] = sumNonLLR1; } } } // conf(nm, nc) // sumNonele = 0; // sumNonLLR = 0; // diff = 0; // ConstructConf_GPU((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, EMS_Nm,
EMS_Nc, sumNonele, sumNonLLR, diff, 0, dc, Checknode_weight[offset] - 1, offset, EMS_L_c2v, (const int *)Variblenode_linkCNs, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_Entr_v2c, sort_L_v2c); int *bit = new int[Checknode_weight[offset] - 1]; int EMS_Nc_temp; if (EMS_Nc == maxdc - 1) { EMS_Nc_temp = Checknode_weight[offset] - 1; } else { EMS_Nc_temp = EMS_Nc; } for (int choose_n = 2; choose_n <= EMS_Nc_temp; choose_n++) { for (int k = 0; k < Checknode_weight[offset] - 1; k++) { if (k < choose_n) bit[k] = 1; else bit[k] = 0; } int i, j, beg, end; int len = Checknode_weight[offset] - 1; int N = GetCombCount(Checknode_weight[offset] - 1, choose_n); //C(n,count) C(5,3) int *conf_index = (int *)malloc(choose_n * sizeof(int)); memset(conf_index, 0, (choose_n) * sizeof(int)); int flag = 0; while (!flag) { sumNonele = 0; sumNonLLR = 0; for (int i = 0; i < choose_n; i++) { conf_index[i] += 1; // move confset[i] to smaller one if (i == choose_n - 1 && conf_index[i] == EMS_Nm) { // reaches end flag = 1; break; } else if (conf_index[i] >= EMS_Nm) { conf_index[i] = 0; // continue to modify next VN } else { break; // don't modify next VN } } if (!flag) { int k = 0; int t = 0; for (int i = 0; i < Checknode_weight[offset]; i++) { if (i == dc) { continue; } if (bit[t] == 1) { sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i] + conf_index[k]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); sumNonLLR = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i] + conf_index[k]]; k++; } else { sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); sumNonLLR = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i]]; } t++; } if (sumNonLLR > EMS_L_c2v[sumNonele]) { EMS_L_c2v[sumNonele] = sumNonLLR; } } } for (j = 1; j < N; j++) { for (i = len - 1; i > 0; i--) { if (bit[i] == 0 && bit[i - 1] == 1) { swap(bit[i], bit[i - 1]); //from index: [i to len-1] , make all bit 1 in the right beg = i; end = len - 1; while (1) { while (bit[beg] == 1) { beg++; if (beg >= len) break; } while (bit[end] == 0) { end--; if (end < i) break; } if (beg < end) swap(bit[beg], bit[end]); else break; } //end of "while" break; } //end of "if" } flag = 0; memset(conf_index, 0, (choose_n) * sizeof(int)); while (!flag) { sumNonele = 0; sumNonLLR = 0; for (int i = 0; i < choose_n; i++) { conf_index[i] += 1; // move confset[i] to smaller one if (i == choose_n - 1 && conf_index[i] == EMS_Nm) { // reaches end flag = 1; break; } else if (conf_index[i] >= EMS_Nm) { conf_index[i] = 0; // continue to modify next VN } else { break; // don't modify next VN } } if (!flag) { int k = 0; int t = 0; for (int i = 0; i < Checknode_weight[offset]; i++) { if (i == dc) { continue; } if (bit[t] == 1) { sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i] + conf_index[k]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); sumNonLLR = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i] + conf_index[k]]; k++; } else { sumNonele = GFAdd_GPU(GFMultiply_GPU(sort_Entr_v2c[Checknode_linkVNs[offset * maxdc + i]], Checknode_linkVNs_GF[offset * maxdc + i], TableMultiply_GPU), sumNonele, TableAdd_GPU); sumNonLLR = sumNonLLR + sort_L_v2c[Checknode_linkVNs[offset * maxdc + i]]; } t++; } if (sumNonLLR > EMS_L_c2v[sumNonele]) { EMS_L_c2v[sumNonele] = sumNonLLR; 
} } } } free(conf_index); } free(bit); // calculate each c2v LLR int v = 0; Checknode_L_c2v[offset * maxdc * GFQ + dc * GFQ + GFQ - 1] = 0; for (int k = 1; k < GFQ; k++) { v = GFMultiply_GPU(k, Checknode_linkVNs_GF[offset * maxdc + dc], TableMultiply_GPU); Checknode_L_c2v[offset * maxdc * GFQ + dc * GFQ + k - 1] = (EMS_L_c2v[v] - EMS_L_c2v[0]) / 1.2; } } else { for (int k = 0; k < GFQ; k++) { Checknode_L_c2v[offset * maxdc * GFQ + dc * GFQ + k] = 0; } } } } int Decoding_TMM_GPU(const LDPCCode *H, VN *Variablenode, CN *Checknode, int EMS_Nm, int EMS_Nc, int *DecodeOutput, const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, const unsigned *TableInverse_GPU, const int *Variablenode_weight, const int *Checknode_weight, const int *Variablenode_linkCNs, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, int &iter_number) { cudaError_t cudaStatus; float *sort_L_v2c_temp = (float *)malloc(H->Variablenode_num * maxdv * GFQ * sizeof(float)); memset(sort_L_v2c_temp, 0, H->Variablenode_num * maxdv * GFQ * sizeof(float)); float *sort_L_v2c; cudaMalloc((void **)&sort_L_v2c, H->Variablenode_num * maxdv * GFQ * sizeof(float)); float *Checknode_L_c2v_temp = (float *)malloc(H->Checknode_num * maxdc * GFQ * sizeof(float)); memset(Checknode_L_c2v_temp, 0, H->Checknode_num * maxdc * GFQ * sizeof(float)); float *Checknode_L_c2v; cudaMalloc((void **)&Checknode_L_c2v, H->Checknode_num * maxdc * GFQ * sizeof(float)); float *LLR_temp = (float *)malloc(H->Variablenode_num * GFQ * sizeof(float)); memset(LLR_temp, 0, H->Variablenode_num * GFQ * sizeof(float)); float *LLR; cudaMalloc((void **)&LLR, H->Variablenode_num * GFQ * sizeof(float)); float max = -DBL_MAX; for (int col = 0; col < H->Variablenode_num; col++) { max = -DBL_MAX; for (int q = 0; q < GFQ - 1; q++) { if (Variablenode[col].L_ch[q] > max) { max = Variablenode[col].L_ch[q]; } } for (int d = 0; d < Variablenode[col].weight; d++) { for (int q = 0; q < GFQ; q++) { if (q == 0) { Variablenode[col].sort_L_v2c[d][q] = max; Variablenode[col].LLR[q] = max; sort_L_v2c_temp[col * maxdv * GFQ + d * GFQ + q] = max; LLR_temp[col * GFQ + q] = max; } else { sort_L_v2c_temp[col * maxdv * GFQ + d * GFQ + q] = max - Variablenode[col].L_ch[q - 1]; Variablenode[col].sort_L_v2c[d][q] = max - Variablenode[col].L_ch[q - 1]; Variablenode[col].LLR[q] = max - Variablenode[col].L_ch[q - 1]; LLR_temp[col * GFQ + q] = max - Variablenode[col].L_ch[q - 1]; } } } } cudaStatus = cudaMemcpy(LLR, LLR_temp, H->Variablenode_num * GFQ * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("Cannot copy LLR\n"); exit(0); } cudaStatus = cudaMemcpy(sort_L_v2c, sort_L_v2c_temp, H->Variablenode_num * maxdv * GFQ * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("Cannot copy sort_L_v2c\n"); exit(0); } cudaStatus = cudaMemcpy(Checknode_L_c2v, Checknode_L_c2v_temp, H->Checknode_num * maxdc * GFQ * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("Cannot copy Checknode_L_c2v\n"); exit(0); } for (int row = 0; row < H->Checknode_num; row++) { for (int d = 0; d < Checknode[row].weight; d++) { memset(Checknode[row].L_c2v[d], 0, GFQ * sizeof(float)); } } iter_number = 0; bool decode_correct = true; int *DecodeOutput_GPU; cudaMalloc((void **)&DecodeOutput_GPU, H->Variablenode_num * sizeof(int)); while (iter_number < maxIT) { iter_number++; Variablenode_TMM<<<H->Variablenode_num, maxdv>>>((const int *)Variablenode_weight, (const int *)Variablenode_linkCNs, sort_L_v2c, Checknode_L_c2v, LLR, 
DecodeOutput_GPU, H->Variablenode_num); cudaStatus = cudaMemcpy(DecodeOutput, DecodeOutput_GPU, H->Variablenode_num * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { printf("Cannot copy DecodeOutput\n"); exit(0); } decode_correct = true; int sum_temp = 0; for (int row = 0; row < H->Checknode_num; row++) { for (int i = 0; i < Checknode[row].weight; i++) { sum_temp = GFAdd(sum_temp, GFMultiply(DecodeOutput[Checknode[row].linkVNs[i]], Checknode[row].linkVNs_GF[i])); } if (sum_temp) { decode_correct = false; break; } } if (decode_correct) { cudaFree(sort_L_v2c); cudaFree(Checknode_L_c2v); cudaFree(LLR); cudaFree(DecodeOutput_GPU); free(LLR_temp); free(sort_L_v2c_temp); free(Checknode_L_c2v_temp); iter_number--; return 1; } Variablenode_Update_TMM<<<H->Variablenode_num, maxdv>>>((const int *)Variablenode_weight, (const int *)Variablenode_linkCNs, sort_L_v2c, Checknode_L_c2v, LLR, H->Variablenode_num); Checknode_TMM<<<H->Checknode_num, 1>>>((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, (const unsigned *)TableInverse_GPU, (const int *)Checknode_weight, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_L_v2c, Checknode_L_c2v, H->Checknode_num); } cudaFree(sort_L_v2c); cudaFree(Checknode_L_c2v); cudaFree(LLR); free(LLR_temp); cudaFree(DecodeOutput_GPU); free(sort_L_v2c_temp); free(Checknode_L_c2v_temp); return 0; } __global__ void Variablenode_TMM(const int *Variablenode_weight, const int *Variablenode_linkCNs, float *sort_L_v2c, float *Checknode_L_c2v, float *LLR, int *DecodeOutput, int Variablenode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; if (offset < Variablenode_num * maxdv) { int d = offset % maxdv; offset = offset / maxdv; if (d < Variablenode_weight[offset]) { // for (int d = 0; d < Variablenode_weight[offset]; d++) // { for (int q = 0; q < GFQ; q++) { atomicAdd(&LLR[offset * GFQ + q], Checknode_L_c2v[Variablenode_linkCNs[offset * maxdv + d] + q]); } // } } __syncthreads(); if (d == 0) { DecodeOutput[offset] = d_DecideLLRVector_GPU(LLR + offset * GFQ, GFQ); } } } __global__ void Variablenode_Update_TMM(const int *Variablenode_weight, const int *Variablenode_linkCNs, float *sort_L_v2c, float *Checknode_L_c2v, float *LLR, int Variablenode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; if (offset < Variablenode_num * maxdv) { int dv = offset % maxdv; offset = offset / maxdv; if (dv < Variablenode_weight[offset]) { // for (int dv = 0; dv < Variablenode_weight[offset]; dv++) // { for (int q = 0; q < GFQ; q++) { sort_L_v2c[offset * maxdv * GFQ + dv * GFQ + q] = LLR[offset * GFQ + q] - Checknode_L_c2v[Variablenode_linkCNs[offset * maxdv + dv] + q]; } // } } } } __global__ void Checknode_TMM(const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, const unsigned *TableInverse_GPU, const int *Checknode_weight, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, float *sort_L_v2c, float *Checknode_L_c2v, int Checknode_num) { int offset; offset = threadIdx.x + blockDim.x * blockIdx.x; if (offset < Checknode_num) { int *TMM_Zn = (int *)malloc(maxdc * sizeof(int)); float *TMM_deltaU = (float *)malloc(maxdc * GFQ * sizeof(float)); float *TMM_Min1 = (float *)malloc(GFQ * sizeof(float)); float *TMM_Min2 = (float *)malloc(GFQ * sizeof(float)); int *TMM_Min1_Col = (int *)malloc(GFQ * sizeof(int)); float *TMM_I = (float *)malloc(GFQ * sizeof(float)); int *TMM_Path = (int *)malloc(GFQ * 2 * sizeof(int)); float *TMM_E = (float *)malloc(GFQ * sizeof(float)); float *TMM_Lc2p = (float 
*)malloc(GFQ * sizeof(float)); int TMM_Syndrome = 0; d_TMM_Get_Zn_GPU((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, (const unsigned *)TableInverse_GPU, (const int *)Checknode_weight, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_L_v2c, Checknode_L_c2v, TMM_Zn, offset, TMM_Syndrome); d_TMM_Get_deltaU_GPU((const unsigned *)TableMultiply_GPU, (const unsigned *)TableAdd_GPU, (const unsigned *)TableInverse_GPU, (const int *)Checknode_weight, (const int *)Checknode_linkVNs, (const int *)Checknode_linkVNs_GF, sort_L_v2c, Checknode_L_c2v, TMM_Zn, TMM_deltaU, offset); TMM_Get_Min_GPU(Checknode_weight, TMM_Zn, TMM_deltaU, TMM_Min1, TMM_Min2, TMM_Min1_Col, offset); TMM_ConstructConf_GPU((const unsigned *)TableInverse_GPU, TMM_deltaU, TMM_Min1, TMM_Min2, TMM_Min1_Col, TMM_I, TMM_Path, TMM_E); for (int dc = 0; dc < Checknode_weight[offset]; dc++) { // choose to output TMM_Lc2p[0] = 0; for (int eta = 1; eta < GFQ; eta++) { if (dc != TMM_Path[eta * 2 + 0] && dc != TMM_Path[eta * 2 + 1]) { TMM_Lc2p[eta] = TMM_I[eta]; } else { TMM_Lc2p[eta] = TMM_E[eta]; } } int h_inverse = GFInverse_GPU(Checknode_linkVNs_GF[offset * maxdc + dc], TableInverse_GPU); int beta_syn = GFAdd_GPU(TMM_Syndrome, TMM_Zn[dc], TableAdd_GPU); double L0 = TMM_Lc2p[beta_syn]; for (int eta = 0; eta < GFQ; eta++) { int beta = GFMultiply_GPU(h_inverse, GFAdd_GPU(eta, beta_syn, TableAdd_GPU), TableMultiply_GPU); Checknode_L_c2v[offset * maxdc * GFQ + dc * GFQ + beta] = (TMM_Lc2p[eta]) * 0.8; } } free(TMM_Zn); free(TMM_deltaU); free(TMM_Min1); free(TMM_Min2); free(TMM_Min1_Col); free(TMM_I); free(TMM_Path); free(TMM_E); free(TMM_Lc2p); } } __device__ int d_TMM_Get_Zn_GPU(const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, const unsigned *TableInverse_GPU, const int *Checknode_weight, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, float *sort_L_v2c, float *Checknode_L_c2v, int *TMM_Zn, int row, int &TMM_Syndrome) { TMM_Syndrome = 0; for (int dc = 0; dc < Checknode_weight[row]; dc++) { double min = DBL_MAX; int min_ele = 0; for (int q = 0; q < GFQ; q++) { if (sort_L_v2c[Checknode_linkVNs[row * maxdc + dc] + q] < min) { min = sort_L_v2c[Checknode_linkVNs[row * maxdc + dc] + q]; min_ele = GFMultiply_GPU(q, Checknode_linkVNs_GF[row * maxdc + dc], TableMultiply_GPU); } } TMM_Zn[dc] = min_ele; TMM_Syndrome = GFAdd_GPU(TMM_Syndrome, min_ele, TableAdd_GPU); } return 0; } __device__ int d_TMM_Get_deltaU_GPU(const unsigned *TableMultiply_GPU, const unsigned *TableAdd_GPU, const unsigned *TableInverse_GPU, const int *Checknode_weight, const int *Checknode_linkVNs, const int *Checknode_linkVNs_GF, float *sort_L_v2c, float *Checknode_L_c2v, int *TMM_Zn, float *TMM_deltaU, int row) { for (int dc = 0; dc < Checknode_weight[row]; dc++) { int h_inverse = GFInverse_GPU(Checknode_linkVNs_GF[row * maxdc + dc], TableInverse_GPU); int beta_p = GFMultiply_GPU(h_inverse, TMM_Zn[dc], TableMultiply_GPU); float min = sort_L_v2c[Checknode_linkVNs[row * maxdc + dc] + beta_p]; for (int x = 0; x < GFQ; x++) { int eta = GFAdd_GPU(x, TMM_Zn[dc], TableAdd_GPU); TMM_deltaU[dc * GFQ + eta] = sort_L_v2c[Checknode_linkVNs[row * maxdc + dc] + GFMultiply_GPU(h_inverse, x, TableMultiply_GPU)] - min; } } return 0; } __device__ int TMM_Get_Min_GPU(const int *Checknode_weight, int *TMM_Zn, float *TMM_deltaU, float *TMM_Min1, float *TMM_Min2, int *TMM_Min1_Col, int row) { // sort for (int q = 0; q < GFQ; q++) { // clear TMM_Min1[q] = DBL_MAX; TMM_Min2[q] = DBL_MAX; // search min and submin for (int dc = 0; dc < 
Checknode_weight[row]; dc++) { if (TMM_deltaU[dc * GFQ + q] < TMM_Min1[q]) { TMM_Min2[q] = TMM_Min1[q]; TMM_Min1[q] = TMM_deltaU[dc * GFQ + q]; TMM_Min1_Col[q] = dc; } else if (TMM_deltaU[dc * GFQ + q] < TMM_Min2[q]) { TMM_Min2[q] = TMM_deltaU[dc * GFQ + q]; } } } return 0; } __device__ int TMM_ConstructConf_GPU(const unsigned *TableAdd_GPU, float *TMM_deltaU, float *TMM_Min1, float *TMM_Min2, int *TMM_Min1_Col, float *TMM_I, int *TMM_Path, float *TMM_E) { // dQ[0] TMM_I[0] = 0; TMM_Path[0] = TMM_Path[1] = -1; TMM_E[0] = 0; double deviation1, deviation2; for (int i = 1; i < GFQ; i++) { // 1 deviation TMM_I[i] = TMM_deltaU[TMM_Min1_Col[i] * GFQ + i]; TMM_Path[i * 2 + 0] = TMM_Path[i * 2 + 1] = TMM_Min1_Col[i]; TMM_E[i] = TMM_Min2[i]; // 2 deviation for (int j = 0; j < GFQ; j++) { if (j != i) { int k = GFAdd_GPU(i, j, TableAdd_GPU); if (TMM_Min1_Col[j] != TMM_Min1_Col[k]) // 不在同一列 { deviation1 = TMM_deltaU[TMM_Min1_Col[j] * GFQ + j]; deviation2 = TMM_deltaU[TMM_Min1_Col[k] * GFQ + k]; if (deviation1 > deviation2 && deviation1 < TMM_I[i]) { TMM_I[i] = deviation1; TMM_Path[i * 2 + 0] = TMM_Min1_Col[j]; TMM_Path[i * 2 + 1] = TMM_Min1_Col[k]; TMM_E[i] = TMM_Min1[i]; } else if (deviation1 < deviation2 && deviation2 < TMM_I[i]) { TMM_I[i] = deviation2; TMM_Path[i * 2 + 0] = TMM_Min1_Col[j]; TMM_Path[i * 2 + 1] = TMM_Min1_Col[k]; TMM_E[i] = TMM_Min1[i]; } } } } } return 0; }
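The TMM check-node kernel above builds its scratch arrays (TMM_Zn, TMM_deltaU, TMM_Min1, ...) with in-kernel malloc and free. As a minimal, self-contained sketch of that pattern only — the scratchKernel name, the sizes, and the 64 MB heap limit below are illustrative assumptions, not taken from the decoder:

#include <cstdio>
#include <cuda_runtime.h>

// Each thread allocates a small scratch buffer from the device heap,
// uses it, and frees it again -- the same in-kernel malloc/free pattern
// the TMM check-node kernel relies on for TMM_Zn, TMM_deltaU, etc.
__global__ void scratchKernel(int n, int *out)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int *scratch = (int *)malloc(n * sizeof(int));
    if (scratch == NULL) { out[id] = -1; return; }  // device malloc can fail
    int sum = 0;
    for (int i = 0; i < n; i++) { scratch[i] = id + i; sum += scratch[i]; }
    out[id] = sum;
    free(scratch);
}

int main()
{
    const int threads = 256, n = 64;
    // Enlarge the device malloc heap before the first launch; the default
    // (8 MB) can run out when every thread allocates its own scratch space.
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, 64u << 20);

    int *d_out;
    cudaMalloc(&d_out, threads * sizeof(int));
    scratchKernel<<<1, threads>>>(n, d_out);
    cudaDeviceSynchronize();

    int h_out[threads];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("thread 0 sum = %d\n", h_out[0]);
    cudaFree(d_out);
    return 0;
}

A failed device-side malloc returns NULL rather than raising an error, which is one argument for fixed-size local arrays such as the float EMS_L_c2v[GFQ] used in Checknode_EMS.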
63fe4e94fdb2e84ef5a9c454d22de3efac8a80ab.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Kernel program for the GPU (compiled by NVCC)
__global__ void addKernel(int* c, const int * a, const int * b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
63fe4e94fdb2e84ef5a9c454d22de3efac8a80ab.cu
#include "includes.h" // GPU¸¦ À§ÇÑ Ä¿³Î ÇÁ·Î±×·¥(NVCC°¡ ÄÄÆÄÀÏÇÔ) __global__ void addKernel(int* c, const int * a, const int * b) { int i = threadIdx.x; c[i] = a[i] + b[i]; }
1e10ccd431055b0a55d5e330162c0b702bdfa698.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* MCMPC.cu */ #include<stdio.h> #include "../include/MCMPC.cuh" void MCMPC_by_weighted_mean( InputSequences *Output, MonteCarloMPC *PrCtr, int uIndex) { float totalWeight = 0.0f; float temp[HORIZON] = { }; for(int i = 0; i < NUM_OF_ELITESAMPLE; i++){ if(isnan(PrCtr[i].W)) { totalWeight += 0.0f; }else{ totalWeight += PrCtr[i].W; } } for(int i = 0; i < HORIZON; i++){ for(int k = 0; k < NUM_OF_ELITESAMPLE; k++){ if(isnan(PrCtr[k].W)) { temp[i] += 0.0f; }else{ temp[i] += (PrCtr[k].W * PrCtr[k].InputSeq[uIndex][i]) / totalWeight; } } if(isnan(temp[i])) { Output[i].InputSeq[uIndex] = 0.0f; }else{ Output[i].InputSeq[uIndex] = temp[i]; } } } void StateUpdate( Controller *CtrPrm, float *hSt) { for(int i = 0; i < DIM_OF_STATE; i++) { CtrPrm->State[i] = hSt[i]; } } __device__ void MemCpyInThread(float *prm, float *cnstrnt, float *mtrx, Controller *Ctr) { for(int i = 0; i < NUM_OF_PARAMS; i++){ prm[i] = Ctr->Param[i]; } for(int i = 0; i < NUM_OF_CONSTRAINTS; i++){ cnstrnt[i] = Ctr->Constraints[i]; } for(int i = 0; i < DIM_OF_WEIGHT_MATRIX; i++){ mtrx[i] = Ctr->WeightMatrix[i]; } } __global__ void setup_kernel(hiprandState_t *state,int seed) { unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ hiprand_init(seed, id, 0, &state[id]); } __device__ float gen_u(unsigned int id, hiprandState_t *state, float ave, float vr) { float u; hiprandState_t localState = state[id]; u = hiprand_normal(&localState) * vr + ave; return u; } __global__ void MCMPC_callback_elite_sample(MonteCarloMPC *OutPtElt, MonteCarloMPC *AllSmplDt, int *indices) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; OutPtElt[id] = AllSmplDt[indices[id]]; __syncthreads(); /*OutPtElt[id].W = AllSmplDt[indices[id]].W; OutPtElt[id].L = AllSmplDt[indices[id]].L; for(int t = 0; t < HORIZON; t++){ for(intk = 0; k < DIM_OF_U; k++){ OutPtElt[id].InputSeq[k][t] = AllSmplDt[indices[id]].L } }*/ } __global__ void MCMPC_callback_elite_sample_by_IT(MonteCarloMPC *OutPtElt, MonteCarloMPC *AllSmplDt, int *indices) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; // OutPtElt[id] = AllSmplDt[indices[id]]; // __syncthreads(); float mdCost = 0.0f; float lambda = 1.0f; OutPtElt[id].L = AllSmplDt[indices[id]].L; mdCost = AllSmplDt[indices[id]].L - AllSmplDt[indices[0]].L; OutPtElt[id].W = exp( -mdCost / lambda ); for(int t = 0; t < HORIZON; t++){ for(int k = 0; k < DIM_OF_U; k++){ OutPtElt[id].InputSeq[k][t] = AllSmplDt[indices[id]].L; } } } __global__ void MCMPC_Cart_and_Single_Pole(MonteCarloMPC *PrCtr, hiprandState_t *randomSeed, Controller *Ctr, InputSequences *mean, float var, float *cost) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float stageCost = 0.0f; float totalCost = 0.0f; // float u[HORIZON] = { }; InputSequences *InputSeqInThread; InputSeqInThread = (InputSequences *)malloc(sizeof(InputSeqInThread) * HORIZON); float stateInThisThreads[DIM_OF_STATE] = { }; float dstateInThisThreads[DIM_OF_STATE] = { }; float d_param[NUM_OF_PARAMS], d_constraints[NUM_OF_CONSTRAINTS], d_matrix[DIM_OF_WEIGHT_MATRIX]; MemCpyInThread(d_param, d_constraints, d_matrix, Ctr); for(int i = 0; i < DIM_OF_STATE; i++){ stateInThisThreads[i] = Ctr->State[i]; } for(int t = 0; t < HORIZON; t++) { for(int uIndex = 0; uIndex < DIM_OF_U; uIndex++ ){ if(isnan(mean[t].InputSeq[uIndex])){ //u[t] = d_data[0].Input[t]; if(t < HORIZON -1){ // u[t] = gen_u(seq, 
randomSeed, PrCtr[0].InputSeq[uIndex][t+1], var); InputSeqInThread[t].InputSeq[uIndex] = gen_u(seq, randomSeed, PrCtr[0].InputSeq[uIndex][t+1], var); seq += NUM_OF_SAMPLES; }else{ // u[t] = gen_u(seq, randomSeed, PrCtr[0].InputSeq[uIndex][HORIZON - 1], var); InputSeqInThread[t].InputSeq[uIndex] = gen_u(seq, randomSeed, PrCtr[0].InputSeq[uIndex][HORIZON-1], var); seq += NUM_OF_SAMPLES; } }else{ // u[t] = gen_u(seq, randomSeed, mean[t].InputSeq[uIndex], var); InputSeqInThread[t].InputSeq[uIndex] = gen_u(seq, randomSeed, mean[t].InputSeq[uIndex], var); seq += NUM_OF_SAMPLES; } if(InputSeqInThread[t].InputSeq[uIndex] < Ctr->Constraints[0]){ InputSeqInThread[t].InputSeq[uIndex] = Ctr->Constraints[0]; } if(InputSeqInThread[t].InputSeq[uIndex] > Ctr->Constraints[1]){ InputSeqInThread[t].InputSeq[uIndex] = Ctr->Constraints[1]; } } for(int sec = 0; sec < 2; sec++){ dstateInThisThreads[0] = stateInThisThreads[2]; dstateInThisThreads[1] = stateInThisThreads[3]; /* dstateInThisThreads[2] = Cart_type_Pendulum_ddx(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); //ddx dstateInThisThreads[3] = Cart_type_Pendulum_ddtheta(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); */ dstateInThisThreads[2] = Cart_type_Pendulum_ddx(InputSeqInThread[t].InputSeq[0], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); //ddx dstateInThisThreads[3] = Cart_type_Pendulum_ddtheta(InputSeqInThread[t].InputSeq[0], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); stateInThisThreads[2] = stateInThisThreads[2] + (interval * dstateInThisThreads[2]); stateInThisThreads[3] = stateInThisThreads[3] + (interval * dstateInThisThreads[3]); stateInThisThreads[0] = stateInThisThreads[0] + (interval * dstateInThisThreads[0]); stateInThisThreads[1] = stateInThisThreads[1] + (interval * dstateInThisThreads[1]); #ifdef COLLISION if(stateInThisThreads[0] <= d_constraints[2]){ float collide[3] = { }; collide[0] = d_param[0] * d_param[1] * cosf(stateInThisThreads[1]); collide[1] = d_param[2] + d_param[0] * powf(d_param[1],2); collide[2] = collide[0] / collide[1]; stateInThisThreads[3] = stateInThisThreads[3] + (1 + d_param[7]) * collide[2] * stateInThisThreads[2]; //dtheta = dtheta + (1+e) * F * dx stateInThisThreads[2] = -d_param[7] * stateInThisThreads[2]; // dx = -e * dx stateInThisThreads[0] = d_constraints[2]; } if(d_constraints[3] <= stateInThisThreads[0]){ float collide[3] = { }; collide[0] = d_param[0] * d_param[1] * cosf(stateInThisThreads[1]); collide[1] = d_param[2] + d_param[0] * powf(d_param[1],2); collide[2] = collide[0] / collide[1]; stateInThisThreads[3] = stateInThisThreads[3] + (1 + d_param[7]) * collide[2] * stateInThisThreads[2]; //dtheta = dtheta + (1+e) * F * dx stateInThisThreads[2] = -d_param[7] * stateInThisThreads[2]; // dx = -e * dx stateInThisThreads[0] = d_constraints[3]; } #endif } while(stateInThisThreads[1] > M_PI) stateInThisThreads[1] -= (2 * M_PI); while(stateInThisThreads[1] < -M_PI) stateInThisThreads[1] += (2 * M_PI); /* stageCost = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4]; */ stageCost = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + 
stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + InputSeqInThread[t].InputSeq[0] * InputSeqInThread[t].InputSeq[0] * d_matrix[4]; #ifndef COLLISION if(stateInThisThreads[0] <= 0){ stageCost += 1 / (powf(stateInThisThreads[0] - d_constraints[2],2) * invBarrier); if(stateInThisThreads[0] < d_constraints[2]){ stageCost += 1000000; } }else{ stageCost += 1 / (powf(d_constraints[3] - stateInThisThreads[0],2) * invBarrier); if(stateInThisThreads[0] > d_constraints[3]){ stageCost += 1000000; } } #endif totalCost += stageCost; stageCost = 0.0f; } if(isnan(totalCost)) { totalCost = 1000000 * (DIM_OF_STATE + DIM_OF_U); } float KL_COST, S, lambda; float HM_COST, HM; lambda = DIM_OF_STATE * HORIZON; // Using Constant Lambda S = totalCost / lambda; HM = totalCost / (0.75 * HORIZON); HM_COST = exp(-HM); if(isnan(HM_COST)){ HM_COST = 0.0f; }else{ HM_COST = HM_COST; } KL_COST = exp(-S); __syncthreads(); PrCtr[id].WHM = HM_COST; PrCtr[id].L = totalCost; PrCtr[id].W = KL_COST; cost[id] = totalCost; for(int i = 0; i < HORIZON; i++){ for(int k = 0; k < DIM_OF_U; k++) { PrCtr[id].InputSeq[k][i] = InputSeqInThread[i].InputSeq[k]; } } free(InputSeqInThread); __syncthreads(); }
1e10ccd431055b0a55d5e330162c0b702bdfa698.cu
/* MCMPC.cu */ #include<stdio.h> #include "../include/MCMPC.cuh" void MCMPC_by_weighted_mean( InputSequences *Output, MonteCarloMPC *PrCtr, int uIndex) { float totalWeight = 0.0f; float temp[HORIZON] = { }; for(int i = 0; i < NUM_OF_ELITESAMPLE; i++){ if(isnan(PrCtr[i].W)) { totalWeight += 0.0f; }else{ totalWeight += PrCtr[i].W; } } for(int i = 0; i < HORIZON; i++){ for(int k = 0; k < NUM_OF_ELITESAMPLE; k++){ if(isnan(PrCtr[k].W)) { temp[i] += 0.0f; }else{ temp[i] += (PrCtr[k].W * PrCtr[k].InputSeq[uIndex][i]) / totalWeight; } } if(isnan(temp[i])) { Output[i].InputSeq[uIndex] = 0.0f; }else{ Output[i].InputSeq[uIndex] = temp[i]; } } } void StateUpdate( Controller *CtrPrm, float *hSt) { for(int i = 0; i < DIM_OF_STATE; i++) { CtrPrm->State[i] = hSt[i]; } } __device__ void MemCpyInThread(float *prm, float *cnstrnt, float *mtrx, Controller *Ctr) { for(int i = 0; i < NUM_OF_PARAMS; i++){ prm[i] = Ctr->Param[i]; } for(int i = 0; i < NUM_OF_CONSTRAINTS; i++){ cnstrnt[i] = Ctr->Constraints[i]; } for(int i = 0; i < DIM_OF_WEIGHT_MATRIX; i++){ mtrx[i] = Ctr->WeightMatrix[i]; } } __global__ void setup_kernel(curandState *state,int seed) { unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ curand_init(seed, id, 0, &state[id]); } __device__ float gen_u(unsigned int id, curandState *state, float ave, float vr) { float u; curandState localState = state[id]; u = curand_normal(&localState) * vr + ave; return u; } __global__ void MCMPC_callback_elite_sample(MonteCarloMPC *OutPtElt, MonteCarloMPC *AllSmplDt, int *indices) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; OutPtElt[id] = AllSmplDt[indices[id]]; __syncthreads(); /*OutPtElt[id].W = AllSmplDt[indices[id]].W; OutPtElt[id].L = AllSmplDt[indices[id]].L; for(int t = 0; t < HORIZON; t++){ for(intk = 0; k < DIM_OF_U; k++){ OutPtElt[id].InputSeq[k][t] = AllSmplDt[indices[id]].L } }*/ } __global__ void MCMPC_callback_elite_sample_by_IT(MonteCarloMPC *OutPtElt, MonteCarloMPC *AllSmplDt, int *indices) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; // OutPtElt[id] = AllSmplDt[indices[id]]; // __syncthreads(); float mdCost = 0.0f; float lambda = 1.0f; OutPtElt[id].L = AllSmplDt[indices[id]].L; mdCost = AllSmplDt[indices[id]].L - AllSmplDt[indices[0]].L; OutPtElt[id].W = exp( -mdCost / lambda ); for(int t = 0; t < HORIZON; t++){ for(int k = 0; k < DIM_OF_U; k++){ OutPtElt[id].InputSeq[k][t] = AllSmplDt[indices[id]].L; } } } __global__ void MCMPC_Cart_and_Single_Pole(MonteCarloMPC *PrCtr, curandState *randomSeed, Controller *Ctr, InputSequences *mean, float var, float *cost) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float stageCost = 0.0f; float totalCost = 0.0f; // float u[HORIZON] = { }; InputSequences *InputSeqInThread; InputSeqInThread = (InputSequences *)malloc(sizeof(InputSeqInThread) * HORIZON); float stateInThisThreads[DIM_OF_STATE] = { }; float dstateInThisThreads[DIM_OF_STATE] = { }; float d_param[NUM_OF_PARAMS], d_constraints[NUM_OF_CONSTRAINTS], d_matrix[DIM_OF_WEIGHT_MATRIX]; MemCpyInThread(d_param, d_constraints, d_matrix, Ctr); for(int i = 0; i < DIM_OF_STATE; i++){ stateInThisThreads[i] = Ctr->State[i]; } for(int t = 0; t < HORIZON; t++) { for(int uIndex = 0; uIndex < DIM_OF_U; uIndex++ ){ if(isnan(mean[t].InputSeq[uIndex])){ //u[t] = d_data[0].Input[t]; if(t < HORIZON -1){ // u[t] = gen_u(seq, randomSeed, PrCtr[0].InputSeq[uIndex][t+1], var); InputSeqInThread[t].InputSeq[uIndex] = gen_u(seq, 
randomSeed, PrCtr[0].InputSeq[uIndex][t+1], var); seq += NUM_OF_SAMPLES; }else{ // u[t] = gen_u(seq, randomSeed, PrCtr[0].InputSeq[uIndex][HORIZON - 1], var); InputSeqInThread[t].InputSeq[uIndex] = gen_u(seq, randomSeed, PrCtr[0].InputSeq[uIndex][HORIZON-1], var); seq += NUM_OF_SAMPLES; } }else{ // u[t] = gen_u(seq, randomSeed, mean[t].InputSeq[uIndex], var); InputSeqInThread[t].InputSeq[uIndex] = gen_u(seq, randomSeed, mean[t].InputSeq[uIndex], var); seq += NUM_OF_SAMPLES; } if(InputSeqInThread[t].InputSeq[uIndex] < Ctr->Constraints[0]){ InputSeqInThread[t].InputSeq[uIndex] = Ctr->Constraints[0]; } if(InputSeqInThread[t].InputSeq[uIndex] > Ctr->Constraints[1]){ InputSeqInThread[t].InputSeq[uIndex] = Ctr->Constraints[1]; } } for(int sec = 0; sec < 2; sec++){ dstateInThisThreads[0] = stateInThisThreads[2]; dstateInThisThreads[1] = stateInThisThreads[3]; /* dstateInThisThreads[2] = Cart_type_Pendulum_ddx(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); //ddx dstateInThisThreads[3] = Cart_type_Pendulum_ddtheta(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); */ dstateInThisThreads[2] = Cart_type_Pendulum_ddx(InputSeqInThread[t].InputSeq[0], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); //ddx dstateInThisThreads[3] = Cart_type_Pendulum_ddtheta(InputSeqInThread[t].InputSeq[0], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); stateInThisThreads[2] = stateInThisThreads[2] + (interval * dstateInThisThreads[2]); stateInThisThreads[3] = stateInThisThreads[3] + (interval * dstateInThisThreads[3]); stateInThisThreads[0] = stateInThisThreads[0] + (interval * dstateInThisThreads[0]); stateInThisThreads[1] = stateInThisThreads[1] + (interval * dstateInThisThreads[1]); #ifdef COLLISION if(stateInThisThreads[0] <= d_constraints[2]){ float collide[3] = { }; collide[0] = d_param[0] * d_param[1] * cosf(stateInThisThreads[1]); collide[1] = d_param[2] + d_param[0] * powf(d_param[1],2); collide[2] = collide[0] / collide[1]; stateInThisThreads[3] = stateInThisThreads[3] + (1 + d_param[7]) * collide[2] * stateInThisThreads[2]; //dtheta = dtheta + (1+e) * F * dx stateInThisThreads[2] = -d_param[7] * stateInThisThreads[2]; // dx = -e * dx stateInThisThreads[0] = d_constraints[2]; } if(d_constraints[3] <= stateInThisThreads[0]){ float collide[3] = { }; collide[0] = d_param[0] * d_param[1] * cosf(stateInThisThreads[1]); collide[1] = d_param[2] + d_param[0] * powf(d_param[1],2); collide[2] = collide[0] / collide[1]; stateInThisThreads[3] = stateInThisThreads[3] + (1 + d_param[7]) * collide[2] * stateInThisThreads[2]; //dtheta = dtheta + (1+e) * F * dx stateInThisThreads[2] = -d_param[7] * stateInThisThreads[2]; // dx = -e * dx stateInThisThreads[0] = d_constraints[3]; } #endif } while(stateInThisThreads[1] > M_PI) stateInThisThreads[1] -= (2 * M_PI); while(stateInThisThreads[1] < -M_PI) stateInThisThreads[1] += (2 * M_PI); /* stageCost = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4]; */ stageCost = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * 
d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + InputSeqInThread[t].InputSeq[0] * InputSeqInThread[t].InputSeq[0] * d_matrix[4]; #ifndef COLLISION if(stateInThisThreads[0] <= 0){ stageCost += 1 / (powf(stateInThisThreads[0] - d_constraints[2],2) * invBarrier); if(stateInThisThreads[0] < d_constraints[2]){ stageCost += 1000000; } }else{ stageCost += 1 / (powf(d_constraints[3] - stateInThisThreads[0],2) * invBarrier); if(stateInThisThreads[0] > d_constraints[3]){ stageCost += 1000000; } } #endif totalCost += stageCost; stageCost = 0.0f; } if(isnan(totalCost)) { totalCost = 1000000 * (DIM_OF_STATE + DIM_OF_U); } float KL_COST, S, lambda; float HM_COST, HM; lambda = DIM_OF_STATE * HORIZON; // Using Constant Lambda S = totalCost / lambda; HM = totalCost / (0.75 * HORIZON); HM_COST = exp(-HM); if(isnan(HM_COST)){ HM_COST = 0.0f; }else{ HM_COST = HM_COST; } KL_COST = exp(-S); __syncthreads(); PrCtr[id].WHM = HM_COST; PrCtr[id].L = totalCost; PrCtr[id].W = KL_COST; cost[id] = totalCost; for(int i = 0; i < HORIZON; i++){ for(int k = 0; k < DIM_OF_U; k++) { PrCtr[id].InputSeq[k][i] = InputSeqInThread[i].InputSeq[k]; } } free(InputSeqInThread); __syncthreads(); }
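MCMPC_Cart_and_Single_Pole draws its per-thread control inputs from cuRAND through setup_kernel and gen_u. The standalone sketch below shows that state-setup / normal-sampling pattern in isolation; the kernel names, seed, and sizes are assumptions for illustration only:

#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>

// One curandState per thread, seeded once; each thread then draws a
// Gaussian sample scaled by var around ave, mirroring gen_u() above.
__global__ void setupStates(curandState *state, unsigned long long seed)
{
    unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, id, 0, &state[id]);
}

__global__ void sampleNormal(curandState *state, float ave, float var, float *out)
{
    unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
    curandState localState = state[id];
    out[id] = curand_normal(&localState) * var + ave;
    state[id] = localState;   // write the advanced state back
}

int main()
{
    const int n = 256;
    curandState *d_states;
    float *d_out, h_out[n];
    cudaMalloc(&d_states, n * sizeof(curandState));
    cudaMalloc(&d_out, n * sizeof(float));

    setupStates<<<1, n>>>(d_states, 1234ULL);
    sampleNormal<<<1, n>>>(d_states, 0.0f, 1.0f, d_out);
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);

    printf("first samples: %f %f %f\n", h_out[0], h_out[1], h_out[2]);
    cudaFree(d_states);
    cudaFree(d_out);
    return 0;
}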
dc905e4167e8adf045dd460448a2723b59885bde.hip
// !!! This is a file automatically generated by hipify!!! #include <time.h> #include "stdio.h" #include "hip/hip_runtime.h" #define BlockNum 256 #define ThreadNum 1024 #define Len 4 __host__ __device__ unsigned int crc32(unsigned char *message) { int i, j; unsigned int byte, crc, mask; i = 0; crc = 0xFFFFFFFF; while (message[i] != 0) { byte = message[i]; // Get next byte. crc = crc ^ byte; for (j = 7; j >= 0; j--) // Do eight times. { mask = -(crc & 1); crc = (crc >> 1) ^ (0xEDB88320 & mask); } i = i + 1; } return ~crc; } __host__ void crc32Host(int len, unsigned int target) { unsigned char buf[Len]; for(int i=0;i<len;i++) { buf[i]=0; } unsigned int crc=0; while(target!=crc) { buf[0]++; for(int i=0;i<len;i++) { if (buf[i]>=255) { buf[(i+1)%len]++; buf[i]=0; } } crc=crc32(buf); if(crc == target) { printf("Input Found in CPU="); for (int i = 0; i < Len; ++i) { printf("%c",buf[i]); } printf("\n"); break; } } } __global__ void crc32Device(int len, unsigned int target) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned int size = BlockNum*ThreadNum; unsigned long long spacesearch=1; for(int i=0;i<len;i++) { spacesearch *=256; } if(idx==0) printf("spacesearc=%ld,Size=%d\n",spacesearch,size ); { __syncthreads(); } unsigned char buf[Len]; for(int i=0;i<len;i++) { buf[i]=0; } unsigned int crc=0; unsigned int index=idx*((spacesearch/size)+1); for(int i=0;i<Len;i++) { buf[i]=(unsigned char)((index)&0xff); index=(index) >>8; } for(int i=0;i<((spacesearch/size)+1);i++) { for(int j=0;j<len;j++) { if (buf[j]>=255) { buf[(j+1)%len]++; buf[j]=0; } } crc=crc32(buf); buf[0]++; if(crc == target) { printf("Input Found in GPU="); for (int i = 0; i < Len; ++i) { printf("%c",buf[i]); } printf("\n"); } } __syncthreads(); } int main() { unsigned char boi[Len]={0}; for(int i=0;i<Len;i++) boi[i]='b'; unsigned int test =crc32(boi); printf("%x\n",test ); // Set the Device Number hipSetDevice(0); // Allocating memory in device int len; unsigned int target; hipMalloc((void**)&len, sizeof(int) * 1); hipMalloc((void**)&target, sizeof(unsigned int) * 1); // Setting CUDA timer finction hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Host function in CPU hipEventRecord(start,0); // crc32Host( Len,test); hipEventRecord(stop,0); hipEventSynchronize(stop); float miliseconds_cpu = 0; hipEventElapsedTime(&miliseconds_cpu,start,stop); // printf("Elapsed Time for the CPU computation is :%f\n",miliseconds_cpu/1000); // Device function in GPU hipEventCreate(&start); hipEventCreate(&stop); float miliseconds_gpu = 0; hipEventRecord(start,0); hipLaunchKernelGGL(( crc32Device), dim3(BlockNum),dim3(ThreadNum), 0, 0, Len, test); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&miliseconds_gpu,start,stop); printf("Elapsed Time for the GPU computation is :%f\n",miliseconds_gpu/1000); //printf("GPU speedup over CPU is :%f\nx",miliseconds_cpu/miliseconds_gpu); hipDeviceReset(); return 0; }
dc905e4167e8adf045dd460448a2723b59885bde.cu
#include <time.h> #include "stdio.h" #include "cuda_runtime.h" #define BlockNum 256 #define ThreadNum 1024 #define Len 4 __host__ __device__ unsigned int crc32(unsigned char *message) { int i, j; unsigned int byte, crc, mask; i = 0; crc = 0xFFFFFFFF; while (message[i] != 0) { byte = message[i]; // Get next byte. crc = crc ^ byte; for (j = 7; j >= 0; j--) // Do eight times. { mask = -(crc & 1); crc = (crc >> 1) ^ (0xEDB88320 & mask); } i = i + 1; } return ~crc; } __host__ void crc32Host(int len, unsigned int target) { unsigned char buf[Len]; for(int i=0;i<len;i++) { buf[i]=0; } unsigned int crc=0; while(target!=crc) { buf[0]++; for(int i=0;i<len;i++) { if (buf[i]>=255) { buf[(i+1)%len]++; buf[i]=0; } } crc=crc32(buf); if(crc == target) { printf("Input Found in CPU="); for (int i = 0; i < Len; ++i) { printf("%c",buf[i]); } printf("\n"); break; } } } __global__ void crc32Device(int len, unsigned int target) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned int size = BlockNum*ThreadNum; unsigned long long spacesearch=1; for(int i=0;i<len;i++) { spacesearch *=256; } if(idx==0) printf("spacesearc=%ld,Size=%d\n",spacesearch,size ); { __syncthreads(); } unsigned char buf[Len]; for(int i=0;i<len;i++) { buf[i]=0; } unsigned int crc=0; unsigned int index=idx*((spacesearch/size)+1); for(int i=0;i<Len;i++) { buf[i]=(unsigned char)((index)&0xff); index=(index) >>8; } for(int i=0;i<((spacesearch/size)+1);i++) { for(int j=0;j<len;j++) { if (buf[j]>=255) { buf[(j+1)%len]++; buf[j]=0; } } crc=crc32(buf); buf[0]++; if(crc == target) { printf("Input Found in GPU="); for (int i = 0; i < Len; ++i) { printf("%c",buf[i]); } printf("\n"); } } __syncthreads(); } int main() { unsigned char boi[Len]={0}; for(int i=0;i<Len;i++) boi[i]='b'; unsigned int test =crc32(boi); printf("%x\n",test ); // Set the Device Number cudaSetDevice(0); // Allocating memory in device int len; unsigned int target; cudaMalloc((void**)&len, sizeof(int) * 1); cudaMalloc((void**)&target, sizeof(unsigned int) * 1); // Setting CUDA timer finction cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Host function in CPU cudaEventRecord(start,0); // crc32Host( Len,test); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float miliseconds_cpu = 0; cudaEventElapsedTime(&miliseconds_cpu,start,stop); // printf("Elapsed Time for the CPU computation is :%f\n",miliseconds_cpu/1000); // Device function in GPU cudaEventCreate(&start); cudaEventCreate(&stop); float miliseconds_gpu = 0; cudaEventRecord(start,0); crc32Device<<<BlockNum,ThreadNum>>>(Len, test); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&miliseconds_gpu,start,stop); printf("Elapsed Time for the GPU computation is :%f\n",miliseconds_gpu/1000); //printf("GPU speedup over CPU is :%f\nx",miliseconds_cpu/miliseconds_gpu); cudaDeviceReset(); return 0; }
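The host code above measures the GPU search with CUDA events. Stripped of the CRC logic, the timing pattern on its own looks roughly like the sketch below (busyKernel and the buffer size are placeholders, not from the file):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void busyKernel(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 0.5f + 1.0f;
}

int main()
{
    const int n = 1 << 20;
    float *d_x;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);                 // enqueue start marker
    busyKernel<<<(n + 255) / 256, 256>>>(d_x, n);
    cudaEventRecord(stop, 0);                  // enqueue stop marker
    cudaEventSynchronize(stop);                // wait for the stop event to complete

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);    // elapsed time in milliseconds
    printf("kernel time: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}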
65be9e9e04cc98f6fd403c9fa62afc98680aca7b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void vignette(const unsigned char * src, unsigned char * dst, float inner, float outer, const size_t width, const size_t height)
{
    // xIndex and yIndex are the pixel coordinates within the image
    // NOTE
    // NOTE This assumes that we are treating this as a two dimensional data structure and the blocks will be used in the same way
    // NOTE
    size_t xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    size_t yIndex = blockIdx.y * blockDim.y + threadIdx.y;

    // Check that the indices are within the bounds of the image
    if (xIndex < width && yIndex < height)
    {
        // offset is the position of the current pixel in the one dimensional array
        size_t offset = yIndex * width + xIndex;

        // Shift the pixel oriented coordinates into image resolution independent coordinates
        // where 0, 0 is the center of the image.
        float x = xIndex / float(height) - float(width) / float(height) / 2.0f;
        float y = yIndex / float(height) - 0.5f;

        // Calculate the current pixel's distance from the center, which sits at 0, 0
        float d = sqrtf(x * x + y * y);

        if (d < inner)
        {
            // if d is less than the inner boundary, leave this pixel unchanged
            *(dst + offset) = *(src + offset);
        }
        else if (d > outer)
        {
            // if d is greater than the outer boundary, set it to 0 so it becomes black
            *(dst + offset) = 0;
        }
        else
        {
            // If between the inner and outer boundaries, the pixel becomes a shade of gray.
            // NOTE
            // NOTE This assumes... by the time we get here, we have checked that outer does not equal inner
            // NOTE This also assumes... by the time we get here, we have made inner less than outer
            // NOTE
            float v = 1 - (d - inner) / (outer - inner);
            *(dst + offset) = (unsigned char)(*(src + offset) * v);
        }
    }
}
65be9e9e04cc98f6fd403c9fa62afc98680aca7b.cu
#include "includes.h" __global__ void vignette(const unsigned char * src, unsigned char * dst, float inner, float outer, const size_t width, const size_t height) { // the xIndex and yIndex will be used cordinates pixels of the image // NOTE // NOTE This assumes that we are treating this as a two dimensional data structure and the blocks will be used in the same way // NOTE size_t xIndex = blockIdx.x * blockDim.x + threadIdx.x; size_t yIndex = blockIdx.y * blockDim.y + threadIdx.y; // Checking to see if the indexs are within the bounds of the image if (xIndex < width && yIndex < height) { // offset represents postion of the current pixel in the one dimensional array size_t offset = yIndex * width + xIndex; // Shift the pixel oriented coordinates into image resolution independent coordinates // where 0, 0 is the center of the image. float x = xIndex / float(height) - float(width) / float(height) / 2.0f; float y = yIndex / float(height) - 0.5f; //Calculates current pixels distance from the center where the cordinates are 0, 0 float d = sqrtf(x * x + y * y); if (d < inner) { // if d is less than inner boundary, we don't change that specific image pixel *(dst + offset) = *(src + offset); } else if (d > outer) { // if d is greater than outer boundary, we set it to 0 so it becomes black *(dst + offset) = 0; } else { // If in between the inner and outer boundaries, it will be a shade of gray. // NOTE // NOTE This assumes... by the time we get here, we have checked that outer does not equal inner // NOTE This also assumes ... by the time we get here, we have made inner less than outer // NOTE float v = 1 - (d - inner) / (outer - inner); *(dst + offset) = (unsigned char)(*(src + offset) * v); } } }
22e9e2ad3a6ea2b3fdaf4002feb8270a0ce2105d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Triangle map SAH creator interface for CUDA // ------------------------------------------------------------------- // Copyright (C) 2010 OpenEngine.dk (See AUTHORS) // // This program is free software; It is covered by the GNU General // Public License version 2 or any later version. // See the GNU General Public License for more details (see LICENSE). //-------------------------------------------------------------------- #include <Utils/CUDA/TriangleMapSAHCreator.h> #include <Scene/TriangleNode.h> #include <Utils/CUDA/TriangleMap.h> #include <Utils/CUDA/Convert.h> #include <Utils/CUDA/Utils.h> #include <Utils/CUDA/IntersectionTests.h> #include <Utils/CUDA/Kernels/DeviceVars.h> #include <Logging/Logger.h> namespace OpenEngine { using namespace Resources::CUDA; using namespace Scene; namespace Utils { namespace CUDA { using namespace Kernels; namespace TriangleMapSAHKernels { #include <Utils/CUDA/Kernels/LowerTriangleMap.h> } using namespace TriangleMapSAHKernels; TriangleMapSAHCreator::TriangleMapSAHCreator() : ITriangleMapCreator(), traversalCost(24.0f) { cutCreateTimer(&timerID); splitTriangleSet = new CUDADataBlock<KDNode::bitmap4>(1); primAreas = new CUDADataBlock<float>(1); childAreas = new CUDADataBlock<float2>(1); childSets = new CUDADataBlock<KDNode::bitmap2>(1); splitSide = new CUDADataBlock<int>(1); splitAddr = new CUDADataBlock<int>(1); scanConfig.algorithm = CUDPP_SCAN; scanConfig.op = CUDPP_ADD; scanConfig.datatype = CUDPP_INT; scanConfig.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; scanSize = 262144; CUDPPResult res = cudppPlan(&scanHandle, scanConfig, scanSize, 1, 0); if (CUDPP_SUCCESS != res) throw Core::Exception("Error creating CUDPP scanPlan for Triangle Map SAH Creator"); } TriangleMapSAHCreator::~TriangleMapSAHCreator() { if (splitTriangleSet) delete splitTriangleSet; if (primAreas) delete primAreas; if (childAreas) delete childAreas; if (childSets) delete childSets; if (splitSide) delete splitSide; if (splitAddr) delete splitAddr; } void TriangleMapSAHCreator::Create(TriangleMap* map, CUDADataBlock<int>* upperLeafIDs){ primMin = map->primMin; primMax = map->primMax; SetPropagateBoundingBox(map->GetPropagateBoundingBox()); int activeIndex = map->nodes->GetSize(); int activeRange = upperLeafIDs->GetSize(); int childrenCreated; int triangles = map->primMin->GetSize(); hipMemcpyToSymbol(d_triangles, &triangles, sizeof(int)); //START_TIMER(timerID); PreprocessLowerNodes(activeIndex, activeRange, map, upperLeafIDs); //PRINT_TIMER(timerID, "Preprocess lower nodes using SAH"); //START_TIMER(timerID); ProcessLowerNodes(activeIndex, activeRange, map, upperLeafIDs, childrenCreated); activeIndex = map->nodes->GetSize() - childrenCreated; activeRange = childrenCreated; while (activeRange > 0){ ProcessLowerNodes(activeIndex, activeRange, map, NULL, childrenCreated); activeIndex = map->nodes->GetSize() - childrenCreated; activeRange = childrenCreated; } //PRINT_TIMER(timerID, "Process lower nodes using SAH"); } void TriangleMapSAHCreator::PreprocessLowerNodes(int activeIndex, int activeRange, TriangleMap* map, CUDADataBlock<int>* upperLeafIDs) { int triangles = primMin->GetSize(); //logger.info << "=== Preprocess " << activeRange << " Lower Nodes Starting at " << activeIndex << " === with " << triangles << " indices" << logger.end; GeometryList* geom = map->GetGeometry(); primAreas->Extend(triangles); unsigned int blocks, threads, smemSize; Calc1DKernelDimensions(triangles, blocks, 
threads); hipLaunchKernelGGL(( CalcSurfaceArea), dim3(blocks), dim3(threads), 0, 0, map->GetPrimitiveIndices()->GetDeviceData(), geom->GetP0Data(), geom->GetP1Data(), geom->GetP2Data(), primAreas->GetDeviceData(), triangles); CHECK_FOR_CUDA_ERROR(); TriangleNode* nodes = map->nodes; splitTriangleSet->Extend(triangles * 3); Calc1DKernelDimensions(activeRange, blocks, threads); hipLaunchKernelGGL(( PreprocesLowerNodes), dim3(blocks), dim3(threads), 0, 0, upperLeafIDs->GetDeviceData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveBitmapData(), nodes->GetSurfaceAreaData(), primAreas->GetDeviceData(), activeRange); CHECK_FOR_CUDA_ERROR(); unsigned int smemPrThread = sizeof(float3) + sizeof(float3); Calc1DKernelDimensionsWithSmem(activeRange * TriangleNode::MAX_LOWER_SIZE, smemPrThread, blocks, threads, smemSize, 128); hipLaunchKernelGGL(( CreateSplittingPlanes), dim3(blocks), dim3(threads), smemSize, 0, upperLeafIDs->GetDeviceData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveBitmapData(), primMin->GetDeviceData(), primMax->GetDeviceData(), splitTriangleSet->GetDeviceData(), activeRange); CHECK_FOR_CUDA_ERROR(); #if CPU_VERIFY CheckPreprocess(activeIndex, activeRange, map, upperLeafIDs); #endif } void TriangleMapSAHCreator::ProcessLowerNodes(int activeIndex, int activeRange, TriangleMap* map, CUDADataBlock<int>* upperLeafIDs, int &childrenCreated) { /* if (upperLeafIDs) logger.info << "=== Process " << activeRange << " Lower Nodes from Indices ===" << logger.end; else logger.info << "=== Process " << activeRange << " Lower Nodes Starting at " << activeIndex << " ===" << logger.end; */ TriangleNode* nodes = map->nodes; hipMemcpyToSymbol(d_activeNodeIndex, &activeIndex, sizeof(int)); hipMemcpyToSymbol(d_activeNodeRange, &activeRange, sizeof(int)); childAreas->Extend(activeRange); childSets->Extend(activeRange); splitSide->Extend(activeRange+1); splitAddr->Extend(activeRange+1); unsigned int smemPrThread = TriangleNode::MAX_LOWER_SIZE * sizeof(float); unsigned int maxThreadsPrBlock = TriangleNode::MAX_LOWER_SIZE <= 32 ? 
96 : 32; KernelConf conf = KernelConf1D(activeRange, maxThreadsPrBlock, 0, smemPrThread); //logger.info << "<<<" << conf.blocks << ", " << conf.threads << ", " << conf.smem << ">>>" << logger.end; if (upperLeafIDs) hipLaunchKernelGGL(( CalcSAH<true>), dim3(conf.blocks), dim3(conf.threads), conf.smem, 0, upperLeafIDs->GetDeviceData(), nodes->GetInfoData(), nodes->GetSplitPositionData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveBitmapData(), nodes->GetSurfaceAreaData(), primAreas->GetDeviceData(), primMin->GetDeviceData(), primMax->GetDeviceData(), splitTriangleSet->GetDeviceData(), childAreas->GetDeviceData(), childSets->GetDeviceData(), splitSide->GetDeviceData(), traversalCost); else hipLaunchKernelGGL(( CalcSAH<false>), dim3(conf.blocks), dim3(conf.threads), conf.smem, 0, NULL, nodes->GetInfoData(), nodes->GetSplitPositionData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveBitmapData(), nodes->GetSurfaceAreaData(), primAreas->GetDeviceData(), primMin->GetDeviceData(), primMax->GetDeviceData(), splitTriangleSet->GetDeviceData(), childAreas->GetDeviceData(), childSets->GetDeviceData(), splitSide->GetDeviceData(), traversalCost); CHECK_FOR_CUDA_ERROR(); cudppScan(scanHandle, splitAddr->GetDeviceData(), splitSide->GetDeviceData(), activeRange+1); CHECK_FOR_CUDA_ERROR(); int splits; hipMemcpy(&splits, splitAddr->GetDeviceData() + activeRange, sizeof(int), hipMemcpyDeviceToHost); nodes->Extend(nodes->GetSize() + 2 * splits); conf = KernelConf1D(activeRange); if (upperLeafIDs) hipLaunchKernelGGL(( CreateLowerSAHChildren<true>), dim3(conf.blocks), dim3(conf.threads), 0, 0, upperLeafIDs->GetDeviceData(), splitSide->GetDeviceData(), splitAddr->GetDeviceData(), childAreas->GetDeviceData(), childSets->GetDeviceData(), nodes->GetSurfaceAreaData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveAmountData(), nodes->GetChildrenData(), splits); else hipLaunchKernelGGL(( CreateLowerSAHChildren<false>), dim3(conf.blocks), dim3(conf.threads), 0, 0, NULL, splitSide->GetDeviceData(), splitAddr->GetDeviceData(), childAreas->GetDeviceData(), childSets->GetDeviceData(), nodes->GetSurfaceAreaData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveAmountData(), nodes->GetChildrenData(), splits); CHECK_FOR_CUDA_ERROR(); childrenCreated = splits * 2; if (propagateAabbs && childrenCreated > 0){ // @TODO propagate downwards or upwards? 
Test // which is fastest (for non trivial splits // sherlock if (upperLeafIDs){ hipLaunchKernelGGL(( PropagateAabbToChildren<true>), dim3(conf.blocks), dim3(conf.threads), 0, 0, upperLeafIDs->GetDeviceData(), nodes->GetInfoData(), nodes->GetSplitPositionData(), nodes->GetAabbMinData(), nodes->GetAabbMaxData(), nodes->GetChildrenData()); }else hipLaunchKernelGGL(( PropagateAabbToChildren<false>), dim3(conf.blocks), dim3(conf.threads), 0, 0, NULL, nodes->GetInfoData(), nodes->GetSplitPositionData(), nodes->GetAabbMinData(), nodes->GetAabbMaxData(), nodes->GetChildrenData()); CHECK_FOR_CUDA_ERROR(); } } void TriangleMapSAHCreator::CheckPreprocess(int activeIndex, int activeRange, TriangleMap* map, Resources::CUDA::CUDADataBlock<int>* leafIDs) { throw Exception("CheckPreprocess was broken by removing PROXY"); /* TriangleNode* nodes = map->nodes; int h_leafIDs[activeRange]; hipMemcpy(h_leafIDs, leafIDs->GetDeviceData(), activeRange * sizeof(int), hipMemcpyDeviceToHost); CHECK_FOR_CUDA_ERROR(); char info[activeRange]; int2 leafPrimInfo[activeRange]; int left[activeRange]; for (int i = 0; i < activeRange; ++i){ hipMemcpy(info + i, nodes->GetInfoData() + h_leafIDs[i], sizeof(char), hipMemcpyDeviceToHost); hipMemcpy(leafPrimInfo + i, nodes->GetPrimitiveInfoData() + h_leafIDs[i], sizeof(int2), hipMemcpyDeviceToHost); hipMemcpy(left + i, nodes->GetLeftData() + h_leafIDs[i], sizeof(int), hipMemcpyDeviceToHost); } CHECK_FOR_CUDA_ERROR(); int2 lowerPrimInfo[activeRange]; hipMemcpy(lowerPrimInfo, nodes->GetPrimitiveInfoData() + activeIndex, activeRange * sizeof(int2), hipMemcpyDeviceToHost); CHECK_FOR_CUDA_ERROR(); for (int i = 0; i < activeRange; ++i){ if (info[i] == KDNode::LEAF){ if (lowerPrimInfo[i].y != 0) throw Exception("Empty lower node didn't result in upper leaf."); } if (left[i] != activeIndex + i) throw Exception("leaf not pointing to correct lower node"); } for (int i = 0; i < activeRange; ++i){ if (lowerPrimInfo[i].x != leafPrimInfo[i].x) throw Exception("Leaf node " + Utils::Convert::ToString(h_leafIDs[i]) + "'s index " + Utils::Convert::ToString(leafPrimInfo[i].x) + " does not match lower node " + Utils::Convert::ToString(activeIndex + i) + "'s " + Utils::Convert::ToString(lowerPrimInfo[i].x)); if (bitcount(lowerPrimInfo[i].y) > leafPrimInfo[i].y) throw Exception("Leaf node " + Utils::Convert::ToString(h_leafIDs[i]) + "'s size of " + Utils::Convert::ToString(leafPrimInfo[i].y) + " does not match lower node " + Utils::Convert::ToString(activeIndex + i) + "'s bitmap " + BitmapToString(lowerPrimInfo[i].y)); } */ // @TODO check split set } } } }
22e9e2ad3a6ea2b3fdaf4002feb8270a0ce2105d.cu
// Triangle map SAH creator interface for CUDA // ------------------------------------------------------------------- // Copyright (C) 2010 OpenEngine.dk (See AUTHORS) // // This program is free software; It is covered by the GNU General // Public License version 2 or any later version. // See the GNU General Public License for more details (see LICENSE). //-------------------------------------------------------------------- #include <Utils/CUDA/TriangleMapSAHCreator.h> #include <Scene/TriangleNode.h> #include <Utils/CUDA/TriangleMap.h> #include <Utils/CUDA/Convert.h> #include <Utils/CUDA/Utils.h> #include <Utils/CUDA/IntersectionTests.h> #include <Utils/CUDA/Kernels/DeviceVars.h> #include <Logging/Logger.h> namespace OpenEngine { using namespace Resources::CUDA; using namespace Scene; namespace Utils { namespace CUDA { using namespace Kernels; namespace TriangleMapSAHKernels { #include <Utils/CUDA/Kernels/LowerTriangleMap.h> } using namespace TriangleMapSAHKernels; TriangleMapSAHCreator::TriangleMapSAHCreator() : ITriangleMapCreator(), traversalCost(24.0f) { cutCreateTimer(&timerID); splitTriangleSet = new CUDADataBlock<KDNode::bitmap4>(1); primAreas = new CUDADataBlock<float>(1); childAreas = new CUDADataBlock<float2>(1); childSets = new CUDADataBlock<KDNode::bitmap2>(1); splitSide = new CUDADataBlock<int>(1); splitAddr = new CUDADataBlock<int>(1); scanConfig.algorithm = CUDPP_SCAN; scanConfig.op = CUDPP_ADD; scanConfig.datatype = CUDPP_INT; scanConfig.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; scanSize = 262144; CUDPPResult res = cudppPlan(&scanHandle, scanConfig, scanSize, 1, 0); if (CUDPP_SUCCESS != res) throw Core::Exception("Error creating CUDPP scanPlan for Triangle Map SAH Creator"); } TriangleMapSAHCreator::~TriangleMapSAHCreator() { if (splitTriangleSet) delete splitTriangleSet; if (primAreas) delete primAreas; if (childAreas) delete childAreas; if (childSets) delete childSets; if (splitSide) delete splitSide; if (splitAddr) delete splitAddr; } void TriangleMapSAHCreator::Create(TriangleMap* map, CUDADataBlock<int>* upperLeafIDs){ primMin = map->primMin; primMax = map->primMax; SetPropagateBoundingBox(map->GetPropagateBoundingBox()); int activeIndex = map->nodes->GetSize(); int activeRange = upperLeafIDs->GetSize(); int childrenCreated; int triangles = map->primMin->GetSize(); cudaMemcpyToSymbol(d_triangles, &triangles, sizeof(int)); //START_TIMER(timerID); PreprocessLowerNodes(activeIndex, activeRange, map, upperLeafIDs); //PRINT_TIMER(timerID, "Preprocess lower nodes using SAH"); //START_TIMER(timerID); ProcessLowerNodes(activeIndex, activeRange, map, upperLeafIDs, childrenCreated); activeIndex = map->nodes->GetSize() - childrenCreated; activeRange = childrenCreated; while (activeRange > 0){ ProcessLowerNodes(activeIndex, activeRange, map, NULL, childrenCreated); activeIndex = map->nodes->GetSize() - childrenCreated; activeRange = childrenCreated; } //PRINT_TIMER(timerID, "Process lower nodes using SAH"); } void TriangleMapSAHCreator::PreprocessLowerNodes(int activeIndex, int activeRange, TriangleMap* map, CUDADataBlock<int>* upperLeafIDs) { int triangles = primMin->GetSize(); //logger.info << "=== Preprocess " << activeRange << " Lower Nodes Starting at " << activeIndex << " === with " << triangles << " indices" << logger.end; GeometryList* geom = map->GetGeometry(); primAreas->Extend(triangles); unsigned int blocks, threads, smemSize; Calc1DKernelDimensions(triangles, blocks, threads); CalcSurfaceArea<<<blocks, 
threads>>>(map->GetPrimitiveIndices()->GetDeviceData(), geom->GetP0Data(), geom->GetP1Data(), geom->GetP2Data(), primAreas->GetDeviceData(), triangles); CHECK_FOR_CUDA_ERROR(); TriangleNode* nodes = map->nodes; splitTriangleSet->Extend(triangles * 3); Calc1DKernelDimensions(activeRange, blocks, threads); PreprocesLowerNodes<<<blocks, threads>>>(upperLeafIDs->GetDeviceData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveBitmapData(), nodes->GetSurfaceAreaData(), primAreas->GetDeviceData(), activeRange); CHECK_FOR_CUDA_ERROR(); unsigned int smemPrThread = sizeof(float3) + sizeof(float3); Calc1DKernelDimensionsWithSmem(activeRange * TriangleNode::MAX_LOWER_SIZE, smemPrThread, blocks, threads, smemSize, 128); CreateSplittingPlanes<<<blocks, threads, smemSize>>> (upperLeafIDs->GetDeviceData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveBitmapData(), primMin->GetDeviceData(), primMax->GetDeviceData(), splitTriangleSet->GetDeviceData(), activeRange); CHECK_FOR_CUDA_ERROR(); #if CPU_VERIFY CheckPreprocess(activeIndex, activeRange, map, upperLeafIDs); #endif } void TriangleMapSAHCreator::ProcessLowerNodes(int activeIndex, int activeRange, TriangleMap* map, CUDADataBlock<int>* upperLeafIDs, int &childrenCreated) { /* if (upperLeafIDs) logger.info << "=== Process " << activeRange << " Lower Nodes from Indices ===" << logger.end; else logger.info << "=== Process " << activeRange << " Lower Nodes Starting at " << activeIndex << " ===" << logger.end; */ TriangleNode* nodes = map->nodes; cudaMemcpyToSymbol(d_activeNodeIndex, &activeIndex, sizeof(int)); cudaMemcpyToSymbol(d_activeNodeRange, &activeRange, sizeof(int)); childAreas->Extend(activeRange); childSets->Extend(activeRange); splitSide->Extend(activeRange+1); splitAddr->Extend(activeRange+1); unsigned int smemPrThread = TriangleNode::MAX_LOWER_SIZE * sizeof(float); unsigned int maxThreadsPrBlock = TriangleNode::MAX_LOWER_SIZE <= 32 ? 
96 : 32; KernelConf conf = KernelConf1D(activeRange, maxThreadsPrBlock, 0, smemPrThread); //logger.info << "<<<" << conf.blocks << ", " << conf.threads << ", " << conf.smem << ">>>" << logger.end; if (upperLeafIDs) CalcSAH<true><<<conf.blocks, conf.threads, conf.smem>>>(upperLeafIDs->GetDeviceData(), nodes->GetInfoData(), nodes->GetSplitPositionData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveBitmapData(), nodes->GetSurfaceAreaData(), primAreas->GetDeviceData(), primMin->GetDeviceData(), primMax->GetDeviceData(), splitTriangleSet->GetDeviceData(), childAreas->GetDeviceData(), childSets->GetDeviceData(), splitSide->GetDeviceData(), traversalCost); else CalcSAH<false><<<conf.blocks, conf.threads, conf.smem>>>(NULL, nodes->GetInfoData(), nodes->GetSplitPositionData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveBitmapData(), nodes->GetSurfaceAreaData(), primAreas->GetDeviceData(), primMin->GetDeviceData(), primMax->GetDeviceData(), splitTriangleSet->GetDeviceData(), childAreas->GetDeviceData(), childSets->GetDeviceData(), splitSide->GetDeviceData(), traversalCost); CHECK_FOR_CUDA_ERROR(); cudppScan(scanHandle, splitAddr->GetDeviceData(), splitSide->GetDeviceData(), activeRange+1); CHECK_FOR_CUDA_ERROR(); int splits; cudaMemcpy(&splits, splitAddr->GetDeviceData() + activeRange, sizeof(int), cudaMemcpyDeviceToHost); nodes->Extend(nodes->GetSize() + 2 * splits); conf = KernelConf1D(activeRange); if (upperLeafIDs) CreateLowerSAHChildren<true><<<conf.blocks, conf.threads>>> (upperLeafIDs->GetDeviceData(), splitSide->GetDeviceData(), splitAddr->GetDeviceData(), childAreas->GetDeviceData(), childSets->GetDeviceData(), nodes->GetSurfaceAreaData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveAmountData(), nodes->GetChildrenData(), splits); else CreateLowerSAHChildren<false><<<conf.blocks, conf.threads>>> (NULL, splitSide->GetDeviceData(), splitAddr->GetDeviceData(), childAreas->GetDeviceData(), childSets->GetDeviceData(), nodes->GetSurfaceAreaData(), nodes->GetPrimitiveIndexData(), nodes->GetPrimitiveAmountData(), nodes->GetChildrenData(), splits); CHECK_FOR_CUDA_ERROR(); childrenCreated = splits * 2; if (propagateAabbs && childrenCreated > 0){ // @TODO propagate downwards or upwards? 
Test // which is fastest (for non trivial splits // sherlock if (upperLeafIDs){ PropagateAabbToChildren<true><<<conf.blocks, conf.threads>>>(upperLeafIDs->GetDeviceData(), nodes->GetInfoData(), nodes->GetSplitPositionData(), nodes->GetAabbMinData(), nodes->GetAabbMaxData(), nodes->GetChildrenData()); }else PropagateAabbToChildren<false><<<conf.blocks, conf.threads>>>(NULL, nodes->GetInfoData(), nodes->GetSplitPositionData(), nodes->GetAabbMinData(), nodes->GetAabbMaxData(), nodes->GetChildrenData()); CHECK_FOR_CUDA_ERROR(); } } void TriangleMapSAHCreator::CheckPreprocess(int activeIndex, int activeRange, TriangleMap* map, Resources::CUDA::CUDADataBlock<int>* leafIDs) { throw Exception("CheckPreprocess was broken by removing PROXY"); /* TriangleNode* nodes = map->nodes; int h_leafIDs[activeRange]; cudaMemcpy(h_leafIDs, leafIDs->GetDeviceData(), activeRange * sizeof(int), cudaMemcpyDeviceToHost); CHECK_FOR_CUDA_ERROR(); char info[activeRange]; int2 leafPrimInfo[activeRange]; int left[activeRange]; for (int i = 0; i < activeRange; ++i){ cudaMemcpy(info + i, nodes->GetInfoData() + h_leafIDs[i], sizeof(char), cudaMemcpyDeviceToHost); cudaMemcpy(leafPrimInfo + i, nodes->GetPrimitiveInfoData() + h_leafIDs[i], sizeof(int2), cudaMemcpyDeviceToHost); cudaMemcpy(left + i, nodes->GetLeftData() + h_leafIDs[i], sizeof(int), cudaMemcpyDeviceToHost); } CHECK_FOR_CUDA_ERROR(); int2 lowerPrimInfo[activeRange]; cudaMemcpy(lowerPrimInfo, nodes->GetPrimitiveInfoData() + activeIndex, activeRange * sizeof(int2), cudaMemcpyDeviceToHost); CHECK_FOR_CUDA_ERROR(); for (int i = 0; i < activeRange; ++i){ if (info[i] == KDNode::LEAF){ if (lowerPrimInfo[i].y != 0) throw Exception("Empty lower node didn't result in upper leaf."); } if (left[i] != activeIndex + i) throw Exception("leaf not pointing to correct lower node"); } for (int i = 0; i < activeRange; ++i){ if (lowerPrimInfo[i].x != leafPrimInfo[i].x) throw Exception("Leaf node " + Utils::Convert::ToString(h_leafIDs[i]) + "'s index " + Utils::Convert::ToString(leafPrimInfo[i].x) + " does not match lower node " + Utils::Convert::ToString(activeIndex + i) + "'s " + Utils::Convert::ToString(lowerPrimInfo[i].x)); if (bitcount(lowerPrimInfo[i].y) > leafPrimInfo[i].y) throw Exception("Leaf node " + Utils::Convert::ToString(h_leafIDs[i]) + "'s size of " + Utils::Convert::ToString(leafPrimInfo[i].y) + " does not match lower node " + Utils::Convert::ToString(activeIndex + i) + "'s bitmap " + BitmapToString(lowerPrimInfo[i].y)); } */ // @TODO check split set } } } }
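// A minimal, self-contained sketch (not OpenEngine code) of the compaction pattern
// ProcessLowerNodes uses above: every active node writes a 0/1 "will split" flag
// (splitSide), an exclusive +-scan over activeRange+1 entries turns those flags into
// compact child offsets (splitAddr), and splitAddr[activeRange] is the total number of
// splits used to extend the node array by 2 * splits. Thrust stands in for CUDPP here
// purely for brevity; only the names splitSide/splitAddr/activeRange are borrowed from
// the code above, everything else is illustrative.
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main() {
    const int activeRange = 6;
    // One flag per active node, plus a padding slot so the scan exposes the total.
    int h_flags[activeRange + 1] = {1, 0, 1, 1, 0, 1, 0};
    thrust::device_vector<int> splitSide(h_flags, h_flags + activeRange + 1);
    thrust::device_vector<int> splitAddr(activeRange + 1);

    // Exclusive scan: splitAddr[i] = number of splits among nodes 0..i-1, i.e. the
    // compacted slot whose pair of children node i will own if it splits.
    thrust::exclusive_scan(splitSide.begin(), splitSide.end(), splitAddr.begin());

    int splits = splitAddr[activeRange];  // same value the code above copies back to the host
    std::printf("splits = %d, children to create = %d\n", splits, 2 * splits);
    return 0;
}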
4f4aa4fcd51df3a924951a92edbb026bb8a64fe9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017-2018 XGBoost contributors */ #include <xgboost/tree_updater.h> #include <utility> #include <vector> #include <limits> #include <string> #include "../common/common.h" #include "param.h" #include "updater_gpu_common.cuh" namespace xgboost { namespace tree { DMLC_REGISTRY_FILE_TAG(updater_gpu); template <typename GradientPairT> XGBOOST_DEVICE float inline LossChangeMissing(const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum, const float& parent_gain, const GPUTrainingParam& param, bool& missing_left_out) { // NOLINT // Put gradients of missing values to left float missing_left_loss = DeviceCalcLossChange(param, scan + missing, parent_sum, parent_gain); float missing_right_loss = DeviceCalcLossChange(param, scan, parent_sum, parent_gain); if (missing_left_loss >= missing_right_loss) { missing_left_out = true; return missing_left_loss; } else { missing_left_out = false; return missing_right_loss; } } /** * @brief Absolute BFS order IDs to col-wise unique IDs based on user input * @param tid the index of the element that this thread should access * @param abs the array of absolute IDs * @param colIds the array of column IDs for each element * @param nodeStart the start of the node ID at this level * @param nKeys number of nodes at this level. * @return the uniq key */ static HOST_DEV_INLINE NodeIdT Abs2UniqueKey(int tid, common::Span<const NodeIdT> abs, common::Span<const int> colIds, NodeIdT nodeStart, int nKeys) { int a = abs[tid]; if (a == kUnusedNode) return a; return ((a - nodeStart) + (colIds[tid] * nKeys)); } /** * @struct Pair * @brief Pair used for key basd scan operations on GradientPair */ struct Pair { int key; GradientPair value; }; /** define a key that's not used at all in the entire boosting process */ static const int kNoneKey = -100; /** * @brief Allocate temporary buffers needed for scan operations * @param tmpScans gradient buffer * @param tmpKeys keys buffer * @param size number of elements that will be scanned */ template <int BLKDIM_L1L3 = 256> int ScanTempBufferSize(int size) { int num_blocks = dh::DivRoundUp(size, BLKDIM_L1L3); return num_blocks; } struct AddByKey { template <typename T> HOST_DEV_INLINE T operator()(const T& first, const T& second) const { T result; if (first.key == second.key) { result.key = first.key; result.value = first.value + second.value; } else { result.key = second.key; result.value = second.value; } return result; } }; /** * @brief Gradient value getter function * @param id the index into the vals or instIds array to which to fetch * @param vals the gradient value buffer * @param instIds instance index buffer * @return the expected gradient value */ HOST_DEV_INLINE GradientPair Get(int id, common::Span<const GradientPair> vals, common::Span<const int> instIds) { id = instIds[id]; return vals[id]; } template <int BLKDIM_L1L3> __global__ void CubScanByKeyL1( common::Span<GradientPair> scans, common::Span<const GradientPair> vals, common::Span<const int> instIds, common::Span<GradientPair> mScans, common::Span<int> mKeys, common::Span<const NodeIdT> keys, int nUniqKeys, common::Span<const int> colIds, NodeIdT nodeStart, const int size) { Pair rootPair = {kNoneKey, GradientPair(0.f, 0.f)}; int myKey; GradientPair myValue; using BlockScan = hipcub::BlockScan<Pair, BLKDIM_L1L3>; __shared__ typename BlockScan::TempStorage temp_storage; Pair threadData; int tid = blockIdx.x * BLKDIM_L1L3 + threadIdx.x; if 
(tid < size) { myKey = Abs2UniqueKey(tid, keys, colIds, nodeStart, nUniqKeys); myValue = Get(tid, vals, instIds); } else { myKey = kNoneKey; myValue = {}; } threadData.key = myKey; threadData.value = myValue; // get previous key, especially needed for the last thread in this block // in order to pass on the partial scan values. // this statement MUST appear before the checks below! // else, the result of this shuffle operation will be undefined #if (__CUDACC_VER_MAJOR__ >= 9) int previousKey = __shfl_up_sync(0xFFFFFFFF, myKey, 1); #else int previousKey = __shfl_up(myKey, 1); #endif // Collectively compute the block-wide exclusive prefix sum BlockScan(temp_storage) .ExclusiveScan(threadData, threadData, rootPair, AddByKey()); if (tid < size) { scans[tid] = threadData.value; } else { return; } if (threadIdx.x == BLKDIM_L1L3 - 1) { threadData.value = (myKey == previousKey) ? threadData.value : GradientPair(0.0f, 0.0f); mKeys[blockIdx.x] = myKey; mScans[blockIdx.x] = threadData.value + myValue; } } template <int BLKSIZE> __global__ void CubScanByKeyL2(common::Span<GradientPair> mScans, common::Span<int> mKeys, int mLength) { using BlockScan = hipcub::BlockScan<Pair, BLKSIZE, cub::BLOCK_SCAN_WARP_SCANS>; Pair threadData; __shared__ typename BlockScan::TempStorage temp_storage; for (int i = threadIdx.x; i < mLength; i += BLKSIZE - 1) { threadData.key = mKeys[i]; threadData.value = mScans[i]; BlockScan(temp_storage).InclusiveScan(threadData, threadData, AddByKey()); mScans[i] = threadData.value; __syncthreads(); } } template <int BLKDIM_L1L3> __global__ void CubScanByKeyL3(common::Span<GradientPair> sums, common::Span<GradientPair> scans, common::Span<const GradientPair> vals, common::Span<const int> instIds, common::Span<const GradientPair> mScans, common::Span<const int> mKeys, common::Span<const NodeIdT> keys, int nUniqKeys, common::Span<const int> colIds, NodeIdT nodeStart, const int size) { int relId = threadIdx.x; int tid = (blockIdx.x * BLKDIM_L1L3) + relId; // to avoid the following warning from nvcc: // __shared__ memory variable with non-empty constructor or destructor // (potential race between threads) __shared__ char gradBuff[sizeof(GradientPair)]; __shared__ int s_mKeys; GradientPair* s_mScans = reinterpret_cast<GradientPair*>(gradBuff); if (tid >= size) return; // cache block-wide partial scan info if (relId == 0) { s_mKeys = (blockIdx.x > 0) ? mKeys[blockIdx.x - 1] : kNoneKey; s_mScans[0] = (blockIdx.x > 0) ? mScans[blockIdx.x - 1] : GradientPair(); } int myKey = Abs2UniqueKey(tid, keys, colIds, nodeStart, nUniqKeys); int previousKey = tid == 0 ? kNoneKey : Abs2UniqueKey(tid - 1, keys, colIds, nodeStart, nUniqKeys); GradientPair my_value = scans[tid]; __syncthreads(); if (blockIdx.x > 0 && s_mKeys == previousKey) { my_value += s_mScans[0]; } if (tid == size - 1) { sums[previousKey] = my_value + Get(tid, vals, instIds); } if ((previousKey != myKey) && (previousKey >= 0)) { sums[previousKey] = my_value; my_value = GradientPair(0.0f, 0.0f); } scans[tid] = my_value; } /** * @brief Performs fused reduce and scan by key functionality. It is assumed * that * the keys occur contiguously! * @param sums the output gradient reductions for each element performed * key-wise * @param scans the output gradient scans for each element performed key-wise * @param vals the gradients evaluated for each observation. * @param instIds instance ids for each element * @param keys keys to be used to segment the reductions. They need not occur * contiguously in contrast to scan_by_key. 
Currently, we need one key per * value in the 'vals' array. * @param size number of elements in the 'vals' array * @param nUniqKeys max number of uniq keys found per column * @param nCols number of columns * @param tmpScans temporary scan buffer needed for cub-pyramid algo * @param tmpKeys temporary key buffer needed for cub-pyramid algo * @param colIds column indices for each element in the array * @param nodeStart index of the leftmost node in the current level */ template <int BLKDIM_L1L3 = 256, int BLKDIM_L2 = 512> void ReduceScanByKey(common::Span<GradientPair> sums, common::Span<GradientPair> scans, common::Span<GradientPair> vals, common::Span<const int> instIds, common::Span<const NodeIdT> keys, int size, int nUniqKeys, int nCols, common::Span<GradientPair> tmpScans, common::Span<int> tmpKeys, common::Span<const int> colIds, NodeIdT nodeStart) { int nBlks = dh::DivRoundUp(size, BLKDIM_L1L3); hipMemset(sums.data(), 0, nUniqKeys * nCols * sizeof(GradientPair)); hipLaunchKernelGGL(( CubScanByKeyL1<BLKDIM_L1L3>) , dim3(nBlks), dim3(BLKDIM_L1L3), 0, 0, scans, vals, instIds, tmpScans, tmpKeys, keys, nUniqKeys, colIds, nodeStart, size); hipLaunchKernelGGL(( CubScanByKeyL2<BLKDIM_L2>), dim3(1), dim3(BLKDIM_L2), 0, 0, tmpScans, tmpKeys, nBlks); hipLaunchKernelGGL(( CubScanByKeyL3<BLKDIM_L1L3>) , dim3(nBlks), dim3(BLKDIM_L1L3), 0, 0, sums, scans, vals, instIds, tmpScans, tmpKeys, keys, nUniqKeys, colIds, nodeStart, size); } /** * @struct ExactSplitCandidate * @brief Abstraction of a possible split in the decision tree */ struct ExactSplitCandidate { /** the optimal gain score for this node */ float score; /** index where to split in the DMatrix */ int index; HOST_DEV_INLINE ExactSplitCandidate() : score{-FLT_MAX}, index{INT_MAX} {} /** * @brief Whether the split info is valid to be used to create a new child * @param minSplitLoss minimum score above which decision to split is made * @return true if splittable, else false */ HOST_DEV_INLINE bool IsSplittable(float minSplitLoss) const { return ((score >= minSplitLoss) && (index != INT_MAX)); } }; /** * @enum ArgMaxByKeyAlgo best_split_evaluation.cuh * @brief Help decide which algorithm to use for multi-argmax operation */ enum ArgMaxByKeyAlgo { /** simplest, use gmem-atomics for all updates */ kAbkGmem = 0, /** use smem-atomics for updates (when number of keys are less) */ kAbkSmem }; /** max depth until which to use shared mem based atomics for argmax */ static const int kMaxAbkLevels = 3; HOST_DEV_INLINE ExactSplitCandidate MaxSplit(ExactSplitCandidate a, ExactSplitCandidate b) { ExactSplitCandidate out; if (a.score < b.score) { out.score = b.score; out.index = b.index; } else if (a.score == b.score) { out.score = a.score; out.index = (a.index < b.index) ? 
a.index : b.index; } else { out.score = a.score; out.index = a.index; } return out; } DEV_INLINE void AtomicArgMax(ExactSplitCandidate* address, ExactSplitCandidate val) { unsigned long long* intAddress = reinterpret_cast<unsigned long long*>(address); // NOLINT unsigned long long old = *intAddress; // NOLINT unsigned long long assumed = old; // NOLINT do { assumed = old; ExactSplitCandidate res = MaxSplit(val, *reinterpret_cast<ExactSplitCandidate*>(&assumed)); old = atomicCAS(intAddress, assumed, *reinterpret_cast<uint64_t*>(&res)); } while (assumed != old); } DEV_INLINE void ArgMaxWithAtomics( int id, common::Span<ExactSplitCandidate> nodeSplits, common::Span<const GradientPair> gradScans, common::Span<const GradientPair> gradSums, common::Span<const float> vals, common::Span<const int> colIds, common::Span<const NodeIdT> nodeAssigns, common::Span<const DeviceNodeStats> nodes, int nUniqKeys, NodeIdT nodeStart, int len, const GPUTrainingParam& param) { int nodeId = nodeAssigns[id]; // @todo: this is really a bad check! but will be fixed when we move // to key-based reduction if ((id == 0) || !((nodeId == nodeAssigns[id - 1]) && (colIds[id] == colIds[id - 1]) && (vals[id] == vals[id - 1]))) { if (nodeId != kUnusedNode) { int sumId = Abs2UniqueKey(id, nodeAssigns, colIds, nodeStart, nUniqKeys); GradientPair colSum = gradSums[sumId]; int uid = nodeId - nodeStart; DeviceNodeStats node_stat = nodes[nodeId]; GradientPair parentSum = node_stat.sum_gradients; float parentGain = node_stat.root_gain; bool tmp; ExactSplitCandidate s; GradientPair missing = parentSum - colSum; s.score = LossChangeMissing(gradScans[id], missing, parentSum, parentGain, param, tmp); s.index = id; AtomicArgMax(&nodeSplits[uid], s); } // end if nodeId != UNUSED_NODE } // end if id == 0 ... 
} __global__ void AtomicArgMaxByKeyGmem( common::Span<ExactSplitCandidate> nodeSplits, common::Span<const GradientPair> gradScans, common::Span<const GradientPair> gradSums, common::Span<const float> vals, common::Span<const int> colIds, common::Span<const NodeIdT> nodeAssigns, common::Span<const DeviceNodeStats> nodes, int nUniqKeys, NodeIdT nodeStart, int len, const TrainParam param) { int id = threadIdx.x + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < len; id += stride) { ArgMaxWithAtomics(id, nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, GPUTrainingParam(param)); } } __global__ void AtomicArgMaxByKeySmem( common::Span<ExactSplitCandidate> nodeSplits, common::Span<const GradientPair> gradScans, common::Span<const GradientPair> gradSums, common::Span<const float> vals, common::Span<const int> colIds, common::Span<const NodeIdT> nodeAssigns, common::Span<const DeviceNodeStats> nodes, int nUniqKeys, NodeIdT nodeStart, int len, const GPUTrainingParam param) { extern __shared__ char sArr[]; common::Span<ExactSplitCandidate> sNodeSplits = common::Span<ExactSplitCandidate>( reinterpret_cast<ExactSplitCandidate*>(sArr), static_cast<typename common::Span<ExactSplitCandidate>::index_type>( nUniqKeys * sizeof(ExactSplitCandidate))); int tid = threadIdx.x; ExactSplitCandidate defVal; for (int i = tid; i < nUniqKeys; i += blockDim.x) { sNodeSplits[i] = defVal; } __syncthreads(); int id = tid + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < len; id += stride) { ArgMaxWithAtomics(id, sNodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, param); } __syncthreads(); for (int i = tid; i < nUniqKeys; i += blockDim.x) { ExactSplitCandidate s = sNodeSplits[i]; AtomicArgMax(&nodeSplits[i], s); } } /** * @brief Performs argmax_by_key functionality but for cases when keys need not * occur contiguously * @param nodeSplits will contain information on best split for each node * @param gradScans exclusive sum on sorted segments for each col * @param gradSums gradient sum for each column in DMatrix based on to node-ids * @param vals feature values * @param colIds column index for each element in the feature values array * @param nodeAssigns node-id assignments to each element in DMatrix * @param nodes pointer to all nodes for this tree in BFS order * @param nUniqKeys number of unique node-ids in this level * @param nodeStart start index of the node-ids in this level * @param len number of elements * @param param training parameters * @param algo which algorithm to use for argmax_by_key */ template <int BLKDIM = 256, int ITEMS_PER_THREAD = 4> void ArgMaxByKey(common::Span<ExactSplitCandidate> nodeSplits, common::Span<const GradientPair> gradScans, common::Span<const GradientPair> gradSums, common::Span<const float> vals, common::Span<const int> colIds, common::Span<const NodeIdT> nodeAssigns, common::Span<const DeviceNodeStats> nodes, int nUniqKeys, NodeIdT nodeStart, int len, const TrainParam param, ArgMaxByKeyAlgo algo) { dh::FillConst<ExactSplitCandidate, BLKDIM, ITEMS_PER_THREAD>( param.gpu_id, nodeSplits.data(), nUniqKeys, ExactSplitCandidate()); int nBlks = dh::DivRoundUp(len, ITEMS_PER_THREAD * BLKDIM); switch (algo) { case kAbkGmem: hipLaunchKernelGGL(( AtomicArgMaxByKeyGmem), dim3(nBlks), dim3(BLKDIM), 0, 0, nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, param); break; case kAbkSmem: hipLaunchKernelGGL(( 
AtomicArgMaxByKeySmem), dim3(nBlks), dim3(BLKDIM), sizeof(ExactSplitCandidate) * nUniqKeys, 0, nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, GPUTrainingParam(param)); break; default: throw std::runtime_error("argMaxByKey: Bad algo passed!"); } } __global__ void AssignColIds(int* colIds, const int* colOffsets) { int myId = blockIdx.x; int start = colOffsets[myId]; int end = colOffsets[myId + 1]; for (int id = start + threadIdx.x; id < end; id += blockDim.x) { colIds[id] = myId; } } __global__ void FillDefaultNodeIds(NodeIdT* nodeIdsPerInst, const DeviceNodeStats* nodes, int n_rows) { int id = threadIdx.x + (blockIdx.x * blockDim.x); if (id >= n_rows) { return; } // if this element belongs to none of the currently active node-id's NodeIdT nId = nodeIdsPerInst[id]; if (nId == kUnusedNode) { return; } const DeviceNodeStats n = nodes[nId]; NodeIdT result; if (n.IsLeaf() || n.IsUnused()) { result = kUnusedNode; } else if (n.dir == kLeftDir) { result = (2 * n.idx) + 1; } else { result = (2 * n.idx) + 2; } nodeIdsPerInst[id] = result; } __global__ void AssignNodeIds(NodeIdT* nodeIdsPerInst, int* nodeLocations, const NodeIdT* nodeIds, const int* instId, const DeviceNodeStats* nodes, const int* colOffsets, const float* vals, int nVals, int nCols) { int id = threadIdx.x + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < nVals; id += stride) { // fusing generation of indices for node locations nodeLocations[id] = id; // using nodeIds here since the previous kernel would have updated // the nodeIdsPerInst with all default assignments int nId = nodeIds[id]; // if this element belongs to none of the currently active node-id's if (nId != kUnusedNode) { const DeviceNodeStats n = nodes[nId]; int colId = n.fidx; // printf("nid=%d colId=%d id=%d\n", nId, colId, id); int start = colOffsets[colId]; int end = colOffsets[colId + 1]; // @todo: too much wasteful threads!! if ((id >= start) && (id < end) && !(n.IsLeaf() || n.IsUnused())) { NodeIdT result = (2 * n.idx) + 1 + (vals[id] >= n.fvalue); nodeIdsPerInst[instId[id]] = result; } } } } __global__ void MarkLeavesKernel(DeviceNodeStats* nodes, int len) { int id = (blockIdx.x * blockDim.x) + threadIdx.x; if ((id < len) && !nodes[id].IsUnused()) { int lid = (id << 1) + 1; int rid = (id << 1) + 2; if ((lid >= len) || (rid >= len)) { nodes[id].root_gain = -FLT_MAX; // bottom-most nodes } else if (nodes[lid].IsUnused() && nodes[rid].IsUnused()) { nodes[id].root_gain = -FLT_MAX; // unused child nodes } } } class GPUMaker : public TreeUpdater { protected: TrainParam param_; /** whether we have initialized memory already (so as not to repeat!) 
*/ bool allocated_; /** feature values stored in column-major compressed format */ dh::DVec2<float> vals_; dh::DVec<float> vals_cached_; /** corresponding instance id's of these featutre values */ dh::DVec2<int> instIds_; dh::DVec<int> inst_ids_cached_; /** column offsets for these feature values */ dh::DVec<int> colOffsets_; dh::DVec<GradientPair> gradsInst_; dh::DVec2<NodeIdT> nodeAssigns_; dh::DVec2<int> nodeLocations_; dh::DVec<DeviceNodeStats> nodes_; dh::DVec<NodeIdT> node_assigns_per_inst_; dh::DVec<GradientPair> gradsums_; dh::DVec<GradientPair> gradscans_; dh::DVec<ExactSplitCandidate> nodeSplits_; int n_vals_; int n_rows_; int n_cols_; int maxNodes_; int maxLeaves_; // devices are only used for resharding the HostDeviceVector passed as a parameter; // the algorithm works with a single GPU only GPUSet devices_; dh::CubMemory tmp_mem_; dh::DVec<GradientPair> tmpScanGradBuff_; dh::DVec<int> tmp_scan_key_buff_; dh::DVec<int> colIds_; dh::BulkAllocator<dh::MemoryType::kDevice> ba_; public: GPUMaker() : allocated_{false} {} ~GPUMaker() override = default; void Init(const std::vector<std::pair<std::string, std::string>> &args) override { param_.InitAllowUnknown(args); maxNodes_ = (1 << (param_.max_depth + 1)) - 1; maxLeaves_ = 1 << param_.max_depth; devices_ = GPUSet::All(param_.gpu_id, param_.n_gpus); } void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { // rescale learning rate according to size of trees float lr = param_.learning_rate; param_.learning_rate = lr / trees.size(); gpair->Reshard(devices_); try { // build tree for (auto tree : trees) { UpdateTree(gpair, dmat, tree); } } catch (const std::exception& e) { LOG(FATAL) << "grow_gpu exception: " << e.what() << std::endl; } param_.learning_rate = lr; } /// @note: Update should be only after Init!! void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, RegTree* hTree) { if (!allocated_) { SetupOneTimeData(dmat); } for (int i = 0; i < param_.max_depth; ++i) { if (i == 0) { // make sure to start on a fresh tree with sorted values! 
vals_.CurrentDVec() = vals_cached_; instIds_.CurrentDVec() = inst_ids_cached_; TransferGrads(gpair); } int nNodes = 1 << i; NodeIdT nodeStart = nNodes - 1; InitNodeData(i, nodeStart, nNodes); FindSplit(i, nodeStart, nNodes); } // mark all the used nodes with unused children as leaf nodes MarkLeaves(); Dense2SparseTree(hTree, nodes_, param_); } void Split2Node(int nNodes, NodeIdT nodeStart) { auto d_nodes = nodes_.GetSpan(); auto d_gradScans = gradscans_.GetSpan(); auto d_gradsums = gradsums_.GetSpan(); auto d_nodeAssigns = nodeAssigns_.CurrentSpan(); auto d_colIds = colIds_.GetSpan(); auto d_vals = vals_.Current(); auto d_nodeSplits = nodeSplits_.Data(); int nUniqKeys = nNodes; float min_split_loss = param_.min_split_loss; auto gpu_param = GPUTrainingParam(param_); dh::LaunchN(param_.gpu_id, nNodes, [=] __device__(int uid) { int absNodeId = uid + nodeStart; ExactSplitCandidate s = d_nodeSplits[uid]; if (s.IsSplittable(min_split_loss)) { int idx = s.index; int nodeInstId = Abs2UniqueKey(idx, d_nodeAssigns, d_colIds, nodeStart, nUniqKeys); bool missingLeft = true; const DeviceNodeStats& n = d_nodes[absNodeId]; GradientPair gradScan = d_gradScans[idx]; GradientPair gradSum = d_gradsums[nodeInstId]; float thresh = d_vals[idx]; int colId = d_colIds[idx]; // get the default direction for the current node GradientPair missing = n.sum_gradients - gradSum; LossChangeMissing(gradScan, missing, n.sum_gradients, n.root_gain, gpu_param, missingLeft); // get the score/weight/id/gradSum for left and right child nodes GradientPair lGradSum = missingLeft ? gradScan + missing : gradScan; GradientPair rGradSum = n.sum_gradients - lGradSum; // Create children d_nodes[LeftChildNodeIdx(absNodeId)] = DeviceNodeStats(lGradSum, LeftChildNodeIdx(absNodeId), gpu_param); d_nodes[RightChildNodeIdx(absNodeId)] = DeviceNodeStats(rGradSum, RightChildNodeIdx(absNodeId), gpu_param); // Set split for parent d_nodes[absNodeId].SetSplit(thresh, colId, missingLeft ? kLeftDir : kRightDir, lGradSum, rGradSum); } else { // cannot be split further, so this node is a leaf! d_nodes[absNodeId].root_gain = -FLT_MAX; } }); } void FindSplit(int level, NodeIdT nodeStart, int nNodes) { ReduceScanByKey(gradsums_.GetSpan(), gradscans_.GetSpan(), gradsInst_.GetSpan(), instIds_.CurrentSpan(), nodeAssigns_.CurrentSpan(), n_vals_, nNodes, n_cols_, tmpScanGradBuff_.GetSpan(), tmp_scan_key_buff_.GetSpan(), colIds_.GetSpan(), nodeStart); ArgMaxByKey(nodeSplits_.GetSpan(), gradscans_.GetSpan(), gradsums_.GetSpan(), vals_.CurrentSpan(), colIds_.GetSpan(), nodeAssigns_.CurrentSpan(), nodes_.GetSpan(), nNodes, nodeStart, n_vals_, param_, level <= kMaxAbkLevels ? 
kAbkSmem : kAbkGmem); Split2Node(nNodes, nodeStart); } void AllocateAllData(int offsetSize) { int tmpBuffSize = ScanTempBufferSize(n_vals_); ba_.Allocate(param_.gpu_id, &vals_, n_vals_, &vals_cached_, n_vals_, &instIds_, n_vals_, &inst_ids_cached_, n_vals_, &colOffsets_, offsetSize, &gradsInst_, n_rows_, &nodeAssigns_, n_vals_, &nodeLocations_, n_vals_, &nodes_, maxNodes_, &node_assigns_per_inst_, n_rows_, &gradsums_, maxLeaves_ * n_cols_, &gradscans_, n_vals_, &nodeSplits_, maxLeaves_, &tmpScanGradBuff_, tmpBuffSize, &tmp_scan_key_buff_, tmpBuffSize, &colIds_, n_vals_); } void SetupOneTimeData(DMatrix* dmat) { if (!dmat->SingleColBlock()) { LOG(FATAL) << "exact::GPUBuilder - must have 1 column block"; } std::vector<float> fval; std::vector<int> fId; std::vector<size_t> offset; ConvertToCsc(dmat, &fval, &fId, &offset); AllocateAllData(static_cast<int>(offset.size())); TransferAndSortData(fval, fId, offset); allocated_ = true; } void ConvertToCsc(DMatrix* dmat, std::vector<float>* fval, std::vector<int>* fId, std::vector<size_t>* offset) { const MetaInfo& info = dmat->Info(); CHECK(info.num_col_ < std::numeric_limits<int>::max()); CHECK(info.num_row_ < std::numeric_limits<int>::max()); n_rows_ = static_cast<int>(info.num_row_); n_cols_ = static_cast<int>(info.num_col_); offset->reserve(n_cols_ + 1); offset->push_back(0); fval->reserve(n_cols_ * n_rows_); fId->reserve(n_cols_ * n_rows_); // in case you end up with a DMatrix having no column access // then make sure to enable that before copying the data! for (const auto& batch : dmat->GetSortedColumnBatches()) { for (int i = 0; i < batch.Size(); i++) { auto col = batch[i]; for (const Entry& e : col) { int inst_id = static_cast<int>(e.index); fval->push_back(e.fvalue); fId->push_back(inst_id); } offset->push_back(fval->size()); } } CHECK(fval->size() < std::numeric_limits<int>::max()); n_vals_ = static_cast<int>(fval->size()); } void TransferAndSortData(const std::vector<float>& fval, const std::vector<int>& fId, const std::vector<size_t>& offset) { vals_.CurrentDVec() = fval; instIds_.CurrentDVec() = fId; colOffsets_ = offset; dh::SegmentedSort<float, int>(&tmp_mem_, &vals_, &instIds_, n_vals_, n_cols_, colOffsets_); vals_cached_ = vals_.CurrentDVec(); inst_ids_cached_ = instIds_.CurrentDVec(); hipLaunchKernelGGL(( AssignColIds), dim3(n_cols_), dim3(512), 0, 0, colIds_.Data(), colOffsets_.Data()); } void TransferGrads(HostDeviceVector<GradientPair>* gpair) { gpair->GatherTo(gradsInst_.tbegin(), gradsInst_.tend()); // evaluate the full-grad reduction for the root node dh::SumReduction<GradientPair>(tmp_mem_, gradsInst_, gradsums_, n_rows_); } void InitNodeData(int level, NodeIdT nodeStart, int nNodes) { // all instances belong to root node at the beginning! if (level == 0) { nodes_.Fill(DeviceNodeStats()); nodeAssigns_.CurrentDVec().Fill(0); node_assigns_per_inst_.Fill(0); // for root node, just update the gradient/score/weight/id info // before splitting it! 
Currently all data is on GPU, hence this // stupid little kernel auto d_nodes = nodes_.Data(); auto d_sums = gradsums_.Data(); auto gpu_params = GPUTrainingParam(param_); dh::LaunchN(param_.gpu_id, 1, [=] __device__(int idx) { d_nodes[0] = DeviceNodeStats(d_sums[0], 0, gpu_params); }); } else { const int BlkDim = 256; const int ItemsPerThread = 4; // assign default node ids first int nBlks = dh::DivRoundUp(n_rows_, BlkDim); hipLaunchKernelGGL(( FillDefaultNodeIds), dim3(nBlks), dim3(BlkDim), 0, 0, node_assigns_per_inst_.Data(), nodes_.Data(), n_rows_); // evaluate the correct child indices of non-missing values next nBlks = dh::DivRoundUp(n_vals_, BlkDim * ItemsPerThread); hipLaunchKernelGGL(( AssignNodeIds), dim3(nBlks), dim3(BlkDim), 0, 0, node_assigns_per_inst_.Data(), nodeLocations_.Current(), nodeAssigns_.Current(), instIds_.Current(), nodes_.Data(), colOffsets_.Data(), vals_.Current(), n_vals_, n_cols_); // gather the node assignments across all other columns too dh::Gather(param_.gpu_id, nodeAssigns_.Current(), node_assigns_per_inst_.Data(), instIds_.Current(), n_vals_); SortKeys(level); } } void SortKeys(int level) { // segmented-sort the arrays based on node-id's // but we don't need more than level+1 bits for sorting! SegmentedSort(&tmp_mem_, &nodeAssigns_, &nodeLocations_, n_vals_, n_cols_, colOffsets_, 0, level + 1); dh::Gather<float, int>(param_.gpu_id, vals_.other(), vals_.Current(), instIds_.other(), instIds_.Current(), nodeLocations_.Current(), n_vals_); vals_.buff().selector ^= 1; instIds_.buff().selector ^= 1; } void MarkLeaves() { const int BlkDim = 128; int nBlks = dh::DivRoundUp(maxNodes_, BlkDim); hipLaunchKernelGGL(( MarkLeavesKernel), dim3(nBlks), dim3(BlkDim), 0, 0, nodes_.Data(), maxNodes_); } }; XGBOOST_REGISTER_TREE_UPDATER(GPUMaker, "grow_gpu") .describe("Grow tree with GPU.") .set_body([]() { return new GPUMaker(); }); } // namespace tree } // namespace xgboost
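// A standalone sketch of the atomicCAS arg-max trick that AtomicArgMax above relies on:
// a (score, index) pair is packed into 64 bits and updated with a compare-and-swap loop,
// so many threads can race on "best candidate so far" without locks. The struct, kernels
// and sizes below are illustrative only, not xgboost API; production code (see
// AtomicArgMaxByKeySmem) first reduces into shared memory to cut contention on the
// global slot.
#include <cstdio>
#include <cfloat>
#include <climits>
#include <cuda_runtime.h>

struct alignas(8) Candidate {   // 8-byte aligned so it can alias an unsigned long long
    float score;
    int index;
};

__device__ Candidate MaxByScore(Candidate a, Candidate b) {
    if (a.score != b.score) return a.score > b.score ? a : b;
    return a.index < b.index ? a : b;   // deterministic tie-break on the smaller index
}

__device__ void AtomicArgMax(Candidate* address, Candidate val) {
    auto* as_ull = reinterpret_cast<unsigned long long*>(address);
    unsigned long long old = *as_ull, assumed;
    do {
        assumed = old;
        Candidate cur = *reinterpret_cast<Candidate*>(&assumed);
        Candidate best = MaxByScore(val, cur);
        old = atomicCAS(as_ull, assumed, *reinterpret_cast<unsigned long long*>(&best));
    } while (assumed != old);   // retry until our view of the slot was up to date
}

__global__ void FillScores(float* scores, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) scores[i] = -fabsf(static_cast<float>(i) - 123456.f);  // unique peak at i == 123456
}

__global__ void BestOfScores(const float* scores, int n, Candidate* out) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) AtomicArgMax(out, Candidate{scores[i], i});
}

int main() {
    const int n = 1 << 20;
    float* d_scores = nullptr;
    Candidate* d_best = nullptr;
    cudaMalloc(&d_scores, n * sizeof(float));
    cudaMalloc(&d_best, sizeof(Candidate));

    Candidate init{-FLT_MAX, INT_MAX};   // same "empty" sentinel as ExactSplitCandidate
    cudaMemcpy(d_best, &init, sizeof(init), cudaMemcpyHostToDevice);

    const int block = 256, grid = (n + block - 1) / block;
    FillScores<<<grid, block>>>(d_scores, n);
    BestOfScores<<<grid, block>>>(d_scores, n, d_best);

    Candidate best;
    cudaMemcpy(&best, d_best, sizeof(best), cudaMemcpyDeviceToHost);
    std::printf("best score %f at index %d\n", best.score, best.index);  // expect index 123456
    cudaFree(d_scores);
    cudaFree(d_best);
    return 0;
}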
4f4aa4fcd51df3a924951a92edbb026bb8a64fe9.cu
/*! * Copyright 2017-2018 XGBoost contributors */ #include <xgboost/tree_updater.h> #include <utility> #include <vector> #include <limits> #include <string> #include "../common/common.h" #include "param.h" #include "updater_gpu_common.cuh" namespace xgboost { namespace tree { DMLC_REGISTRY_FILE_TAG(updater_gpu); template <typename GradientPairT> XGBOOST_DEVICE float inline LossChangeMissing(const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum, const float& parent_gain, const GPUTrainingParam& param, bool& missing_left_out) { // NOLINT // Put gradients of missing values to left float missing_left_loss = DeviceCalcLossChange(param, scan + missing, parent_sum, parent_gain); float missing_right_loss = DeviceCalcLossChange(param, scan, parent_sum, parent_gain); if (missing_left_loss >= missing_right_loss) { missing_left_out = true; return missing_left_loss; } else { missing_left_out = false; return missing_right_loss; } } /** * @brief Absolute BFS order IDs to col-wise unique IDs based on user input * @param tid the index of the element that this thread should access * @param abs the array of absolute IDs * @param colIds the array of column IDs for each element * @param nodeStart the start of the node ID at this level * @param nKeys number of nodes at this level. * @return the uniq key */ static HOST_DEV_INLINE NodeIdT Abs2UniqueKey(int tid, common::Span<const NodeIdT> abs, common::Span<const int> colIds, NodeIdT nodeStart, int nKeys) { int a = abs[tid]; if (a == kUnusedNode) return a; return ((a - nodeStart) + (colIds[tid] * nKeys)); } /** * @struct Pair * @brief Pair used for key basd scan operations on GradientPair */ struct Pair { int key; GradientPair value; }; /** define a key that's not used at all in the entire boosting process */ static const int kNoneKey = -100; /** * @brief Allocate temporary buffers needed for scan operations * @param tmpScans gradient buffer * @param tmpKeys keys buffer * @param size number of elements that will be scanned */ template <int BLKDIM_L1L3 = 256> int ScanTempBufferSize(int size) { int num_blocks = dh::DivRoundUp(size, BLKDIM_L1L3); return num_blocks; } struct AddByKey { template <typename T> HOST_DEV_INLINE T operator()(const T& first, const T& second) const { T result; if (first.key == second.key) { result.key = first.key; result.value = first.value + second.value; } else { result.key = second.key; result.value = second.value; } return result; } }; /** * @brief Gradient value getter function * @param id the index into the vals or instIds array to which to fetch * @param vals the gradient value buffer * @param instIds instance index buffer * @return the expected gradient value */ HOST_DEV_INLINE GradientPair Get(int id, common::Span<const GradientPair> vals, common::Span<const int> instIds) { id = instIds[id]; return vals[id]; } template <int BLKDIM_L1L3> __global__ void CubScanByKeyL1( common::Span<GradientPair> scans, common::Span<const GradientPair> vals, common::Span<const int> instIds, common::Span<GradientPair> mScans, common::Span<int> mKeys, common::Span<const NodeIdT> keys, int nUniqKeys, common::Span<const int> colIds, NodeIdT nodeStart, const int size) { Pair rootPair = {kNoneKey, GradientPair(0.f, 0.f)}; int myKey; GradientPair myValue; using BlockScan = cub::BlockScan<Pair, BLKDIM_L1L3>; __shared__ typename BlockScan::TempStorage temp_storage; Pair threadData; int tid = blockIdx.x * BLKDIM_L1L3 + threadIdx.x; if (tid < size) { myKey = Abs2UniqueKey(tid, keys, colIds, nodeStart, nUniqKeys); myValue = 
Get(tid, vals, instIds); } else { myKey = kNoneKey; myValue = {}; } threadData.key = myKey; threadData.value = myValue; // get previous key, especially needed for the last thread in this block // in order to pass on the partial scan values. // this statement MUST appear before the checks below! // else, the result of this shuffle operation will be undefined #if (__CUDACC_VER_MAJOR__ >= 9) int previousKey = __shfl_up_sync(0xFFFFFFFF, myKey, 1); #else int previousKey = __shfl_up(myKey, 1); #endif // Collectively compute the block-wide exclusive prefix sum BlockScan(temp_storage) .ExclusiveScan(threadData, threadData, rootPair, AddByKey()); if (tid < size) { scans[tid] = threadData.value; } else { return; } if (threadIdx.x == BLKDIM_L1L3 - 1) { threadData.value = (myKey == previousKey) ? threadData.value : GradientPair(0.0f, 0.0f); mKeys[blockIdx.x] = myKey; mScans[blockIdx.x] = threadData.value + myValue; } } template <int BLKSIZE> __global__ void CubScanByKeyL2(common::Span<GradientPair> mScans, common::Span<int> mKeys, int mLength) { using BlockScan = cub::BlockScan<Pair, BLKSIZE, cub::BLOCK_SCAN_WARP_SCANS>; Pair threadData; __shared__ typename BlockScan::TempStorage temp_storage; for (int i = threadIdx.x; i < mLength; i += BLKSIZE - 1) { threadData.key = mKeys[i]; threadData.value = mScans[i]; BlockScan(temp_storage).InclusiveScan(threadData, threadData, AddByKey()); mScans[i] = threadData.value; __syncthreads(); } } template <int BLKDIM_L1L3> __global__ void CubScanByKeyL3(common::Span<GradientPair> sums, common::Span<GradientPair> scans, common::Span<const GradientPair> vals, common::Span<const int> instIds, common::Span<const GradientPair> mScans, common::Span<const int> mKeys, common::Span<const NodeIdT> keys, int nUniqKeys, common::Span<const int> colIds, NodeIdT nodeStart, const int size) { int relId = threadIdx.x; int tid = (blockIdx.x * BLKDIM_L1L3) + relId; // to avoid the following warning from nvcc: // __shared__ memory variable with non-empty constructor or destructor // (potential race between threads) __shared__ char gradBuff[sizeof(GradientPair)]; __shared__ int s_mKeys; GradientPair* s_mScans = reinterpret_cast<GradientPair*>(gradBuff); if (tid >= size) return; // cache block-wide partial scan info if (relId == 0) { s_mKeys = (blockIdx.x > 0) ? mKeys[blockIdx.x - 1] : kNoneKey; s_mScans[0] = (blockIdx.x > 0) ? mScans[blockIdx.x - 1] : GradientPair(); } int myKey = Abs2UniqueKey(tid, keys, colIds, nodeStart, nUniqKeys); int previousKey = tid == 0 ? kNoneKey : Abs2UniqueKey(tid - 1, keys, colIds, nodeStart, nUniqKeys); GradientPair my_value = scans[tid]; __syncthreads(); if (blockIdx.x > 0 && s_mKeys == previousKey) { my_value += s_mScans[0]; } if (tid == size - 1) { sums[previousKey] = my_value + Get(tid, vals, instIds); } if ((previousKey != myKey) && (previousKey >= 0)) { sums[previousKey] = my_value; my_value = GradientPair(0.0f, 0.0f); } scans[tid] = my_value; } /** * @brief Performs fused reduce and scan by key functionality. It is assumed * that * the keys occur contiguously! * @param sums the output gradient reductions for each element performed * key-wise * @param scans the output gradient scans for each element performed key-wise * @param vals the gradients evaluated for each observation. * @param instIds instance ids for each element * @param keys keys to be used to segment the reductions. They need not occur * contiguously in contrast to scan_by_key. Currently, we need one key per * value in the 'vals' array. 
* @param size number of elements in the 'vals' array * @param nUniqKeys max number of uniq keys found per column * @param nCols number of columns * @param tmpScans temporary scan buffer needed for cub-pyramid algo * @param tmpKeys temporary key buffer needed for cub-pyramid algo * @param colIds column indices for each element in the array * @param nodeStart index of the leftmost node in the current level */ template <int BLKDIM_L1L3 = 256, int BLKDIM_L2 = 512> void ReduceScanByKey(common::Span<GradientPair> sums, common::Span<GradientPair> scans, common::Span<GradientPair> vals, common::Span<const int> instIds, common::Span<const NodeIdT> keys, int size, int nUniqKeys, int nCols, common::Span<GradientPair> tmpScans, common::Span<int> tmpKeys, common::Span<const int> colIds, NodeIdT nodeStart) { int nBlks = dh::DivRoundUp(size, BLKDIM_L1L3); cudaMemset(sums.data(), 0, nUniqKeys * nCols * sizeof(GradientPair)); CubScanByKeyL1<BLKDIM_L1L3> <<<nBlks, BLKDIM_L1L3>>>(scans, vals, instIds, tmpScans, tmpKeys, keys, nUniqKeys, colIds, nodeStart, size); CubScanByKeyL2<BLKDIM_L2><<<1, BLKDIM_L2>>>(tmpScans, tmpKeys, nBlks); CubScanByKeyL3<BLKDIM_L1L3> <<<nBlks, BLKDIM_L1L3>>>(sums, scans, vals, instIds, tmpScans, tmpKeys, keys, nUniqKeys, colIds, nodeStart, size); } /** * @struct ExactSplitCandidate * @brief Abstraction of a possible split in the decision tree */ struct ExactSplitCandidate { /** the optimal gain score for this node */ float score; /** index where to split in the DMatrix */ int index; HOST_DEV_INLINE ExactSplitCandidate() : score{-FLT_MAX}, index{INT_MAX} {} /** * @brief Whether the split info is valid to be used to create a new child * @param minSplitLoss minimum score above which decision to split is made * @return true if splittable, else false */ HOST_DEV_INLINE bool IsSplittable(float minSplitLoss) const { return ((score >= minSplitLoss) && (index != INT_MAX)); } }; /** * @enum ArgMaxByKeyAlgo best_split_evaluation.cuh * @brief Help decide which algorithm to use for multi-argmax operation */ enum ArgMaxByKeyAlgo { /** simplest, use gmem-atomics for all updates */ kAbkGmem = 0, /** use smem-atomics for updates (when number of keys are less) */ kAbkSmem }; /** max depth until which to use shared mem based atomics for argmax */ static const int kMaxAbkLevels = 3; HOST_DEV_INLINE ExactSplitCandidate MaxSplit(ExactSplitCandidate a, ExactSplitCandidate b) { ExactSplitCandidate out; if (a.score < b.score) { out.score = b.score; out.index = b.index; } else if (a.score == b.score) { out.score = a.score; out.index = (a.index < b.index) ? 
a.index : b.index; } else { out.score = a.score; out.index = a.index; } return out; } DEV_INLINE void AtomicArgMax(ExactSplitCandidate* address, ExactSplitCandidate val) { unsigned long long* intAddress = reinterpret_cast<unsigned long long*>(address); // NOLINT unsigned long long old = *intAddress; // NOLINT unsigned long long assumed = old; // NOLINT do { assumed = old; ExactSplitCandidate res = MaxSplit(val, *reinterpret_cast<ExactSplitCandidate*>(&assumed)); old = atomicCAS(intAddress, assumed, *reinterpret_cast<uint64_t*>(&res)); } while (assumed != old); } DEV_INLINE void ArgMaxWithAtomics( int id, common::Span<ExactSplitCandidate> nodeSplits, common::Span<const GradientPair> gradScans, common::Span<const GradientPair> gradSums, common::Span<const float> vals, common::Span<const int> colIds, common::Span<const NodeIdT> nodeAssigns, common::Span<const DeviceNodeStats> nodes, int nUniqKeys, NodeIdT nodeStart, int len, const GPUTrainingParam& param) { int nodeId = nodeAssigns[id]; // @todo: this is really a bad check! but will be fixed when we move // to key-based reduction if ((id == 0) || !((nodeId == nodeAssigns[id - 1]) && (colIds[id] == colIds[id - 1]) && (vals[id] == vals[id - 1]))) { if (nodeId != kUnusedNode) { int sumId = Abs2UniqueKey(id, nodeAssigns, colIds, nodeStart, nUniqKeys); GradientPair colSum = gradSums[sumId]; int uid = nodeId - nodeStart; DeviceNodeStats node_stat = nodes[nodeId]; GradientPair parentSum = node_stat.sum_gradients; float parentGain = node_stat.root_gain; bool tmp; ExactSplitCandidate s; GradientPair missing = parentSum - colSum; s.score = LossChangeMissing(gradScans[id], missing, parentSum, parentGain, param, tmp); s.index = id; AtomicArgMax(&nodeSplits[uid], s); } // end if nodeId != UNUSED_NODE } // end if id == 0 ... 
} __global__ void AtomicArgMaxByKeyGmem( common::Span<ExactSplitCandidate> nodeSplits, common::Span<const GradientPair> gradScans, common::Span<const GradientPair> gradSums, common::Span<const float> vals, common::Span<const int> colIds, common::Span<const NodeIdT> nodeAssigns, common::Span<const DeviceNodeStats> nodes, int nUniqKeys, NodeIdT nodeStart, int len, const TrainParam param) { int id = threadIdx.x + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < len; id += stride) { ArgMaxWithAtomics(id, nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, GPUTrainingParam(param)); } } __global__ void AtomicArgMaxByKeySmem( common::Span<ExactSplitCandidate> nodeSplits, common::Span<const GradientPair> gradScans, common::Span<const GradientPair> gradSums, common::Span<const float> vals, common::Span<const int> colIds, common::Span<const NodeIdT> nodeAssigns, common::Span<const DeviceNodeStats> nodes, int nUniqKeys, NodeIdT nodeStart, int len, const GPUTrainingParam param) { extern __shared__ char sArr[]; common::Span<ExactSplitCandidate> sNodeSplits = common::Span<ExactSplitCandidate>( reinterpret_cast<ExactSplitCandidate*>(sArr), static_cast<typename common::Span<ExactSplitCandidate>::index_type>( nUniqKeys * sizeof(ExactSplitCandidate))); int tid = threadIdx.x; ExactSplitCandidate defVal; for (int i = tid; i < nUniqKeys; i += blockDim.x) { sNodeSplits[i] = defVal; } __syncthreads(); int id = tid + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < len; id += stride) { ArgMaxWithAtomics(id, sNodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, param); } __syncthreads(); for (int i = tid; i < nUniqKeys; i += blockDim.x) { ExactSplitCandidate s = sNodeSplits[i]; AtomicArgMax(&nodeSplits[i], s); } } /** * @brief Performs argmax_by_key functionality but for cases when keys need not * occur contiguously * @param nodeSplits will contain information on best split for each node * @param gradScans exclusive sum on sorted segments for each col * @param gradSums gradient sum for each column in DMatrix based on to node-ids * @param vals feature values * @param colIds column index for each element in the feature values array * @param nodeAssigns node-id assignments to each element in DMatrix * @param nodes pointer to all nodes for this tree in BFS order * @param nUniqKeys number of unique node-ids in this level * @param nodeStart start index of the node-ids in this level * @param len number of elements * @param param training parameters * @param algo which algorithm to use for argmax_by_key */ template <int BLKDIM = 256, int ITEMS_PER_THREAD = 4> void ArgMaxByKey(common::Span<ExactSplitCandidate> nodeSplits, common::Span<const GradientPair> gradScans, common::Span<const GradientPair> gradSums, common::Span<const float> vals, common::Span<const int> colIds, common::Span<const NodeIdT> nodeAssigns, common::Span<const DeviceNodeStats> nodes, int nUniqKeys, NodeIdT nodeStart, int len, const TrainParam param, ArgMaxByKeyAlgo algo) { dh::FillConst<ExactSplitCandidate, BLKDIM, ITEMS_PER_THREAD>( param.gpu_id, nodeSplits.data(), nUniqKeys, ExactSplitCandidate()); int nBlks = dh::DivRoundUp(len, ITEMS_PER_THREAD * BLKDIM); switch (algo) { case kAbkGmem: AtomicArgMaxByKeyGmem<<<nBlks, BLKDIM>>>( nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, param); break; case kAbkSmem: AtomicArgMaxByKeySmem<<<nBlks, BLKDIM, 
sizeof(ExactSplitCandidate) * nUniqKeys>>>( nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, GPUTrainingParam(param)); break; default: throw std::runtime_error("argMaxByKey: Bad algo passed!"); } } __global__ void AssignColIds(int* colIds, const int* colOffsets) { int myId = blockIdx.x; int start = colOffsets[myId]; int end = colOffsets[myId + 1]; for (int id = start + threadIdx.x; id < end; id += blockDim.x) { colIds[id] = myId; } } __global__ void FillDefaultNodeIds(NodeIdT* nodeIdsPerInst, const DeviceNodeStats* nodes, int n_rows) { int id = threadIdx.x + (blockIdx.x * blockDim.x); if (id >= n_rows) { return; } // if this element belongs to none of the currently active node-id's NodeIdT nId = nodeIdsPerInst[id]; if (nId == kUnusedNode) { return; } const DeviceNodeStats n = nodes[nId]; NodeIdT result; if (n.IsLeaf() || n.IsUnused()) { result = kUnusedNode; } else if (n.dir == kLeftDir) { result = (2 * n.idx) + 1; } else { result = (2 * n.idx) + 2; } nodeIdsPerInst[id] = result; } __global__ void AssignNodeIds(NodeIdT* nodeIdsPerInst, int* nodeLocations, const NodeIdT* nodeIds, const int* instId, const DeviceNodeStats* nodes, const int* colOffsets, const float* vals, int nVals, int nCols) { int id = threadIdx.x + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < nVals; id += stride) { // fusing generation of indices for node locations nodeLocations[id] = id; // using nodeIds here since the previous kernel would have updated // the nodeIdsPerInst with all default assignments int nId = nodeIds[id]; // if this element belongs to none of the currently active node-id's if (nId != kUnusedNode) { const DeviceNodeStats n = nodes[nId]; int colId = n.fidx; // printf("nid=%d colId=%d id=%d\n", nId, colId, id); int start = colOffsets[colId]; int end = colOffsets[colId + 1]; // @todo: too much wasteful threads!! if ((id >= start) && (id < end) && !(n.IsLeaf() || n.IsUnused())) { NodeIdT result = (2 * n.idx) + 1 + (vals[id] >= n.fvalue); nodeIdsPerInst[instId[id]] = result; } } } } __global__ void MarkLeavesKernel(DeviceNodeStats* nodes, int len) { int id = (blockIdx.x * blockDim.x) + threadIdx.x; if ((id < len) && !nodes[id].IsUnused()) { int lid = (id << 1) + 1; int rid = (id << 1) + 2; if ((lid >= len) || (rid >= len)) { nodes[id].root_gain = -FLT_MAX; // bottom-most nodes } else if (nodes[lid].IsUnused() && nodes[rid].IsUnused()) { nodes[id].root_gain = -FLT_MAX; // unused child nodes } } } class GPUMaker : public TreeUpdater { protected: TrainParam param_; /** whether we have initialized memory already (so as not to repeat!) 
*/ bool allocated_; /** feature values stored in column-major compressed format */ dh::DVec2<float> vals_; dh::DVec<float> vals_cached_; /** corresponding instance id's of these featutre values */ dh::DVec2<int> instIds_; dh::DVec<int> inst_ids_cached_; /** column offsets for these feature values */ dh::DVec<int> colOffsets_; dh::DVec<GradientPair> gradsInst_; dh::DVec2<NodeIdT> nodeAssigns_; dh::DVec2<int> nodeLocations_; dh::DVec<DeviceNodeStats> nodes_; dh::DVec<NodeIdT> node_assigns_per_inst_; dh::DVec<GradientPair> gradsums_; dh::DVec<GradientPair> gradscans_; dh::DVec<ExactSplitCandidate> nodeSplits_; int n_vals_; int n_rows_; int n_cols_; int maxNodes_; int maxLeaves_; // devices are only used for resharding the HostDeviceVector passed as a parameter; // the algorithm works with a single GPU only GPUSet devices_; dh::CubMemory tmp_mem_; dh::DVec<GradientPair> tmpScanGradBuff_; dh::DVec<int> tmp_scan_key_buff_; dh::DVec<int> colIds_; dh::BulkAllocator<dh::MemoryType::kDevice> ba_; public: GPUMaker() : allocated_{false} {} ~GPUMaker() override = default; void Init(const std::vector<std::pair<std::string, std::string>> &args) override { param_.InitAllowUnknown(args); maxNodes_ = (1 << (param_.max_depth + 1)) - 1; maxLeaves_ = 1 << param_.max_depth; devices_ = GPUSet::All(param_.gpu_id, param_.n_gpus); } void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { // rescale learning rate according to size of trees float lr = param_.learning_rate; param_.learning_rate = lr / trees.size(); gpair->Reshard(devices_); try { // build tree for (auto tree : trees) { UpdateTree(gpair, dmat, tree); } } catch (const std::exception& e) { LOG(FATAL) << "grow_gpu exception: " << e.what() << std::endl; } param_.learning_rate = lr; } /// @note: Update should be only after Init!! void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, RegTree* hTree) { if (!allocated_) { SetupOneTimeData(dmat); } for (int i = 0; i < param_.max_depth; ++i) { if (i == 0) { // make sure to start on a fresh tree with sorted values! 
vals_.CurrentDVec() = vals_cached_; instIds_.CurrentDVec() = inst_ids_cached_; TransferGrads(gpair); } int nNodes = 1 << i; NodeIdT nodeStart = nNodes - 1; InitNodeData(i, nodeStart, nNodes); FindSplit(i, nodeStart, nNodes); } // mark all the used nodes with unused children as leaf nodes MarkLeaves(); Dense2SparseTree(hTree, nodes_, param_); } void Split2Node(int nNodes, NodeIdT nodeStart) { auto d_nodes = nodes_.GetSpan(); auto d_gradScans = gradscans_.GetSpan(); auto d_gradsums = gradsums_.GetSpan(); auto d_nodeAssigns = nodeAssigns_.CurrentSpan(); auto d_colIds = colIds_.GetSpan(); auto d_vals = vals_.Current(); auto d_nodeSplits = nodeSplits_.Data(); int nUniqKeys = nNodes; float min_split_loss = param_.min_split_loss; auto gpu_param = GPUTrainingParam(param_); dh::LaunchN(param_.gpu_id, nNodes, [=] __device__(int uid) { int absNodeId = uid + nodeStart; ExactSplitCandidate s = d_nodeSplits[uid]; if (s.IsSplittable(min_split_loss)) { int idx = s.index; int nodeInstId = Abs2UniqueKey(idx, d_nodeAssigns, d_colIds, nodeStart, nUniqKeys); bool missingLeft = true; const DeviceNodeStats& n = d_nodes[absNodeId]; GradientPair gradScan = d_gradScans[idx]; GradientPair gradSum = d_gradsums[nodeInstId]; float thresh = d_vals[idx]; int colId = d_colIds[idx]; // get the default direction for the current node GradientPair missing = n.sum_gradients - gradSum; LossChangeMissing(gradScan, missing, n.sum_gradients, n.root_gain, gpu_param, missingLeft); // get the score/weight/id/gradSum for left and right child nodes GradientPair lGradSum = missingLeft ? gradScan + missing : gradScan; GradientPair rGradSum = n.sum_gradients - lGradSum; // Create children d_nodes[LeftChildNodeIdx(absNodeId)] = DeviceNodeStats(lGradSum, LeftChildNodeIdx(absNodeId), gpu_param); d_nodes[RightChildNodeIdx(absNodeId)] = DeviceNodeStats(rGradSum, RightChildNodeIdx(absNodeId), gpu_param); // Set split for parent d_nodes[absNodeId].SetSplit(thresh, colId, missingLeft ? kLeftDir : kRightDir, lGradSum, rGradSum); } else { // cannot be split further, so this node is a leaf! d_nodes[absNodeId].root_gain = -FLT_MAX; } }); } void FindSplit(int level, NodeIdT nodeStart, int nNodes) { ReduceScanByKey(gradsums_.GetSpan(), gradscans_.GetSpan(), gradsInst_.GetSpan(), instIds_.CurrentSpan(), nodeAssigns_.CurrentSpan(), n_vals_, nNodes, n_cols_, tmpScanGradBuff_.GetSpan(), tmp_scan_key_buff_.GetSpan(), colIds_.GetSpan(), nodeStart); ArgMaxByKey(nodeSplits_.GetSpan(), gradscans_.GetSpan(), gradsums_.GetSpan(), vals_.CurrentSpan(), colIds_.GetSpan(), nodeAssigns_.CurrentSpan(), nodes_.GetSpan(), nNodes, nodeStart, n_vals_, param_, level <= kMaxAbkLevels ? 
kAbkSmem : kAbkGmem); Split2Node(nNodes, nodeStart); } void AllocateAllData(int offsetSize) { int tmpBuffSize = ScanTempBufferSize(n_vals_); ba_.Allocate(param_.gpu_id, &vals_, n_vals_, &vals_cached_, n_vals_, &instIds_, n_vals_, &inst_ids_cached_, n_vals_, &colOffsets_, offsetSize, &gradsInst_, n_rows_, &nodeAssigns_, n_vals_, &nodeLocations_, n_vals_, &nodes_, maxNodes_, &node_assigns_per_inst_, n_rows_, &gradsums_, maxLeaves_ * n_cols_, &gradscans_, n_vals_, &nodeSplits_, maxLeaves_, &tmpScanGradBuff_, tmpBuffSize, &tmp_scan_key_buff_, tmpBuffSize, &colIds_, n_vals_); } void SetupOneTimeData(DMatrix* dmat) { if (!dmat->SingleColBlock()) { LOG(FATAL) << "exact::GPUBuilder - must have 1 column block"; } std::vector<float> fval; std::vector<int> fId; std::vector<size_t> offset; ConvertToCsc(dmat, &fval, &fId, &offset); AllocateAllData(static_cast<int>(offset.size())); TransferAndSortData(fval, fId, offset); allocated_ = true; } void ConvertToCsc(DMatrix* dmat, std::vector<float>* fval, std::vector<int>* fId, std::vector<size_t>* offset) { const MetaInfo& info = dmat->Info(); CHECK(info.num_col_ < std::numeric_limits<int>::max()); CHECK(info.num_row_ < std::numeric_limits<int>::max()); n_rows_ = static_cast<int>(info.num_row_); n_cols_ = static_cast<int>(info.num_col_); offset->reserve(n_cols_ + 1); offset->push_back(0); fval->reserve(n_cols_ * n_rows_); fId->reserve(n_cols_ * n_rows_); // in case you end up with a DMatrix having no column access // then make sure to enable that before copying the data! for (const auto& batch : dmat->GetSortedColumnBatches()) { for (int i = 0; i < batch.Size(); i++) { auto col = batch[i]; for (const Entry& e : col) { int inst_id = static_cast<int>(e.index); fval->push_back(e.fvalue); fId->push_back(inst_id); } offset->push_back(fval->size()); } } CHECK(fval->size() < std::numeric_limits<int>::max()); n_vals_ = static_cast<int>(fval->size()); } void TransferAndSortData(const std::vector<float>& fval, const std::vector<int>& fId, const std::vector<size_t>& offset) { vals_.CurrentDVec() = fval; instIds_.CurrentDVec() = fId; colOffsets_ = offset; dh::SegmentedSort<float, int>(&tmp_mem_, &vals_, &instIds_, n_vals_, n_cols_, colOffsets_); vals_cached_ = vals_.CurrentDVec(); inst_ids_cached_ = instIds_.CurrentDVec(); AssignColIds<<<n_cols_, 512>>>(colIds_.Data(), colOffsets_.Data()); } void TransferGrads(HostDeviceVector<GradientPair>* gpair) { gpair->GatherTo(gradsInst_.tbegin(), gradsInst_.tend()); // evaluate the full-grad reduction for the root node dh::SumReduction<GradientPair>(tmp_mem_, gradsInst_, gradsums_, n_rows_); } void InitNodeData(int level, NodeIdT nodeStart, int nNodes) { // all instances belong to root node at the beginning! if (level == 0) { nodes_.Fill(DeviceNodeStats()); nodeAssigns_.CurrentDVec().Fill(0); node_assigns_per_inst_.Fill(0); // for root node, just update the gradient/score/weight/id info // before splitting it! 
Currently all data is on GPU, hence this // stupid little kernel auto d_nodes = nodes_.Data(); auto d_sums = gradsums_.Data(); auto gpu_params = GPUTrainingParam(param_); dh::LaunchN(param_.gpu_id, 1, [=] __device__(int idx) { d_nodes[0] = DeviceNodeStats(d_sums[0], 0, gpu_params); }); } else { const int BlkDim = 256; const int ItemsPerThread = 4; // assign default node ids first int nBlks = dh::DivRoundUp(n_rows_, BlkDim); FillDefaultNodeIds<<<nBlks, BlkDim>>>(node_assigns_per_inst_.Data(), nodes_.Data(), n_rows_); // evaluate the correct child indices of non-missing values next nBlks = dh::DivRoundUp(n_vals_, BlkDim * ItemsPerThread); AssignNodeIds<<<nBlks, BlkDim>>>( node_assigns_per_inst_.Data(), nodeLocations_.Current(), nodeAssigns_.Current(), instIds_.Current(), nodes_.Data(), colOffsets_.Data(), vals_.Current(), n_vals_, n_cols_); // gather the node assignments across all other columns too dh::Gather(param_.gpu_id, nodeAssigns_.Current(), node_assigns_per_inst_.Data(), instIds_.Current(), n_vals_); SortKeys(level); } } void SortKeys(int level) { // segmented-sort the arrays based on node-id's // but we don't need more than level+1 bits for sorting! SegmentedSort(&tmp_mem_, &nodeAssigns_, &nodeLocations_, n_vals_, n_cols_, colOffsets_, 0, level + 1); dh::Gather<float, int>(param_.gpu_id, vals_.other(), vals_.Current(), instIds_.other(), instIds_.Current(), nodeLocations_.Current(), n_vals_); vals_.buff().selector ^= 1; instIds_.buff().selector ^= 1; } void MarkLeaves() { const int BlkDim = 128; int nBlks = dh::DivRoundUp(maxNodes_, BlkDim); MarkLeavesKernel<<<nBlks, BlkDim>>>(nodes_.Data(), maxNodes_); } }; XGBOOST_REGISTER_TREE_UPDATER(GPUMaker, "grow_gpu") .describe("Grow tree with GPU.") .set_body([]() { return new GPUMaker(); }); } // namespace tree } // namespace xgboost
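// Editor's note (hedged sketch, not part of the original sources): Split2Node and
// InitNodeData above rely on xgboost's internal dh::LaunchN helper to run a
// __device__ lambda once per index. The minimal re-creation below only illustrates
// that launch pattern; launch_n_kernel, LaunchNSketch, and example_mark_leaves are
// hypothetical names, and passing device lambdas like this assumes compiler support
// (hipcc's HIP device lambdas, or nvcc with --extended-lambda).
#include "hip/hip_runtime.h"
#include <cfloat>

template <typename Func>
__global__ void launch_n_kernel(int n, Func f) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) f(i);  // one lambda invocation per logical index
}

template <typename Func>
void LaunchNSketch(int device, int n, Func f) {
  hipSetDevice(device);
  const int block = 256;
  const int grid = (n + block - 1) / block;
  hipLaunchKernelGGL((launch_n_kernel<Func>), dim3(grid), dim3(block), 0, 0, n, f);
}

// Usage resembling the leaf marking in Split2Node: flag nNodes entries on the device.
void example_mark_leaves(float* d_root_gain, int nNodes) {
  LaunchNSketch(0, nNodes, [=] __device__(int uid) {
    d_root_gain[uid] = -FLT_MAX;  // same sentinel the tree code uses for leaves
  });
  hipDeviceSynchronize();
}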
062be51e09d7a33cbc3f270d69c860df3a671b3f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "redukcja.h"

// <<Sequential addressing>>
template<size_t Block>
__global__ void device::scalar(real *x, real *y, int dim, real *res) {
    size_t tid = threadIdx.x;

    // Sum a sector for the thread.
    size_t lo = (tid * dim) / blockDim.x,
           hi = ((tid + 1) * dim) / blockDim.x;
    real total = 0;
    for (size_t i = lo; i < hi; ++i) {
        total += x[i] * y[i];
    }

    __shared__ real aux[Block];
    aux[tid] = total;
    __syncthreads();

    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s)
            aux[tid] += aux[tid + s];
        __syncthreads();
    }

    if (tid == 0)
        *res = aux[0];
}
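// Editor's note (hedged sketch, not part of the original sources): redukcja.h is
// not shown here, so this host-side driver assumes "real" is float and that the
// device::scalar<Block> template is visible from the calling code (e.g. appended
// to this same file); dot_product_host is an illustrative name only. It shows the
// intended launch shape: a single block of exactly Block threads, each summing its
// own slice of x*y before the shared-memory tree reduction.
#include "hip/hip_runtime.h"
#include <vector>

float dot_product_host(const std::vector<float>& x, const std::vector<float>& y) {
    const int dim = static_cast<int>(x.size());
    constexpr size_t Block = 256;  // must equal blockDim.x and be a power of two
    float *d_x, *d_y, *d_res;
    hipMalloc(&d_x, dim * sizeof(float));
    hipMalloc(&d_y, dim * sizeof(float));
    hipMalloc(&d_res, sizeof(float));
    hipMemcpy(d_x, x.data(), dim * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_y, y.data(), dim * sizeof(float), hipMemcpyHostToDevice);
    hipLaunchKernelGGL((device::scalar<Block>), dim3(1), dim3(Block), 0, 0,
                       d_x, d_y, dim, d_res);
    float result = 0.f;
    hipMemcpy(&result, d_res, sizeof(float), hipMemcpyDeviceToHost);  // implicit sync
    hipFree(d_x);
    hipFree(d_y);
    hipFree(d_res);
    return result;
}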
062be51e09d7a33cbc3f270d69c860df3a671b3f.cu
#include "redukcja.h"

// <<Sequential addressing>>
template<size_t Block>
__global__ void device::scalar(real *x, real *y, int dim, real *res) {
    size_t tid = threadIdx.x;

    // Sum a sector for the thread.
    size_t lo = (tid * dim) / blockDim.x,
           hi = ((tid + 1) * dim) / blockDim.x;
    real total = 0;
    for (size_t i = lo; i < hi; ++i) {
        total += x[i] * y[i];
    }

    __shared__ real aux[Block];
    aux[tid] = total;
    __syncthreads();

    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s)
            aux[tid] += aux[tid + s];
        __syncthreads();
    }

    if (tid == 0)
        *res = aux[0];
}
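// Editor's note: a short worked trace of the sequential-addressing loop above,
// for an assumed blockDim.x of 8, to make the s >>= 1 halving concrete.
//   start:  aux = [a0, a1, a2, a3, a4, a5, a6, a7]   (per-thread partial sums)
//   s = 4:  aux[0..3] += aux[4..7]  -> [a0+a4, a1+a5, a2+a6, a3+a7, ...]
//   s = 2:  aux[0..1] += aux[2..3]  -> [a0+a4+a2+a6, a1+a5+a3+a7, ...]
//   s = 1:  aux[0]    += aux[1]     -> aux[0] holds the full dot product
// Threads with tid >= s idle in a given round but must still reach __syncthreads().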
01cf181db9d8b6728574cc813651e6090c16d789.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "prefix_sum_cuda.cuh"

__global__ void prefix_sum_cuda(int *a, size_t N) {
    int tid = threadIdx.x;
    int i = 0;
    for (i = 2; i <= N; i *= 2) {
        if (((i - tid % i) == 1) && tid != 0) {
            a[tid] = a[tid] + a[tid - i / 2];
        }
        __syncthreads();
    }
    if (tid == N - 1) {
        a[tid] = 0;
    }
    for (; i > 1; i /= 2) {
        if (((i - tid % i) == 1) && tid != 0) {
            int temp = a[tid - i / 2];
            a[tid - i / 2] = a[tid];
            a[tid] = a[tid] + temp;
        }
        __syncthreads();
    }
    // a[tid] += d_in[tid];
}
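// Editor's note (hedged sketch, not part of the original sources): the kernel
// above is an in-place, single-block exclusive scan (Blelloch up-sweep then
// down-sweep), so it assumes N == blockDim.x, N a power of two, and a single
// block (at most 1024 threads). run_exclusive_scan below is an illustrative
// driver only.
#include "hip/hip_runtime.h"
#include "prefix_sum_cuda.cuh"

void run_exclusive_scan(int* h_data, size_t N) {  // N: power of two, N <= 1024
    int* d_a = nullptr;
    hipMalloc(&d_a, N * sizeof(int));
    hipMemcpy(d_a, h_data, N * sizeof(int), hipMemcpyHostToDevice);
    // One block of N threads: __syncthreads() in the kernel only synchronizes a block.
    hipLaunchKernelGGL(prefix_sum_cuda, dim3(1), dim3(static_cast<unsigned int>(N)),
                       0, 0, d_a, N);
    hipMemcpy(h_data, d_a, N * sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d_a);
}

// Example: {3, 1, 7, 0, 4, 1, 6, 3} becomes {0, 3, 4, 11, 11, 15, 16, 22}.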
01cf181db9d8b6728574cc813651e6090c16d789.cu
#include "prefix_sum_cuda.cuh"

__global__ void prefix_sum_cuda(int *a, size_t N) {
    int tid = threadIdx.x;
    int i = 0;
    for (i = 2; i <= N; i *= 2) {
        if (((i - tid % i) == 1) && tid != 0) {
            a[tid] = a[tid] + a[tid - i / 2];
        }
        __syncthreads();
    }
    if (tid == N - 1) {
        a[tid] = 0;
    }
    for (; i > 1; i /= 2) {
        if (((i - tid % i) == 1) && tid != 0) {
            int temp = a[tid - i / 2];
            a[tid - i / 2] = a[tid];
            a[tid] = a[tid] + temp;
        }
        __syncthreads();
    }
    // a[tid] += d_in[tid];
}
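// Editor's note: a worked trace of the scan above for an assumed N = 4 and
// input [a, b, c, d], showing how the down-sweep's swap-and-add turns the
// up-sweep tree into exclusive prefix sums.
//   after up-sweep (i = 2, then 4):   [a, a+b, c, a+b+c+d]
//   zero the last element:            [a, a+b, c, 0]
//   down-sweep, i = 4 (tid 3):        [a, 0,   c, a+b]
//   down-sweep, i = 2 (tids 1 and 3): [0, a, a+b, a+b+c]
// The first down-sweep iteration runs with i = 2N, matches no thread, and only syncs.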
f7e36f32f2c2f8092a69670ad839df048545d88e.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <Hale.h> #include <glm/glm.hpp> #include "unistd.h" // for sleep() #include <fstream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "lib/Image.h" #include <vector> #include <unordered_map> #include <time.h> using namespace std; //from cuda_volume_rendering #define PI 3.14159265 #define MAX(a,b) ((a)>(b)?(a):(b)) texture<float, 3, hipReadModeElementType> tex0; // 3D texture texture<float, 3, hipReadModeElementType> tex1; // 3D texture texture<float, 3, hipReadModeElementType> tex2; // 3D texture texture<float, 3, hipReadModeElementType> tex3; // 3D texture texture<float, 3, hipReadModeElementType> tex4; // 3D texture texture<float, 3, hipReadModeElementType> tex5; // 3D texture /* hipArray *d_volumeArray0 = 0; hipArray *d_volumeArray1 = 0; hipArray *d_volumeArray2 = 0; */ #define NTEX 3 //texture<float, 3, hipReadModeElementType> tex[NTEX]; //+1 for an extra volume for interpolation hipArray *d_volumeArray[NTEX+1]; hipArray *d_volumeArray1[NTEX+1]; //range for quantizing double range[] = {0,1,0,1600,0,3300,0,1}; double range_p[] = {0,1,0,1,0,1,0,1}; #define CLIP(x,a,b) ((x)<(a)?(a):((x)>(b)?(b):(x))) //ctmr filter double ctmr_kern(double x) { if (x<-2) return 0; if (x<-1) return 0.5*(4+8*x+5*x*x+x*x*x); if (x<0) return 0.5*(2-5*x*x-3*x*x*x); if (x<1) return 0.5*(2-5*x*x+3*x*x*x); if (x<2) return 0.5*(4-8*x+5*x*x-x*x*x); return 0; } double ctmr_kern_g(double x) { if (x<-2) return 0; if (x<-1) return 0.5*(3*x*x+10*x+8); if (x<0) return 0.5*(-9*x*x-10*x); if (x<1) return 0.5*(9*x*x-10*x); if (x<2) return 0.5*(-3*x*x+10*x-8); return 0; } double ctmr_kern_gg(double x) { if (x<-2) return 0; if (x<-1) return 0.5*(6*x+10); if (x<0) return 0.5*(-18*x-10); if (x<1) return 0.5*(18*x-10); if (x<2) return 0.5*(-6*x+10); return 0; } //centered at a1, x in [0,1) double ctmr(double x, double a0, double a1, double a2, double a3) { double res = 0; res = a0*ctmr_kern(x+1)+a1*ctmr_kern(x)+a2*ctmr_kern(x-1)+a3*ctmr_kern(x-2); return res; } double ctmr_g(double x, double a0, double a1, double a2, double a3) { double res = 0; res = a0*ctmr_kern_g(x+1)+a1*ctmr_kern_g(x)+a2*ctmr_kern_g(x-1)+a3*ctmr_kern_g(x-2); return res; } double ctmr_gg(double x, double a0, double a1, double a2, double a3) { double res = 0; res = a0*ctmr_kern_gg(x+1)+a1*ctmr_kern_gg(x)+a2*ctmr_kern_gg(x-1)+a3*ctmr_kern_gg(x-2); return res; } // w0, w1, w2, and w3 are the four cubic B-spline basis functions __host__ __device__ float w0(float a) { return (1.0f/6.0f)*(a*(a*(-a + 3.0f) - 3.0f) + 1.0f); } __host__ __device__ float w1(float a) { return (1.0f/6.0f)*(a*a*(3.0f*a - 6.0f) + 4.0f); } __host__ __device__ float w2(float a) { return (1.0f/6.0f)*(a*(a*(-3.0f*a + 3.0f) + 3.0f) + 1.0f); } __host__ __device__ float w3(float a) { return (1.0f/6.0f)*(a*a*a); } //derivatives of basic functions __host__ __device__ float w0g(float a) { return -(1.0f/2.0f)*a*a + a - (1.0f/2.0f); } __host__ __device__ float w1g(float a) { return (3.0f/2.0f)*a*a - 2*a; } __host__ __device__ float w2g(float a) { return -(3.0f/2.0f)*a*a + a + (1.0/2.0); } __host__ __device__ float w3g(float a) { return (1.0f/2.0f)*a*a; } //second derivatives of basic functions __host__ __device__ float w0gg(float a) { return 1-a; } __host__ __device__ float w1gg(float a) { return 3*a-2; } __host__ __device__ float w2gg(float a) { return 1-3*a; } __host__ __device__ float w3gg(float a) { return a; } // filter 4 values using cubic splines template<class T> __host__ __device__ T 
cubicFilter(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * w0(x); r += c1 * w1(x); r += c2 * w2(x); r += c3 * w3(x); return r; } //filtering with derivative of basic functions template<class T> __host__ __device__ T cubicFilter_G(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * w0g(x); r += c1 * w1g(x); r += c2 * w2g(x); r += c3 * w3g(x); return r; } //filtering with second derivative of basic functions template<class T> __host__ __device__ T cubicFilter_GG(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * w0gg(x); r += c1 * w1gg(x); r += c2 * w2gg(x); r += c3 * w3gg(x); return r; } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter<R>(fy, cubicFilter<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } //gradient in X direction template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GX(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter<R>(fy, cubicFilter_G<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GY(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter_G<R>(fy, cubicFilter<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> __device__ R tex3DBicubic(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY<T,R>(texref,x,y,pz-1), tex3DBicubicXY<T,R>(texref,x,y,pz), tex3DBicubicXY<T,R>(texref,x,y,pz+1), tex3DBicubicXY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GX(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float pz 
= floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GX<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GY(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GY<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GZ(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter_G<R>(fz, tex3DBicubicXY<T,R>(texref,x,y,pz-1), tex3DBicubicXY<T,R>(texref,x,y,pz), tex3DBicubicXY<T,R>(texref,x,y,pz+1), tex3DBicubicXY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GGX(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter<R>(fy, cubicFilter_GG<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter_GG<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter_GG<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter_GG<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GGY(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter_GG<R>(fy, cubicFilter<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } //derivative through X, then through Y template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GYGX(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter_G<R>(fy, cubicFilter_G<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> __device__ R tex3DBicubic_GGX(const texture<T, 3, hipReadModeElementType> 
texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GGX<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GGX<T,R>(texref,x,y,pz), tex3DBicubicXY_GGX<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GGX<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GGY(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GGY<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GGY<T,R>(texref,x,y,pz), tex3DBicubicXY_GGY<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GGY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GGZ(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter_GG<R>(fz, tex3DBicubicXY<T,R>(texref,x,y,pz-1), tex3DBicubicXY<T,R>(texref,x,y,pz), tex3DBicubicXY<T,R>(texref,x,y,pz+1), tex3DBicubicXY<T,R>(texref,x,y,pz+2) ); } //derivative through X, then through Y template<class T, class R> __device__ R tex3DBicubic_GYGX(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GYGX<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GYGX<T,R>(texref,x,y,pz), tex3DBicubicXY_GYGX<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GYGX<T,R>(texref,x,y,pz+2) ); } //derivative through X, then through Z template<class T, class R> __device__ R tex3DBicubic_GZGX(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter_G<R>(fz, tex3DBicubicXY_GX<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+2) ); } //derivative through Y, then through Z template<class T, class R> __device__ R tex3DBicubic_GZGY(const texture<T, 3, hipReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter_G<R>(fz, tex3DBicubicXY_GY<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+2) ); } __host__ __device__ int cu_getIndex2(int i, int j, int s1, int s2) { return i*s2+j; } __host__ __device__ double dotProduct(double *u, double *v, int s) { double result = 0; for (int i=0; i<s; i++) result += (u[i]*v[i]); return result; } __host__ __device__ double lenVec(double *a, int s) { double len = 0; for (int i=0; i<s; i++) len += (a[i]*a[i]); len = sqrt(len); return len; } __host__ __device__ void addVector(double *a, double *b, double *c, int len) { for (int i=0; i<len; i++) c[i] = a[i]+b[i]; } __host__ __device__ void scaleVector(double *a, int len, double scale) { for (int i=0; i<len; i++) a[i]*=scale; } void mulMatPoint(double X[4][4], double Y[4], double Z[4]) { for (int i=0; i<4; i++) Z[i] = 0; for (int i=0; i<4; i++) for (int k=0; k<4; k++) Z[i] += (X[i][k]*Y[k]); } __device__ void cu_mulMatPoint(double* X, double* Y, double* Z) { for (int i=0; i<4; i++) Z[i] = 0; for (int i=0; i<4; i++) for (int k=0; k<4; k++) Z[i] += (X[cu_getIndex2(i,k,4,4)]*Y[k]); } __device__ void cu_mulMatPoint3(double* X, double* Y, double* Z) { for (int i=0; i<3; i++) Z[i] = 0; for (int i=0; i<3; i++) for (int k=0; k<3; k++) Z[i] += (X[cu_getIndex2(i,k,3,3)]*Y[k]); } __host__ __device__ void advancePoint(double* point, double* dir, double scale, double* newpos) { for (int i=0; i<3; i++) 
newpos[i] = point[i]+dir[i]*scale; } __device__ bool cu_isInsideDouble(double i, double j, double k, int dim1, int dim2, int dim3) { return ((i>=0)&&(i<=(dim1-1))&&(j>=0)&&(j<=(dim2-1))&&(k>=0)&&(k<=(dim3-1))); } __device__ double cu_computeAlpha(double val, double grad_len, double isoval, double alphamax, double thickness) { if ((grad_len == 0.0) && (val == isoval)) return alphamax; else if ((grad_len>0.0) && (isoval >= (val-thickness*grad_len)) && (isoval <= (val+thickness*grad_len))) return alphamax*(1-abs(isoval-val)/(grad_len*thickness)); else return 0.0; } __device__ double cu_inAlpha(double val, double grad_len, double isoval, double thickness) { if (val >= isoval) return 1.0; else { return max(0.0,(1-abs(isoval-val)/(grad_len*thickness))); } } __device__ double cu_inAlphaX(double dis, double thickness) { if (dis<0) return 1.0; //return max(0.0,min(1.0,1.4-fabs(dis)/thickness)); return max(0.0,min(1.0,1.0-fabs(dis)/thickness)); } __device__ double cu_inAlphaX2(double dis, double thickness) { return max(0.0,1.0-fabs(dis)/thickness); } __host__ __device__ void normalize(double *a, int s) { double len = lenVec(a,s); for (int i=0; i<s; i++) a[i] = a[i]/len; } __host__ __device__ double diss2P(double x1,double y1,double z1,double x2,double y2,double z2) { double dis1 = x2-x1; double dis2 = y2-y1; double dis3 = z2-z1; return (dis1*dis1+dis2*dis2+dis3*dis3); } __host__ __device__ void mulMat3(double* X, double* Y, double* Z) { for (int i=0; i<3; i++) for (int j=0; j<3; j++) { for (int k=0; k<3; k++) { Z[cu_getIndex2(i,j,3,3)] += (X[cu_getIndex2(i,k,3,3)]*Y[cu_getIndex2(k,j,3,3)]); } } } __host__ __device__ void invertMat33(double X[][3], double Y[][3]) { double det = X[0][0]* (X[1][1]* X[2][2]- X[2][1]* X[1][2])- X[0][1]* (X[1][0]* X[2][2]- X[1][2]* X[2][0])+ X[0][2]* (X[1][0]* X[2][1]- X[1][1]* X[2][0]); double invdet = 1 / det; Y[0][0]= (X[1][1]* X[2][2]- X[2][1]* X[1][2]) * invdet; Y[0][1]= (X[0][2]* X[2][1]- X[0][1]* X[2][2]) * invdet; Y[0][2]= (X[0][1]* X[1][2]- X[0][2]* X[1][1])* invdet; Y[1][0]= (X[1][2]* X[2][0]- X[1][0]* X[2][2])* invdet; Y[1][1]= (X[0][0]* X[2][2]- X[0][2]* X[2][0])* invdet; Y[1][2]= (X[1][0]* X[0][2]- X[0][0]* X[1][2])* invdet; Y[2][0]= (X[1][0]* X[2][1]- X[2][0]* X[1][1])* invdet; Y[2][1]= (X[2][0]* X[0][1]- X[0][0]* X[2][1])* invdet; Y[2][2]= (X[0][0]* X[1][1]- X[1][0]* X[0][1]) * invdet; } __host__ __device__ void eigenOfHess(double* hessian, double *eigval) { double Dxx = hessian[cu_getIndex2(0,0,3,3)]; double Dyy = hessian[cu_getIndex2(1,1,3,3)]; double Dzz = hessian[cu_getIndex2(2,2,3,3)]; double Dxy = hessian[cu_getIndex2(0,1,3,3)]; double Dxz = hessian[cu_getIndex2(0,2,3,3)]; double Dyz = hessian[cu_getIndex2(1,2,3,3)]; double J1 = Dxx + Dyy + Dzz; double J2 = Dxx*Dyy + Dxx*Dzz + Dyy*Dzz - Dxy*Dxy - Dxz*Dxz - Dyz*Dyz; double J3 = 2*Dxy*Dxz*Dyz + Dxx*Dyy*Dzz - Dxz*Dxz*Dyy - Dxx*Dyz*Dyz - Dxy*Dxy*Dzz; double Q = (J1*J1-3*J2)/9; double R = (-9*J1*J2+27*J3+2*J1*J1*J1)/54; double theta = (1.0/3.0)*acos(R/sqrt(Q*Q*Q)); double sqrtQ = sqrt(Q); double twosqrtQ = 2*sqrtQ; double J1o3 = J1/3; eigval[0] = J1o3 + twosqrtQ*cos(theta); eigval[1] = J1o3 + twosqrtQ*cos(theta-2*M_PI/3); eigval[2] = J1o3 + twosqrtQ*cos(theta+2*M_PI/3); } __device__ void computeHessian(double *hessian, double *p) { hessian[cu_getIndex2(0,0,3,3)]=tex3DBicubic_GGX<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(0,1,3,3)]=tex3DBicubic_GYGX<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(0,2,3,3)]=tex3DBicubic_GZGX<float,float>(tex0,p[0],p[1],p[2]); 
hessian[cu_getIndex2(1,1,3,3)]=tex3DBicubic_GGY<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(1,2,3,3)]=tex3DBicubic_GZGY<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(2,2,3,3)]=tex3DBicubic_GGZ<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(1,0,3,3)] = hessian[cu_getIndex2(0,1,3,3)]; hessian[cu_getIndex2(2,0,3,3)] = hessian[cu_getIndex2(0,2,3,3)]; hessian[cu_getIndex2(2,1,3,3)] = hessian[cu_getIndex2(1,2,3,3)]; } __device__ void computeHessian(double *hessian, double *p,const texture<float, 3, hipReadModeElementType> tex0) { hessian[cu_getIndex2(0,0,3,3)]=tex3DBicubic_GGX<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(0,1,3,3)]=tex3DBicubic_GYGX<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(0,2,3,3)]=tex3DBicubic_GZGX<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(1,1,3,3)]=tex3DBicubic_GGY<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(1,2,3,3)]=tex3DBicubic_GZGY<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(2,2,3,3)]=tex3DBicubic_GGZ<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(1,0,3,3)] = hessian[cu_getIndex2(0,1,3,3)]; hessian[cu_getIndex2(2,0,3,3)] = hessian[cu_getIndex2(0,2,3,3)]; hessian[cu_getIndex2(2,1,3,3)] = hessian[cu_getIndex2(1,2,3,3)]; } __host__ __device__ void cross(double *u, double *v, double *w) { w[0] = u[1]*v[2]-u[2]*v[1]; w[1] = u[2]*v[0]-u[0]*v[2]; w[2] = u[0]*v[1]-u[1]*v[0]; } __host__ __device__ float lerp(float y0, float y1, float x0, float x, float x1) { float alpha = (x-x0)/(x1-x0); return y0*(1-alpha)+alpha*y1; } __host__ __device__ float lerp(float y0, float y1, float alpha) { return y0*(1-alpha)+alpha*y1; } __device__ double max3(double x, double y, double z) { double max2 = (x>y?x:y); return max2>z?max2:z; } __device__ double clamp(double x0, double x1, double x) { return (x<x0)?x0:((x>x1)?x1:x); } //interpolate the volume in between __global__ void kernel_interpol(float *intervol, int* dim, float alpha) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; int k = (blockIdx.z * blockDim.z) + threadIdx.z; if ((i>=dim[1]) || (j>=dim[2]) || (k>=dim[3])) return; intervol[k*dim[2]*dim[1] + j*dim[1] + i] = lerp(tex3D(tex0,i,j,k),tex3D(tex1,i,j,k),alpha); if (i<=2 && j<=2 && k<=2) { printf("inside kernel_interpol, val at (%d,%d,%d) = %f\n", i,j,k,intervol[k*dim[2]*dim[1] + j*dim[1] + i]); printf("inside kernel_interpol, tex0 at (%d,%d,%d) = %f\n",i,j,k, tex3D(tex0,i,j,k)); } } __global__ void kernel_interpol2(float *intervol, float *intervol2, int* dim, float alpha) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; int k = (blockIdx.z * blockDim.z) + threadIdx.z; if ((i>=dim[1]) || (j>=dim[2]) || (k>=dim[3])) return; intervol[k*dim[2]*dim[1] + j*dim[1] + i] = lerp(tex3D(tex0,i,j,k),tex3D(tex1,i,j,k),alpha); intervol2[k*dim[2]*dim[1] + j*dim[1] + i] = lerp(tex3D(tex3,i,j,k),tex3D(tex4,i,j,k),alpha); } //test function __global__ void kernel_peak_test(int* dim, int *size, double verextent, double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; //temporary test double refstep=sstep, thickness=0.5; double phongKa=0.2, phongKd=0.8; double light_dir[3]={0,0,1}; normalize(light_dir,3); double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; 
advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; double indPoint[4]; double gradgfpi[3]; double pointColor; double alpha; double valgfp; double hessian[9]; double hessian_33[3][3]; double hessian_33inv[3][3]; double hessian_inv[9]; double peakdis[3]; double len_peakdis; double pointColorGFP; double alphaGFP; double transpGFP = 1; double accColorGFP = 0; for (k=0; k<ceil(swidth/sstep); k++) { if (cu_isInsideDouble(curpoint[0],curpoint[1],curpoint[2],dim[1],dim[2],dim[3])) { computeHessian(hessian,curpoint,tex2); memcpy(hessian_33,hessian,sizeof(double)*9); invertMat33(hessian_33,hessian_33inv); memcpy(hessian_inv,hessian_33inv,sizeof(double)*9); gradgfpi[0] = tex3DBicubic_GX<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[1] = tex3DBicubic_GY<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[2] = tex3DBicubic_GZ<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); cu_mulMatPoint3(hessian_inv,gradgfpi,peakdis); scaleVector(peakdis,3,-1); len_peakdis = lenVec(peakdis,3); double eigenval[3]; eigenOfHess(hessian,eigenval); //printf("Inside kernel_peak, before checking if eigenval < 0\n"); if (eigenval[0]<0 && eigenval[1]<0 && eigenval[2]<0) //if (1) { //printf("there is something with eigenval < 0, len_peakdis = %f\n",len_peakdis); normalize(peakdis,3); double maxev = max3(eigenval[0],eigenval[1],eigenval[2]); pointColorGFP = phongKa + phongKd*max(0.0f,dotProduct(peakdis,light_dir,3)); alphaGFP = cu_inAlphaX(len_peakdis-100,thickness); //printf("(i,j,k)=(%d,%d,%d); len_peakdis = %f, alphaGFP = %f\n", i,j,k, len_peakdis, alphaGFP); //temporary disactivated for testing //alphaGFP *= clamp(0,1,lerp(0,1,8.0,-maxev,10.0)); //printf("(i,j,k)=(%d,%d,%d); -maxev = %f, after clamp(0,1,lerp(0,1,40.0,-maxev,41.0)): alphaGFP = %f\n", i,j,k,-maxev, alphaGFP); alphaGFP = 1 - pow(1-alphaGFP,sstep/refstep); //if (alphaGFP>0) // printf("alphaGFP > 0\n"); //debug purpose //alphaGFP = 1.0; //printf("(i,j,k)=(%d,%d,%d); after (1 - pow(1-alphaGFP,sstep/refstep)): alphaGFP = %f\n",i,j,k, alphaGFP); //transpGFP *= (1-alphaGFP); transpGFP = 0; //accColorGFP = accColorGFP*(1-alphaGFP) + pointColorGFP*alphaGFP; if (accColorGFP==0) accColorGFP = len_peakdis; else //if (len_peakdis>0) accColorGFP = min(accColorGFP,len_peakdis); //printf("(i,j,k)=(%d,%d,%d); accColorGFP = %f\n", accColorGFP); } } curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } double accAlphaGFP = 1 - transpGFP; imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = 0; if (accAlphaGFP>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP/accAlphaGFP; if (accColorGFP/accAlphaGFP>0) printf("accColorGFP/accAlphaGFP = %f, accAlphaGFP = %f\n",accColorGFP/accAlphaGFP,accAlphaGFP); } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlphaGFP; } //finding peak __global__ void kernel_peak(int* dim, int *size, double verextent, double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; //temporary 
test double refstep=sstep, thickness=1.0; double phongKa=0.2, phongKd=0.8; double light_dir[3]={0,0,1}; //double light_dir[3]={-1,-1,1}; normalize(light_dir,3); double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; double indPoint[4]; double gradgfpi[3]; double pointColor; double alpha; double valgfp; double hessian[9]; double hessian_33[3][3]; double hessian_33inv[3][3]; double hessian_inv[9]; double peakdis[3]; double len_peakdis; double pointColorGFP; double alphaGFP; double transpGFP = 1; double accColorGFP = 0; for (k=0; k<ceil(swidth/sstep); k++) { if (cu_isInsideDouble(curpoint[0],curpoint[1],curpoint[2],dim[1],dim[2],dim[3])) { computeHessian(hessian,curpoint,tex2); memcpy(hessian_33,hessian,sizeof(double)*9); invertMat33(hessian_33,hessian_33inv); memcpy(hessian_inv,hessian_33inv,sizeof(double)*9); gradgfpi[0] = tex3DBicubic_GX<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[1] = tex3DBicubic_GY<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[2] = tex3DBicubic_GZ<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); cu_mulMatPoint3(hessian_inv,gradgfpi,peakdis); //scaleVector(peakdis,3,-1); len_peakdis = lenVec(peakdis,3); double eigenval[3]; eigenOfHess(hessian,eigenval); //printf("Inside kernel_peak, before checking if eigenval < 0\n"); if (eigenval[0]<0 && eigenval[1]<0 && eigenval[2]<0) //if (1) { //printf("there is something with eigenval < 0, len_peakdis = %f\n",len_peakdis); normalize(peakdis,3); double maxev = max3(eigenval[0],eigenval[1],eigenval[2]); pointColorGFP = phongKa + phongKd*max(0.0f,dotProduct(peakdis,light_dir,3)); alphaGFP = cu_inAlphaX(len_peakdis-8,thickness); //alphaGFP = cu_computeAlpha(len_peakdis, len_peakdis, 50, 1, thickness); //printf("(i,j,k)=(%d,%d,%d); len_peakdis = %f, alphaGFP = %f\n", i,j,k, len_peakdis, alphaGFP); //temporary deactivated for testing alphaGFP *= clamp(0,1,lerp(0,1,8.0,-maxev,10.0)); //alphaGFP *= clamp(0,1,lerp(0,1,6.0,-maxev,10.0)); //printf("(i,j,k)=(%d,%d,%d); -maxev = %f, after clamp(0,1,lerp(0,1,40.0,-maxev,41.0)): alphaGFP = %f\n", i,j,k,-maxev, alphaGFP); alphaGFP = 1 - pow(1-alphaGFP,sstep/refstep); //if (alphaGFP>0) // printf("alphaGFP > 0\n"); //debug purpose //alphaGFP = 1.0; //printf("(i,j,k)=(%d,%d,%d); after (1 - pow(1-alphaGFP,sstep/refstep)): alphaGFP = %f\n",i,j,k, alphaGFP); transpGFP *= (1-alphaGFP); accColorGFP = accColorGFP*(1-alphaGFP) + pointColorGFP*alphaGFP; //printf("(i,j,k)=(%d,%d,%d); accColorGFP = %f\n", accColorGFP); } } curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } double accAlphaGFP = 1 - transpGFP; imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = 0; if (accAlphaGFP>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP/accAlphaGFP; if (accColorGFP/accAlphaGFP>0) printf("accColorGFP/accAlphaGFP = %f, accAlphaGFP = %f\n",accColorGFP/accAlphaGFP,accAlphaGFP); } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlphaGFP; } //peak with RFP constraint __global__ void kernel_peak_2chan(int* dim, int *size, double verextent, 
double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; //temporary test double refstep=sstep, thickness=1.0; double phongKa=0.2, phongKd=0.8; double light_dir[3]={0,0,1}; normalize(light_dir,3); double isoval = 800; double alphamax = 1; double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; double indPoint[4]; double gradgfpi[3]; double pointColor; double alpha; double valgfp; double hessian[9]; double hessian_33[3][3]; double hessian_33inv[3][3]; double hessian_inv[9]; double peakdis[3]; double len_peakdis; double pointColorGFP; double alphaGFP; double transpGFP = 1; double accColorGFP = 0; double gradi[3]; double gradi_len; double val; double accColor = 0; double mipVal = -1; double transp = 1; for (k=0; k<ceil(swidth/sstep); k++) { if (cu_isInsideDouble(curpoint[0],curpoint[1],curpoint[2],dim[1],dim[2],dim[3])) { val = tex3DBicubic<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); gradi[0] = tex3DBicubic_GX<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); gradi[1] = tex3DBicubic_GY<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); gradi[2] = tex3DBicubic_GZ<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); //cu_mulMatPoint3(MT_BE_inv, gradi, gradw); gradi_len = lenVec(gradi,3); //negating and normalizing for (int l=0; l<3; l++) gradi[l] = -gradi[l]/gradi_len; //depth = (k*1.0+1)/(fc*1.0-nc); pointColor = phongKa + phongKd*max(0.0f,dotProduct(gradi,light_dir,3)); alpha = cu_computeAlpha(val, gradi_len, isoval, alphamax, thickness); //alpha = 0.5; alpha = 1 - pow(1-alpha,sstep/refstep); transp *= (1-alpha); accColor = accColor*(1-alpha) + pointColor*alpha; //valgfp = tex3DBicubic<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); double inalpha = cu_inAlpha(val,gradi_len,isoval,thickness); if (inalpha>0) { computeHessian(hessian,curpoint,tex2); memcpy(hessian_33,hessian,sizeof(double)*9); invertMat33(hessian_33,hessian_33inv); memcpy(hessian_inv,hessian_33inv,sizeof(double)*9); gradgfpi[0] = tex3DBicubic_GX<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[1] = tex3DBicubic_GY<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[2] = tex3DBicubic_GZ<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); cu_mulMatPoint3(hessian_inv,gradgfpi,peakdis); //scaleVector(peakdis,3,-1); len_peakdis = lenVec(peakdis,3); double eigenval[3]; eigenOfHess(hessian,eigenval); //printf("Inside kernel_peak, before checking if eigenval < 0\n"); if (eigenval[0]<0 && eigenval[1]<0 && eigenval[2]<0) //if (1) { //printf("there is something with eigenval < 0\n"); normalize(peakdis,3); double maxev = max3(eigenval[0],eigenval[1],eigenval[2]); pointColorGFP = phongKa + phongKd*max(0.0f,dotProduct(peakdis,light_dir,3)); alphaGFP = cu_inAlphaX(len_peakdis-8,thickness); //printf("(i,j,k)=(%d,%d,%d); len_peakdis = %f, alphaGFP = %f\n", i,j,k, len_peakdis, alphaGFP); alphaGFP *= clamp(0,1,lerp(0,1,8.0,-maxev,10.0)); //printf("(i,j,k)=(%d,%d,%d); -maxev = %f, after clamp(0,1,lerp(0,1,40.0,-maxev,41.0)): alphaGFP = %f\n", i,j,k,-maxev, alphaGFP); alphaGFP = 1 - 
pow(1-alphaGFP,sstep/refstep); //debug purpose //alphaGFP = 1.0; //printf("(i,j,k)=(%d,%d,%d); after (1 - pow(1-alphaGFP,sstep/refstep)): alphaGFP = %f\n",i,j,k, alphaGFP); transpGFP *= (1-alphaGFP); accColorGFP = accColorGFP*(1-alphaGFP) + pointColorGFP*alphaGFP; //printf("(i,j,k)=(%d,%d,%d); accColorGFP = %f\n", accColorGFP); accColorGFP*=inalpha; } } } curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } double accAlphaGFP = 1 - transpGFP; double accAlpha = 1 - transp; //imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = 0; if (accAlpha>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor/accAlpha; } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor; } if (accAlphaGFP>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP/accAlphaGFP; } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; //imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlphaGFP; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlpha; } //currently working in index-space //do MIP for a small slice around each point __global__ void kernel_cpr(int* dim, int *size, double verextent, double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); double mipval = INT_MIN; double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; for (k=0; k<ceil(swidth/sstep); k++) { double curval; //curval = tex3DBicubic<float,float>(tex0,curpoint[0],curpoint[1],curpoint[2]); curval = tex3DBicubic<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); mipval = MAX(mipval,curval); curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipval; for (int k=2; k<nOutChannel-1; k++) imageDouble[j*size[0]*nOutChannel+i*nOutChannel+k] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = 1; } __global__ void kernel_cpr_2chan(int* dim, int *size, double verextent, double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; //temporary test double refstep=sstep, thickness=1.0; double phongKa=0.2, phongKd=0.8; double light_dir[3]={0,0,1}; //double light_dir[3]={1,1,1}; normalize(light_dir,3); double isoval = 800; //double isoval = 400; //double isoval = 1800; double alphamax = 1; double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); //double mipval = INT_MIN; double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; 
double gradi[3]; double gradi_len; double alpha; double val, valgfp; double pointColor; double accColor = 0; double mipVal = -1; double transp = 1; double mipRFP = -1; for (k=0; k<ceil(swidth/sstep); k++) { val = tex3DBicubic<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); mipRFP = MAX(val,mipRFP); gradi[0] = tex3DBicubic_GX<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); gradi[1] = tex3DBicubic_GY<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); gradi[2] = tex3DBicubic_GZ<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); //cu_mulMatPoint3(MT_BE_inv, gradi, gradw); gradi_len = lenVec(gradi,3); //negating and normalizing for (int l=0; l<3; l++) gradi[l] = -gradi[l]/gradi_len; //depth = (k*1.0+1)/(fc*1.0-nc); pointColor = phongKa + phongKd*max(0.0f,dotProduct(gradi,light_dir,3)); alpha = cu_computeAlpha(val, gradi_len, isoval, alphamax, thickness); //alpha = 0.5; alpha = 1 - pow(1-alpha,sstep/refstep); transp *= (1-alpha); accColor = accColor*(1-alpha) + pointColor*alpha; valgfp = tex3DBicubic<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); mipVal = max(mipVal,valgfp*cu_inAlpha(val,gradi_len,isoval,thickness)); curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } double accAlpha = 1 - transp; if (accAlpha>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor/accAlpha; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipVal; //imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = mipRFP; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipVal; //imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = mipRFP; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlpha; } __global__ void kernel_cprinter(double alpha, int* dim, int *size, double verextent, double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); double mipval = INT_MIN; double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; for (k=0; k<ceil(swidth/sstep); k++) { double curval; curval = lerp(tex3DBicubic<float,float>(tex0,curpoint[0],curpoint[1],curpoint[2]),tex3DBicubic<float,float>(tex1,curpoint[0],curpoint[1],curpoint[2]),alpha); mipval = MAX(mipval,curval); curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipval; for (int k=2; k<nOutChannel-1; k++) imageDouble[j*size[0]*nOutChannel+i*nOutChannel+k] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = 1; } void computeMean(double *points, int n, double *means) { memset(means,0,sizeof(double)*3); for (int i=0; i<3; i++) { for (int k=0; k<n; k++) means[i] += points[k*3+i]; means[i]/=n; } } void computeCovariance(double *points, int n, double *cov) { double 
means[3]; computeMean(points,n,means); //memset(cov,0,sizeof(double)*9); for (int i=0; i<3; i++) for (int j=i; j<3; j++) { double localcov = 0; for (int k=0; k<n; k++) { localcov += (points[k*3+i]-means[i])*(points[k*3+j]-means[j]); } localcov/=n; cov[cu_getIndex2(i,j,3,3)] = cov[cu_getIndex2(j,i,3,3)] = localcov; } } int isScaleOf(double *v1, double *v2, int s) { double factor; for (int i=0; i<s; i++) if (v1[i]) { factor = v2[i]/v1[i]; break; } for (int i=0; i<s; i++) if (v1[i]*factor != v2[i]) return 0; return 1; } //for symmetric 3x3 matrix void computeEigenVec(double *matrix, double eigval, double *eigvec) { double matrixtmp[9]; memcpy(matrixtmp,matrix,sizeof(double)*9); for (int i=0; i<3; i++) matrixtmp[cu_getIndex2(i,i,3,3)] = matrixtmp[cu_getIndex2(i,i,3,3)] - eigval; double col1[3], col2[3]; int ind = 0; for (ind = 0; ind<3; ind++) { if (matrixtmp[cu_getIndex2(0,ind,3,3)] || matrixtmp[cu_getIndex2(1,ind,3,3)] || matrixtmp[cu_getIndex2(2,ind,3,3)]) break; } if (ind<3) { for (int i=0; i<3; i++) col1[i] = matrixtmp[cu_getIndex2(i,ind,3,3)]; int ind2; for (ind2 = ind+1; ind2<3; ind2++) { if (matrixtmp[cu_getIndex2(0,ind2,3,3)] || matrixtmp[cu_getIndex2(1,ind2,3,3)] || matrixtmp[cu_getIndex2(2,ind2,3,3)]) break; } if (ind2<3) { for (int i=0; i<3; i++) col2[i] = matrixtmp[cu_getIndex2(i,ind2,3,3)]; if (isScaleOf(col1,col2,3)) { ind2++; if (ind2<3) { if (matrixtmp[cu_getIndex2(0,ind2,3,3)] || matrixtmp[cu_getIndex2(1,ind2,3,3)] || matrixtmp[cu_getIndex2(2,ind2,3,3)]) { for (int i=0; i<3; i++) col2[i] = matrixtmp[cu_getIndex2(i,ind2,3,3)]; if (isScaleOf(col1,col2,3)) { double tmp[3]; memcpy(tmp,col1,sizeof(double)*3); tmp[0]++; double tmp2[3]; cross(col1,tmp,tmp2); cross(tmp2,col1,eigvec); } else { cross(col1,col2,eigvec); } } else { double tmp[3]; memcpy(tmp,col1,sizeof(double)*3); tmp[0]++; double tmp2[3]; cross(col1,tmp,tmp2); cross(tmp2,col1,eigvec); } } else { double tmp[3]; memcpy(tmp,col1,sizeof(double)*3); tmp[0]++; double tmp2[3]; cross(col1,tmp,tmp2); cross(tmp2,col1,eigvec); } } else { cross(col1,col2,eigvec); } } else { double tmp[3]; memcpy(tmp,col1,sizeof(double)*3); tmp[0]++; double tmp2[3]; cross(col1,tmp,tmp2); cross(tmp2,col1,eigvec); } } else { eigvec[0] = eigvec[1] = eigvec[2] = 1; } normalize(eigvec,3); } void drawCircle(unsigned char *img, int s0, int s1, int s2, int drawchan, int c1, int c2, double rad) { double angstep = 0.2; for (double curang = 0; curang<2*M_PI; curang+=angstep) { int i1, i2; i2 = sin(curang)*rad; i1 = cos(curang)*rad; i1 += c1; i2 += c2; img[i2*s1*s0 + i1*s0 + drawchan] = 255; } } void drawCircleWithColor(unsigned char *img, int s0, int s1, int s2, int c1, int c2, double rad, double angstep, unsigned char color0, unsigned char color1, unsigned char color2) { for (double curang = 0; curang<2*M_PI; curang+=angstep) { int i1, i2; i2 = sin(curang)*rad; i1 = cos(curang)*rad; i1 += c1; i2 += c2; img[i2*s1*s0 + i1*s0 + 0] = color0; img[i2*s1*s0 + i1*s0 + 1] = color1; img[i2*s1*s0 + i1*s0 + 2] = color2; } } void drawCross(unsigned char *img, int s0, int s1, int s2, int drawchan, int c1, int c2, double rad) { for (int i=c1-rad; i<c1+rad; i++) img[c2*s1*s0 + i*s0 + drawchan] = 255; for (int i=c2-rad; i<c2+rad; i++) img[i*s1*s0 + c1*s0 + drawchan] = 255; } void drawCrossWithColor(unsigned char *img, int s0, int s1, int s2, int c1, int c2, double rad, unsigned char *color) { for (int k = 0; k<3; k++) { for (int i=c1-rad; i<c1+rad; i++) img[c2*s1*s0 + i*s0 + k] = color[k]; for (int i=c2-rad; i<c2+rad; i++) img[i*s1*s0 + c1*s0 + k] = color[k]; } } //draw the 
first N circles on the grid of RxC void drawNCircle(unsigned char *img, int s0, int s1, int s2, int drawchan, int N, int g1, int g2) { double rad; double w1 = s1/g1; double w2 = s2/g2; rad = w1<w2?w1/3:w2/3; for (int i=0; i<N; i++) { int gi1 = i/g1; int gi2 = i%g2; int pi1 = gi1*w1+w1/2; int pi2 = gi2*w2+w2/2; drawCircle(img,s0,s1,s2,drawchan,pi1,pi2,rad); } } double calDet44(double X[][4]) { double value = ( X[0][3]*X[1][2]*X[2][1]*X[3][0] - X[0][2]*X[1][3]*X[2][1]*X[3][0] - X[0][3]*X[1][1]*X[2][2]*X[3][0] + X[0][1]*X[1][3]*X[2][2]*X[3][0]+ X[0][2]*X[1][1]*X[2][3]*X[3][0] - X[0][1]*X[1][2]*X[2][3]*X[3][0] - X[0][3]*X[1][2]*X[2][0]*X[3][1] + X[0][2]*X[1][3]*X[2][0]*X[3][1]+ X[0][3]*X[1][0]*X[2][2]*X[3][1] - X[0][0]*X[1][3]*X[2][2]*X[3][1] - X[0][2]*X[1][0]*X[2][3]*X[3][1] + X[0][0]*X[1][2]*X[2][3]*X[3][1]+ X[0][3]*X[1][1]*X[2][0]*X[3][2] - X[0][1]*X[1][3]*X[2][0]*X[3][2] - X[0][3]*X[1][0]*X[2][1]*X[3][2] + X[0][0]*X[1][3]*X[2][1]*X[3][2]+ X[0][1]*X[1][0]*X[2][3]*X[3][2] - X[0][0]*X[1][1]*X[2][3]*X[3][2] - X[0][2]*X[1][1]*X[2][0]*X[3][3] + X[0][1]*X[1][2]*X[2][0]*X[3][3]+ X[0][2]*X[1][0]*X[2][1]*X[3][3] - X[0][0]*X[1][2]*X[2][1]*X[3][3] - X[0][1]*X[1][0]*X[2][2]*X[3][3] + X[0][0]*X[1][1]*X[2][2]*X[3][3] ); return value; } void invertMat44(double X[][4], double Y[][4]) { double det = calDet44(X); Y[0][0] = X[1][2]*X[2][3]*X[3][1] - X[1][3]*X[2][2]*X[3][1] + X[1][3]*X[2][1]*X[3][2] - X[1][1]*X[2][3]*X[3][2] - X[1][2]*X[2][1]*X[3][3] + X[1][1]*X[2][2]*X[3][3]; Y[0][1] = X[0][3]*X[2][2]*X[3][1] - X[0][2]*X[2][3]*X[3][1] - X[0][3]*X[2][1]*X[3][2] + X[0][1]*X[2][3]*X[3][2] + X[0][2]*X[2][1]*X[3][3] - X[0][1]*X[2][2]*X[3][3]; Y[0][2] = X[0][2]*X[1][3]*X[3][1] - X[0][3]*X[1][2]*X[3][1] + X[0][3]*X[1][1]*X[3][2] - X[0][1]*X[1][3]*X[3][2] - X[0][2]*X[1][1]*X[3][3] + X[0][1]*X[1][2]*X[3][3]; Y[0][3] = X[0][3]*X[1][2]*X[2][1] - X[0][2]*X[1][3]*X[2][1] - X[0][3]*X[1][1]*X[2][2] + X[0][1]*X[1][3]*X[2][2] + X[0][2]*X[1][1]*X[2][3] - X[0][1]*X[1][2]*X[2][3]; Y[1][0] = X[1][3]*X[2][2]*X[3][0] - X[1][2]*X[2][3]*X[3][0] - X[1][3]*X[2][0]*X[3][2] + X[1][0]*X[2][3]*X[3][2] + X[1][2]*X[2][0]*X[3][3] - X[1][0]*X[2][2]*X[3][3]; Y[1][1] = X[0][2]*X[2][3]*X[3][0] - X[0][3]*X[2][2]*X[3][0] + X[0][3]*X[2][0]*X[3][2] - X[0][0]*X[2][3]*X[3][2] - X[0][2]*X[2][0]*X[3][3] + X[0][0]*X[2][2]*X[3][3]; Y[1][2] = X[0][3]*X[1][2]*X[3][0] - X[0][2]*X[1][3]*X[3][0] - X[0][3]*X[1][0]*X[3][2] + X[0][0]*X[1][3]*X[3][2] + X[0][2]*X[1][0]*X[3][3] - X[0][0]*X[1][2]*X[3][3]; Y[1][3] = X[0][2]*X[1][3]*X[2][0] - X[0][3]*X[1][2]*X[2][0] + X[0][3]*X[1][0]*X[2][2] - X[0][0]*X[1][3]*X[2][2] - X[0][2]*X[1][0]*X[2][3] + X[0][0]*X[1][2]*X[2][3]; Y[2][0] = X[1][1]*X[2][3]*X[3][0] - X[1][3]*X[2][1]*X[3][0] + X[1][3]*X[2][0]*X[3][1] - X[1][0]*X[2][3]*X[3][1] - X[1][1]*X[2][0]*X[3][3] + X[1][0]*X[2][1]*X[3][3]; Y[2][1] = X[0][3]*X[2][1]*X[3][0] - X[0][1]*X[2][3]*X[3][0] - X[0][3]*X[2][0]*X[3][1] + X[0][0]*X[2][3]*X[3][1] + X[0][1]*X[2][0]*X[3][3] - X[0][0]*X[2][1]*X[3][3]; Y[2][2] = X[0][1]*X[1][3]*X[3][0] - X[0][3]*X[1][1]*X[3][0] + X[0][3]*X[1][0]*X[3][1] - X[0][0]*X[1][3]*X[3][1] - X[0][1]*X[1][0]*X[3][3] + X[0][0]*X[1][1]*X[3][3]; Y[2][3] = X[0][3]*X[1][1]*X[2][0] - X[0][1]*X[1][3]*X[2][0] - X[0][3]*X[1][0]*X[2][1] + X[0][0]*X[1][3]*X[2][1] + X[0][1]*X[1][0]*X[2][3] - X[0][0]*X[1][1]*X[2][3]; Y[3][0] = X[1][2]*X[2][1]*X[3][0] - X[1][1]*X[2][2]*X[3][0] - X[1][2]*X[2][0]*X[3][1] + X[1][0]*X[2][2]*X[3][1] + X[1][1]*X[2][0]*X[3][2] - X[1][0]*X[2][1]*X[3][2]; Y[3][1] = X[0][1]*X[2][2]*X[3][0] - X[0][2]*X[2][1]*X[3][0] + X[0][2]*X[2][0]*X[3][1] - 
X[0][0]*X[2][2]*X[3][1] - X[0][1]*X[2][0]*X[3][2] + X[0][0]*X[2][1]*X[3][2]; Y[3][2] = X[0][2]*X[1][1]*X[3][0] - X[0][1]*X[1][2]*X[3][0] - X[0][2]*X[1][0]*X[3][1] + X[0][0]*X[1][2]*X[3][1] + X[0][1]*X[1][0]*X[3][2] - X[0][0]*X[1][1]*X[3][2]; Y[3][3] = X[0][1]*X[1][2]*X[2][0] - X[0][2]*X[1][1]*X[2][0] + X[0][2]*X[1][0]*X[2][1] - X[0][0]*X[1][2]*X[2][1] - X[0][1]*X[1][0]*X[2][2] + X[0][0]*X[1][1]*X[2][2]; for (int i=0; i<4; i++) for (int j=0; j<4; j++) Y[i][j] = Y[i][j]/det; } void subtractVec(double *a, double *b, double *c, int s) { for (int i=0; i<s; i++) c[i] = a[i]-b[i]; } void negateVec(double *a, int s) { for (int i=0; i<s; i++) a[i] = -a[i]; } //s1,s2,s3: fastest to slowest void sliceImageDouble(double *input, int s1, int s2, int s3, double *output, int indS1) { for (int i=0; i<s3; i++) for (int j=0; j<s2; j++) { output[i*s2+j] = input[i*s2*s1+j*s1+indS1]*input[i*s2*s1+j*s1+s1-1]; } } unsigned char quantizeDouble(double val, double minVal, double maxVal) { return CLIP((val-minVal)*255.0/(maxVal-minVal),0,255); } //3D data, fastest to slowest void quantizeImageDouble3D(double *input, unsigned char *output, int s0, int s1, int s2) { double maxVal[4]; maxVal[0] = maxVal[1] = maxVal[2] = maxVal[3] = -(1<<15); double minVal[4]; minVal[0] = minVal[1] = minVal[2] = minVal[3] = ((1<<15) - 1); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { if (input[i*s1*s0+j*s0+k]>maxVal[k]) maxVal[k] = input[i*s1*s0+j*s0+k]; if (input[i*s1*s0+j*s0+k]<minVal[k]) minVal[k] = input[i*s1*s0+j*s0+k]; } for (int i=0; i<4; i++) printf("minmax %d = [%f,%f]\n",i,minVal[i],maxVal[i]); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { output[i*s1*s0+j*s0+k] = quantizeDouble(input[i*s1*s0+j*s0+k],minVal[k],maxVal[k]); } } void quantizeImageDouble3D_Range(double *input, unsigned char *output, int s0, int s1, int s2, double *range) { for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { output[i*s1*s0+j*s0+k] = quantizeDouble(input[i*s1*s0+j*s0+k],range[k*2],range[k*2+1]); } } template<class T> void quantizeImage3D(T *input, unsigned char *output, int s0, int s1, int s2) { double maxVal[4]; maxVal[0] = maxVal[1] = maxVal[2] = maxVal[3] = -(1<<15); double minVal[4]; minVal[0] = minVal[1] = minVal[2] = minVal[3] = ((1<<15) - 1); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { if (input[i*s1*s0+j*s0+k]>maxVal[k]) maxVal[k] = input[i*s1*s0+j*s0+k]; if (input[i*s1*s0+j*s0+k]<minVal[k]) minVal[k] = input[i*s1*s0+j*s0+k]; } for (int i=0; i<4; i++) printf("minmax %d = [%f,%f]\n",i,minVal[i],maxVal[i]); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { output[i*s1*s0+j*s0+k] = quantizeDouble(input[i*s1*s0+j*s0+k],minVal[k],maxVal[k]); } } void applyMask(unsigned char *input, int s0, int s1, int s2, int *mask, unsigned char *output) { for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { output[i*s1*s0+j*s0+k] = input[i*s1*s0+j*s0+k]*mask[i*s1+j]; } } void removeChannel(unsigned char *input, int s0, int s1, int s2, int chan, unsigned char *output) { memcpy(output,input,s0*s1*s2*sizeof(unsigned char)); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) output[i*s1*s0+j*s0+chan] = 0; } //---end of cuda_volume_rendering functions template<class T> void setPlane(T* image, int s1, int s2, int s3, T val, int s1i) { for (int i=0; i<s3; i++) for (int j=0; j<s2; j++) image[i*s2*s1+j*s1+s1i] = val; } void transposeMat33(double X[][3], double Y[][3]) { for (int i=0; i<3; i++) for (int 
j=i; j<3; j++) { Y[i][j]=X[j][i]; Y[j][i]=X[i][j]; } } float linearizeDepth(float depth, float zNear, float zFar) { return (2.0 * zFar * zNear) / (zFar + zNear - depth * (zFar - zNear)); } float linearizeDepthOrtho(float depth, float zNear, float zFar) { //the returned depth is relative to the "at" point return (depth*(zFar-zNear)+zFar+zNear)/2; } template<class T> void saveImage(int width, int height, int nchan, T *data, char *name) { TGAImage *img = new TGAImage(width,height); unsigned char* dataQuantized = new unsigned char[height*width*nchan]; quantizeImage3D<T>(data,dataQuantized,nchan,width,height); Colour c; for(int x=0; x<height; x++) for(int y=0; y<width; y++) { c.a = 255; c.b = c.g = c.r = 0; switch (nchan) { case 4: c.a = dataQuantized[x*width*nchan+y*nchan+3]; case 3: c.b = dataQuantized[x*width*nchan+y*nchan+2]; case 2: c.g = dataQuantized[x*width*nchan+y*nchan+1]; case 1: c.r = dataQuantized[x*width*nchan+y*nchan]; } img->setPixel(c,x,y); } img->WriteImage(name); delete img; delete[] dataQuantized; } template<class T> void saveImageWithoutQuantizing(int width, int height, int nchan, T *data, char *name) { TGAImage *img = new TGAImage(width,height); Colour c; for(int x=0; x<height; x++) for(int y=0; y<width; y++) { c.a = 255; c.b = c.g = c.r = 0; switch (nchan) { case 4: c.a = data[x*width*nchan+y*nchan+3]; case 3: c.b = data[x*width*nchan+y*nchan+2]; case 2: c.g = data[x*width*nchan+y*nchan+1]; case 1: c.r = data[x*width*nchan+y*nchan]; } img->setPixel(c,x,y); } img->WriteImage(name); delete img; } //image1 and image2 should have same spatial size (except number of channels, i.e. fastest axis) template <class T1, class T2> void copyImageChannel(T1 *image1,int s10,int s11,int s12,int c1,T2 *image2,int s20,int c2) { for (int i=0; i<s12; i++) for (int j=0; j<s11; j++) { int ind1 = i*s11*s10 + j*s10 + c1; int ind2 = i*s11*s20 + j*s20 + c2; image2[ind2] = image1[ind1]; } } double computeAngle(double *v1, double *v2) { double dp = dotProduct(v1,v2,3); return acos(dp)*180/M_PI; } void render(Hale::Viewer *viewer){ viewer->draw(); viewer->bufferSwap(); } glm::vec4 convertDepthBuffToWorldPos(int w, int h, double depth, Hale::Viewer *viewer) { double depthv = linearizeDepthOrtho(lerp(-1,1,0,depth,1),viewer->camera.clipNear(),viewer->camera.clipFar()); double wv, hv; wv = w-viewer->widthBuffer()/2; hv = h-viewer->heightBuffer()/2; glm::vec3 diff = viewer->camera.at() - viewer->camera.from(); double dist = glm::length(diff); double fangle = viewer->camera.fov()*AIR_PI/360; double vextent = dist*tan(fangle)*2; double pixelsize = vextent/viewer->heightBuffer(); wv = wv*pixelsize; hv = hv*pixelsize; depthv = -(depthv+dist); printf("Inside convertDepthBuffToWorldPos: viewpos = %f,%f,%f\n",wv,hv,depthv); glm::vec4 vpos; vpos.x = wv; vpos.y = hv; vpos.z = depthv; vpos.w = 1; glm::vec4 result; result = viewer->camera.viewInv()*vpos; return result; } glm::vec4 convertDepthBuffToViewPos(int w, int h, double depth, Hale::Viewer *viewer) { double depthv = linearizeDepthOrtho(lerp(-1,1,0,depth,1),viewer->camera.clipNear(),viewer->camera.clipFar()); double wv, hv; wv = w-viewer->widthBuffer()/2; hv = h-viewer->heightBuffer()/2; glm::vec3 diff = viewer->camera.at() - viewer->camera.from(); double dist = glm::length(diff); double fangle = viewer->camera.fov()*AIR_PI/360; double vextent = dist*tan(fangle)*2; double pixelsize = vextent/viewer->heightBuffer(); wv = wv*pixelsize; hv = hv*pixelsize; depthv = -(depthv+dist); printf("Inside convertDepthBuffToViewPos: winpos = %d,%d; viewpos = 
%f,%f,%f\n",w,h,wv,hv,depthv); glm::vec4 vpos; vpos.x = wv; vpos.y = hv; vpos.z = depthv; vpos.w = 1; return vpos; } glm::vec4 convertWorldToViewPos(double x, double y, double z, Hale::Viewer *viewer) { glm::vec4 wpos; wpos.x = x; wpos.y = y; wpos.z = z; wpos.w = 1; glm::vec4 vpos; vpos = viewer->camera.view()*wpos; return vpos; } void mainInit() { for (int i=0; i<=NTEX; i++) d_volumeArray[i] = 0; } class Queue { public: Queue(int isize) { size = isize; nin = nrrdNew(); filemem0 = filemem1 = 0; } Queue() { size = NTEX; nin = nrrdNew(); filemem0 = filemem1 = 0; } ~Queue() { } /* hipArray* find(int time) { if (timetoindex.find(time) == timetoindex.end()) return NULL; else return d_volumeArray[timetoindex[time]]; } */ int find(int time) { if (timetoindex.find(time) == timetoindex.end()) return -1; else return timetoindex[time]; } int findFarthestTime(int time) { int dismax = -1; int maxind; for (int i=0; i<elems.size(); i++) if (abs(time-elems[i])>dismax) { dismax = abs(time-elems[i]); maxind = i; } return maxind; } //hipArray* push(int time, int *arr_nameid, char* pathprefix, airArray *mop) int push(int time, int *arr_nameid, char* pathprefix, airArray *mop) { printf("Inside Queue.push(): time = %d\n",time); for (int i=0; i<elems.size(); i++) printf("%d ", elems[i]); printf("\n"); //hipArray* findres = find(time); int findres = find(time); if (findres>=0) { printf("findres = %d\n", findres); return findres; } int curvol; if (elems.size()<size) { elems.push_back(time); timetoindex[time] = elems.size()-1; curvol = elems.size()-1; printf("curvol in 'if': %d\n",curvol); } else { curvol = findFarthestTime(time); timetoindex.erase(elems[curvol]); elems[curvol] = time; timetoindex[time] = curvol; printf("curvol in 'else': %d\n",curvol); } char inname[1000]; char *err; int curnameind = arr_nameid[time]; sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); cout<<"inname = "<<inname<<endl; if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); fprintf(stderr, "%s: trouble reading \"%s\":\n%s", "Queue.push()", inname, err); free(err); exit(1); } cout<<"read file "<<inname<<endl; unsigned int pixSize; hipChannelFormatDesc channelDesc; pixSize = sizeof(float); channelDesc = hipCreateChannelDesc<float>(); /* if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", "Queue.push()", nin->dim, nin->spaceDim); airMopError(mop); exit(1); } */ if (nin->dim == 3) { dim[0] = 1; dim[1] = nin->axis[0].size; dim[2] = nin->axis[1].size; dim[3] = nin->axis[2].size; } else //4-channel { dim[0] = nin->axis[0].size; dim[1] = nin->axis[1].size; dim[2] = nin->axis[2].size; dim[3] = nin->axis[3].size; } int channel = 1; if (!filemem0) { printf("in Queue.push, dim=[%d,%d,%d,%d], before allocating filemem\n",dim[0],dim[1],dim[2],dim[3]); filemem0 = new float[dim[1]*dim[2]*dim[3]]; filemem1 = new float[dim[1]*dim[2]*dim[3]]; } printf("in Queue.push, before setting filemem\n"); if (nin->dim == 3) { for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i]; } } else { for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } } const hipExtent volumeSize = make_hipExtent(dim[1], dim[2], dim[3]); if (!d_volumeArray[curvol]) { hipMalloc3DArray(&d_volumeArray[curvol], &channelDesc, volumeSize); hipMalloc3DArray(&d_volumeArray1[curvol], &channelDesc, volumeSize); } hipMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_hipPitchedPtr((void*)filemem0, volumeSize.width*pixSize, 
volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[curvol]; copyParams0.extent = volumeSize; copyParams0.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams0); hipMemcpy3DParms copyParams1 = {0}; copyParams1.srcPtr = make_hipPitchedPtr((void*)filemem1, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray1[curvol]; copyParams1.extent = volumeSize; copyParams1.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams1); printf("end of Queue.push()\n"); //return d_volumeArray[curvol]; return curvol; } int* getDataDim() { return dim; } private: int size; int cursize; vector<int> elems; unordered_map<int,int> timetoindex; //nin data float *filemem0, *filemem1; Nrrd *nin; int dim[4]; }; void interpolVolAndRender(int &curVolInMem, int mini, double alpha, Queue &queue, int *arr_nameid, double *arr_center, char *pathprefix, airArray *mop, unsigned int pixSize, int *dim, int *size, double *eigenvec, double verextent2, double swidth, double sstep, int nOutChannel, float *d_volmem, float *d_volmem2, int *d_dim, int *d_size, double *d_dir1, double *d_dir2, double *d_center, double *imageDouble, double *d_imageDouble, unsigned char *imageQuantized, Hale::Viewer &viewer, Hale::Viewer &viewer2, Hale::Polydata *hpldview2, Hale::Polydata *hpld_inter, double spherescale, Hale::Polydata *hpld_sq_inter, bool statePKey, int kern, bool stateIKey) { int count; double dir1[3],dir2[3],center[3]; const hipExtent volumeSize = make_hipExtent(dim[1], dim[2], dim[3]); hipChannelFormatDesc channelDesc; channelDesc = hipCreateChannelDesc<float>(); if (curVolInMem != mini) { curVolInMem = mini; count = mini; hipError_t errCu; //hipArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); hipArray* d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex0.normalized = false; tex0.filterMode = hipFilterModeLinear; tex0.addressMode[0] = hipAddressModeBorder; tex0.addressMode[1] = hipAddressModeBorder; tex0.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex0, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex3.normalized = false; tex3.filterMode = hipFilterModeLinear; tex3.addressMode[0] = hipAddressModeBorder; tex3.addressMode[1] = hipAddressModeBorder; tex3.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex3, d_curvolarr, channelDesc); count = mini+1; //d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex1.normalized = false; tex1.filterMode = hipFilterModeLinear; tex1.addressMode[0] = hipAddressModeBorder; tex1.addressMode[1] = hipAddressModeBorder; tex1.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex1, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex4.normalized = false; tex4.filterMode = hipFilterModeLinear; tex4.addressMode[0] = hipAddressModeBorder; tex4.addressMode[1] = hipAddressModeBorder; tex4.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex4, d_curvolarr, channelDesc); } int numThread1D; numThread1D = 8; dim3 threadsPerBlock(numThread1D,numThread1D,numThread1D); dim3 numBlocks((dim[1]+numThread1D-1)/numThread1D,(dim[2]+numThread1D-1)/numThread1D,(dim[3]+numThread1D-1)/numThread1D); //kernel_interpol<<<numBlocks,threadsPerBlock>>>(d_volmem,d_dim,alpha); hipLaunchKernelGGL(( kernel_interpol2), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, 
      d_volmem,d_volmem2,d_dim,alpha);
  hipError_t errCu = hipGetLastError();
  if (errCu != hipSuccess)
    printf("Error After kernel_interpol2 when clicking: %s\n", hipGetErrorString(errCu));
  errCu = hipDeviceSynchronize();
  if (errCu != hipSuccess)
    printf("Error Sync After kernel_interpol2 when clicking: %s\n", hipGetErrorString(errCu));

  //copy from device's global mem to texture mem
  if (!d_volumeArray[NTEX]) {
    hipMalloc3DArray(&d_volumeArray[NTEX], &channelDesc, volumeSize);
    hipMalloc3DArray(&d_volumeArray1[NTEX], &channelDesc, volumeSize);
  }

  hipMemcpy3DParms copyParams0 = {0};
  copyParams0.srcPtr = make_hipPitchedPtr((void*)d_volmem, volumeSize.width*pixSize, volumeSize.width, volumeSize.height);
  copyParams0.dstArray = d_volumeArray[NTEX];
  copyParams0.extent = volumeSize;
  copyParams0.kind = hipMemcpyDeviceToDevice;
  hipMemcpy3D(&copyParams0);

  tex2.normalized = false;
  tex2.filterMode = hipFilterModeLinear;
  tex2.addressMode[0] = hipAddressModeBorder;
  tex2.addressMode[1] = hipAddressModeBorder;
  tex2.addressMode[2] = hipAddressModeBorder;
  hipBindTextureToArray(tex2, d_volumeArray[NTEX], channelDesc);

  hipMemcpy3DParms copyParams1 = {0};
  copyParams1.srcPtr = make_hipPitchedPtr((void*)d_volmem2, volumeSize.width*pixSize, volumeSize.width, volumeSize.height);
  copyParams1.dstArray = d_volumeArray1[NTEX];
  copyParams1.extent = volumeSize;
  copyParams1.kind = hipMemcpyDeviceToDevice;
  hipMemcpy3D(&copyParams1);

  tex5.normalized = false;
  tex5.filterMode = hipFilterModeLinear;
  tex5.addressMode[0] = hipAddressModeBorder;
  tex5.addressMode[1] = hipAddressModeBorder;
  tex5.addressMode[2] = hipAddressModeBorder;
  hipBindTextureToArray(tex5, d_volumeArray1[NTEX], channelDesc);

  //after that call the normal kernel to do MIP
  count = mini;

  double FT[3];
  double FN[3],FB[3];
  double dr[3],ddr[3];

  if (kern==1) {
    for (int i=0; i<3; i++) {
      center[i] = cubicFilter<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]);
      dr[i] = cubicFilter_G<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]);
      ddr[i] = cubicFilter_GG<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]);
    }
  }
  else {
    for (int i=0; i<3; i++) {
      center[i] = ctmr(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]);
      dr[i] = ctmr_g(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]);
      ddr[i] = ctmr_gg(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]);
    }
  }

  normalize(dr,3);
  normalize(ddr,3);

  memcpy(FT,dr,sizeof(double)*3);
  memcpy(FN,eigenvec,sizeof(double)*3);
  normalize(FN,3);
  cross(FT,FN,FB);
  cross(FB,FT,FN);
  memcpy(dir1,FN,sizeof(double)*3);
  memcpy(dir2,FB,sizeof(double)*3);

  hipMemcpy(d_dir1, dir1, 3*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(d_dir2, dir2, 3*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(d_center, center, 3*sizeof(double), hipMemcpyHostToDevice);

  numThread1D = 16;
  dim3 threadsPerBlock2(numThread1D,numThread1D);
  dim3 numBlocks2((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D);

  if (statePKey) {
    if (stateIKey)
      hipLaunchKernelGGL(( kernel_peak_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble);
    else
      hipLaunchKernelGGL(( kernel_peak), 
dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } else { if (stateIKey) hipLaunchKernelGGL(( kernel_cpr_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_cpr), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error After kernel_cpr when clicking: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync After kernel_cpr when clicking: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); short width = size[0]; short height = size[1]; //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); if (statePKey) quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); else quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); glm::mat4 fmat2 = glm::mat4(); fmat2[0][0] = spherescale; fmat2[1][1] = spherescale; fmat2[2][2] = spherescale; fmat2[3][0] = center[0]; fmat2[3][1] = center[1]; fmat2[3][2] = center[2]; hpld_inter->model(fmat2); viewer.current(); glm::mat4 tmat_sq_inter = glm::mat4(); tmat_sq_inter[0][0] = FN[0]; tmat_sq_inter[0][1] = FN[1]; tmat_sq_inter[0][2] = FN[2]; tmat_sq_inter[0][3] = 0; tmat_sq_inter[1][0] = FB[0]; tmat_sq_inter[1][1] = FB[1]; tmat_sq_inter[1][2] = FB[2]; tmat_sq_inter[1][3] = 0; tmat_sq_inter[2][0] = FT[0]; tmat_sq_inter[2][1] = FT[1]; tmat_sq_inter[2][2] = FT[2]; tmat_sq_inter[2][3] = 0; tmat_sq_inter[3][0] = center[0]; tmat_sq_inter[3][1] = center[1]; tmat_sq_inter[3][2] = center[2]; tmat_sq_inter[3][3] = 1; glm::mat4 smat_sq_inter = glm::mat4(); smat_sq_inter[0][0] = 2; smat_sq_inter[1][1] = 2; glm::mat4 fmat_sq_inter = tmat_sq_inter*smat_sq_inter; hpld_sq_inter->model(fmat_sq_inter); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); //hpld_sq_inter->setTexture((char*)"myTextureSampler",(unsigned char *)imageQuantized,size[0],size[1],4); } int main(int argc, const char **argv) { const char *me; char *err; hestOpt *hopt=NULL; hestParm *hparm; airArray *mop; //cache queue for GPU memory //int queue[NTEX]; //int queueCurSize = 0; //unordered_map<int,int> timetoindex; Queue queue; char *name; char *texname1, *texname2; double dir1[3],dir2[3]; //tmp fixed track coords, and radius double track[3] = {366.653991263,89.6381792864,104.736646409}; double trackhomo[4]; trackhomo[0] = track[0]; trackhomo[1] = track[1]; trackhomo[2] = track[2]; trackhomo[3] = 1; double trackw[4]; double radius = 10; double center[3]; int size[2]; Nrrd *nin; char *outname; char inname[100]; char *centername; double swidth, sstep; //width and step to take inside the slice short *outdata; char outnameslice[100]; double verextent; //vertical extent to project MIP char *pathprefix; int kern; int curVolInMem; /* boilerplate hest code */ me = argv[0]; mop = airMopNew(); hparm = 
hestParmNew(); airMopAdd(mop, hparm, (airMopper)hestParmFree, airMopAlways); /* setting up the command-line options */ hparm->respFileEnable = AIR_TRUE; hparm->noArgsIsNoProblem = AIR_TRUE; hestOptAdd(&hopt, "isize", "sx sy", airTypeUInt, 2, 2, size, "200 200", "output image sizes"); hestOptAdd(&hopt, "vex", "ve", airTypeDouble, 1, 1, &verextent, "200", "vertical extent in projecting MIP"); hestOptAdd(&hopt, "kern", "kernel", airTypeInt, 1, 1, &kern, "0", "kernel used in convolution"); hestOptAdd(&hopt, "dir1", "x y z", airTypeDouble, 3, 3, dir1, "1 0 0", "first direction of the generated image"); hestOptAdd(&hopt, "dir2", "x y z", airTypeDouble, 3, 3, dir2, "0 -1 0", "second direction of the generated image"); hestOptAdd(&hopt, "swidth", "sw", airTypeDouble, 1, 1, &swidth, "1", "the width of the slice to cut"); hestOptAdd(&hopt, "sstep", "ss", airTypeDouble, 1, 1, &sstep, "1", "the step of Maximum Intensity Projection through slice"); hestOptAdd(&hopt, "i", "name", airTypeString, 1, 1, &centername, "coord_newtrack_pioneer.txt", "name of files centaining centers"); hestOptAdd(&hopt, "pref", "path", airTypeString, 1, 1, &pathprefix, "/media/trihuynh/781B8CE3469A7908/scivisdata", "prefix of the path to the folder containing data files"); hestOptAdd(&hopt, "o", "name", airTypeString, 1, 1, &outname, "cpr.nrrd", "name of output image"); hestParseOrDie(hopt, argc-1, argv+1, hparm, me, "demo program", AIR_TRUE, AIR_TRUE, AIR_TRUE); airMopAdd(mop, hopt, (airMopper)hestOptFree, airMopAlways); airMopAdd(mop, hopt, (airMopper)hestParseFree, airMopAlways); /* Compute threshold (isovalue) */ cout<<"After TEEM processing of input arguments"<<endl; mainInit(); int countline = 0; string line; ifstream infile(centername); int *arr_nameid; double *arr_center; while (std::getline(infile, line)) { ++countline; } infile.clear(); infile.seekg(0, ios::beg); arr_nameid = new int[countline]; arr_center = new double[countline*3]; for (int i=0; i<countline; i++) { infile >> arr_nameid[i]; infile >> arr_center[i*3]; infile >> arr_center[i*3+1]; infile >> arr_center[i*3+2]; } infile.close(); cout<<"Initialized countline = "<<countline<<endl; //double thresdis = 1.0; double thresdis = -1.0; //not checking vector<double> vcenter; vector<int> vnameid; vcenter.push_back(arr_center[0]); vcenter.push_back(arr_center[1]); vcenter.push_back(arr_center[2]); vnameid.push_back(arr_nameid[0]); //double thresang = 150; double thresang = 200; //not checking //correction by thresholding distance for (int i=1; i<countline; i++) { int countv = vcenter.size(); if (diss2P(vcenter[countv-3],vcenter[countv-2],vcenter[countv-1],arr_center[i*3+0],arr_center[i*3+1],arr_center[i*3+2])<thresdis) { continue; } else { vcenter.push_back(arr_center[i*3+0]); vcenter.push_back(arr_center[i*3+1]); vcenter.push_back(arr_center[i*3+2]); vnameid.push_back(arr_nameid[i]); } } countline = vcenter.size()/3; memcpy(arr_center,vcenter.data(),sizeof(double)*countline*3); memcpy(arr_nameid,vnameid.data(),sizeof(int)*countline); //correction by thresholding angle vcenter.clear(); vcenter.push_back(arr_center[0]); vcenter.push_back(arr_center[1]); vcenter.push_back(arr_center[2]); vcenter.push_back(arr_center[3]); vcenter.push_back(arr_center[4]); vcenter.push_back(arr_center[5]); vnameid.clear(); vnameid.push_back(arr_nameid[0]); vnameid.push_back(arr_nameid[1]); double prevec[3]; prevec[0] = arr_center[3]-arr_center[0]; prevec[1] = arr_center[4]-arr_center[1]; prevec[2] = arr_center[5]-arr_center[2]; normalize(prevec,3); for (int i=2; i<countline; i++) { 
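    // (added note) Angle-threshold pass: the previous and current segment
    // directions are compared and a sample is dropped when the turn is sharper
    // than thresang degrees, i.e. the two statements below:
    //   double ang = computeAngle(prevec, curvec);   // acos(dot(prevec,curvec)) in degrees
    //   if (ang > thresang) continue;                // skip kinks in the tracked path
    // With the current setting thresang = 200 the test never fires, since
    // computeAngle() returns at most 180; lower thresang to re-enable the filter.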
double curvec[3]; curvec[0] = arr_center[i*3+0]-arr_center[(i-1)*3+0]; curvec[1] = arr_center[i*3+1]-arr_center[(i-1)*3+1]; curvec[2] = arr_center[i*3+2]-arr_center[(i-1)*3+2]; normalize(curvec,3); double ang = computeAngle(prevec,curvec); if (ang>thresang) continue; memcpy(prevec,curvec,sizeof(double)*3); vcenter.push_back(arr_center[i*3+0]); vcenter.push_back(arr_center[i*3+1]); vcenter.push_back(arr_center[i*3+2]); vnameid.push_back(arr_nameid[i]); } //adding more vertices at the beginning and ending to have enough convolution points /* double firstpoint[3]; firstpoint[0] = vcenter[0]; firstpoint[1] = vcenter[1]; firstpoint[2] = vcenter[2]; int firstnameid = vnameid[0]; double lastpoint[3]; lastpoint[0] = vcenter[vcenter.size()-3]; lastpoint[1] = vcenter[vcenter.size()-2]; lastpoint[2] = vcenter[vcenter.size()-1]; int lastnameid = vnameid[vnameid.size()-1]; vcenter.insert(vcenter.begin(),firstpoint[2]); vcenter.insert(vcenter.begin(),firstpoint[1]); vcenter.insert(vcenter.begin(),firstpoint[0]); vnameid.insert(vnameid.begin(),firstnameid); vcenter.push_back(lastpoint[0]); vcenter.push_back(lastpoint[1]); vcenter.push_back(lastpoint[2]); vcenter.push_back(lastpoint[0]); vcenter.push_back(lastpoint[1]); vcenter.push_back(lastpoint[2]); vnameid.push_back(lastnameid); vnameid.push_back(lastnameid); */ printf("after correcting input\n"); countline = vcenter.size()/3; memcpy(arr_center,vcenter.data(),sizeof(double)*countline*3); memcpy(arr_nameid,vnameid.data(),sizeof(int)*countline); //clustering double disclus = 1.0; vector<vector<int>> vcluster; vector<double> vcenterclus; int i=0; while (i<countline) { vector<int> curclus; //curclus.push_back(arr_center[i*3]); //curclus.push_back(arr_center[i*3+1]); //curclus.push_back(arr_center[i*3+2]); curclus.push_back(i); while (i+1<countline) { if (diss2P(arr_center[i*3],arr_center[i*3+1],arr_center[i*3+2],arr_center[(i+1)*3],arr_center[(i+1)*3+1],arr_center[(i+1)*3+2])<disclus) { i++; //curclus.push_back(arr_center[i*3]); //curclus.push_back(arr_center[i*3+1]); //curclus.push_back(arr_center[i*3+2]); curclus.push_back(i); } else break; } i++; //compute center of this cluster int n = curclus.size(); double centerclus[3]; memset(centerclus,0,sizeof(double)*3); for (int j=0; j<n; j++) { centerclus[0] += arr_center[curclus[j]*3]; centerclus[1] += arr_center[curclus[j]*3+1]; centerclus[2] += arr_center[curclus[j]*3+2]; } vcenterclus.push_back(centerclus[0]/n); vcenterclus.push_back(centerclus[1]/n); vcenterclus.push_back(centerclus[2]/n); vcluster.push_back(curclus); } outdata = new short[size[0]*size[1]*countline]; cout<<"Initialized outdata"<<endl; int curnameind; float* filemem0 = NULL; float* filemem1 = NULL; int initalized = 0; double *imageDouble = NULL; int *d_dim; double *d_dir1; double *d_dir2; double *d_imageDouble; int *d_size; double *d_center; int count = 0; nin = nrrdNew(); Nrrd *ndblpng = nrrdNew(); float camfr[3], camat[3], camup[3], camnc, camfc, camFOV; int camortho; unsigned int camsize[2]; /* camfr[0] = arr_center[countline/2*3+0]; camfr[1] = arr_center[countline/2*3+1]; camfr[2] = arr_center[countline/2*3+2]-5; camat[0] = arr_center[countline/2*3+0]; camat[1] = arr_center[countline/2*3+1]; camat[2] = arr_center[countline/2*3+2]; camup[0] = 1; camup[1] = 0; camup[2] = 0; camnc = -10; camfc = 10; camFOV = 170; camortho = 1; camsize[0] = 500; camsize[1] = 500; */ //test synthetic data camfr[0] = arr_center[countline/2*3+0]; camfr[1] = arr_center[countline/2*3+1]; camfr[2] = arr_center[countline/2*3+2]-50; camat[0] = 
arr_center[countline/2*3+0]; camat[1] = arr_center[countline/2*3+1]; camat[2] = arr_center[countline/2*3+2]; camup[0] = 1; camup[1] = 0; camup[2] = 0; camnc = -100; camfc = 100; camFOV = 170; camortho = 1; camsize[0] = 500; camsize[1] = 500; //debug clicking /* camfr[0] = 0; camfr[1] = 1; camfr[2] = -6; camat[0] = 0; camat[1] = 1; camat[2] = -1; camup[0] = 1; camup[1] = 0; camup[2] = 0; camnc = -10; camfc = 10; camFOV = 90; camortho = 1; camsize[0] = 500; camsize[1] = 500; */ Hale::init(); //Hale::debugging = 1; Hale::Scene scene; /* then create viewer (in order to create the OpenGL context) */ Hale::Viewer viewer(camsize[0], camsize[1], "Viewer1", &scene); viewer.lightDir(glm::vec3(-1.0f, 1.0f, 3.0f)); viewer.camera.init(glm::vec3(camfr[0], camfr[1], camfr[2]), glm::vec3(camat[0], camat[1], camat[2]), glm::vec3(camup[0], camup[1], camup[2]), camFOV, (float)camsize[0]/camsize[1], camnc, camfc, camortho); viewer.current(); viewer.refreshCB((Hale::ViewerRefresher)render); viewer.refreshData(&viewer); Hale::Scene scene2; Hale::Viewer viewer2(camsize[0], camsize[1], "Viewer2", &scene2); viewer2.lightDir(glm::vec3(-1.0f, 1.0f, 3.0f)); viewer2.camera.init(glm::vec3(camfr[0], camfr[1], camfr[2]), glm::vec3(camat[0], camat[1], camat[2]), glm::vec3(camup[0], camup[1], camup[2]), camFOV, (float)camsize[0]/camsize[1], camnc, camfc, camortho); //viewer2.current(); //viewer2.refreshCB((Hale::ViewerRefresher)render); //viewer2.refreshData(&viewer2); //viewer.current(); printf("Initialized viewer\n"); Hale::Program *newprog = new Hale::Program("tex-vert-cpr.glsl","texdemo-frag.glsl"); newprog->compile(); newprog->bindAttribute(Hale::vertAttrIdxXYZW, "positionVA"); newprog->bindAttribute(Hale::vertAttrIdxRGBA, "colorVA"); newprog->bindAttribute(Hale::vertAttrIdxNorm, "normalVA"); newprog->bindAttribute(Hale::vertAttrIdxTex2, "tex2VA"); newprog->link(); double spherescale = 0.2; double spherescale_inter = 0.3; int density = 10; //how many points per one unit length in index-space int countls = 0; for (int i=1; i<countline-3; i++) { double dis = sqrt(diss2P(arr_center[i*3+0], arr_center[i*3+1], arr_center[i*3+2], arr_center[(i+1)*3+0], arr_center[(i+1)*3+1], arr_center[(i+1)*3+2])); countls += (dis*density); } int *ptotime = new int[countls]; double *ptofrac = new double[countls]; int *timetop = new int[countline]; memset(timetop,0,sizeof(int)*countline); limnPolyData *lpld3 = limnPolyDataNew(); limnPolyDataAlloc(lpld3, 0, countls, countls, 1); int cpointind = 0; for (int i=1; i<countline-3; i++) { double dis = sqrt(diss2P(arr_center[i*3+0], arr_center[i*3+1], arr_center[i*3+2], arr_center[(i+1)*3+0], arr_center[(i+1)*3+1], arr_center[(i+1)*3+2])); int countseg = dis*density; double tsep = 1.0/((double)countseg); timetop[i] = cpointind; for (int j=0; j<countseg; j++) { double curpoint[3]; if (kern==1) { for (int k=0; k<3; k++) curpoint[k] = cubicFilter<double>((double)j*tsep, arr_center[(i-1)*3+k], arr_center[(i)*3+k], arr_center[(i+1)*3+k], arr_center[(i+2)*3+k]); } else { for (int k=0; k<3; k++) curpoint[k] = ctmr((double)j*tsep, arr_center[(i-1)*3+k], arr_center[(i)*3+k], arr_center[(i+1)*3+k], arr_center[(i+2)*3+k]); } ELL_4V_SET(lpld3->xyzw + 4*cpointind, curpoint[0],curpoint[1],curpoint[2], 1.0); lpld3->indx[cpointind] = cpointind; ptotime[cpointind] = i; ptofrac[cpointind] = j*tsep; cpointind++; } } lpld3->type[0] = limnPrimitiveLineStrip; lpld3->icnt[0] = countls; printf("countls = %d\n", countls); //adding linestrip for original path limnPolyData *lpldorig = limnPolyDataNew(); 
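  // (added note) The loop above resamples the smoothed track at roughly
  // 'density' points per unit of index-space length; for each resampled vertex p,
  // ptotime[p] stores the segment index and ptofrac[p] the in-segment parameter,
  // which is the (time, alpha) pair later handed to interpolVolAndRender().
  // lpldorig, allocated next, holds the raw input centers drawn as a plain
  // line strip alongside the resampled/tube version for comparison.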
limnPolyDataAlloc(lpldorig, 0, countline-3, countline-3, 1); for (int i=1; i<countline-2; i++) { ELL_4V_SET(lpldorig->xyzw + 4*(i-1), arr_center[(i)*3+0],arr_center[(i)*3+1],arr_center[(i)*3+2], 1.0); lpldorig->indx[i-1] = i-1; } lpldorig->type[0] = limnPrimitiveLineStrip; lpldorig->icnt[0] = countline-3; Hale::Polydata *hpldorig = new Hale::Polydata(lpldorig, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "LineStrip"); hpldorig->colorSolid(1.0,1.0,1.0); scene.add(hpldorig); limnPolyData *lpld4 = limnPolyDataNew(); limnPolyDataSpiralTubeWrap(lpld4, lpld3, 0, NULL, 10, 4, 0.1); Hale::Polydata *hpld3 = new Hale::Polydata(lpld3, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "LineStrip"); hpld3->colorSolid(1.0,0.0,0.0); Hale::Polydata *hpld4 = new Hale::Polydata(lpld4, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "SpiralTube"); hpld4->colorSolid(1.0,1.0,0.5); scene.add(hpld4); vector<Hale::Polydata *> vtexture; vector<Hale::Polydata *> vtexture2; vector<Hale::Polydata *> vsphere; vector<Hale::Polydata *> vsphereorig; unsigned char *imageQuantized; imageQuantized = new unsigned char[size[0]*size[1]*4]; double prevFT[3], prevFN[3], prevFB[3]; printf("countline after adding boundary points = %d\n", countline); printf("arr_nameid[1] = %d\n", arr_nameid[1]); printf("arr_nameid[countline-3] = %d\n", arr_nameid[countline-3]); printf("New nameid and centers:\n"); for (int i=0; i<countline; i++) printf("%d %f %f %f\n", arr_nameid[i], arr_center[i*3+0], arr_center[i*3+1], arr_center[i*3+2]); //computing PCA double cov[9]; computeCovariance(arr_center,countline,cov); double eigval[3],eigval2[3]; eigenOfHess(cov,eigval); ell_3m_eigenvalues_d(eigval2, cov, 1); double seigval = eigval[0], seigval2 = eigval2[0]; for (int i=1; i<3; i++) if (seigval>eigval[i]) seigval = eigval[i]; for (int i=1; i<3; i++) if (seigval2>eigval2[i]) seigval2 = eigval2[i]; double eigenvec[3],eigenvec2[3]; computeEigenVec(cov,seigval,eigenvec); computeEigenVec(cov,seigval2,eigenvec2); printf("eigenvalues are: %f, %f, %f\n",eigval[0],eigval[1],eigval[2]); printf("eigenvalues based on Teem's function are: %f, %f, %f\n",eigval2[0],eigval2[1],eigval2[2]); printf("eigenvector is (%f,%f,%f)\n", eigenvec[0],eigenvec[1],eigenvec[2]); printf("eigenvector based on eigenvalue of Teem is (%f,%f,%f)\n", eigenvec2[0],eigenvec2[1],eigenvec2[2]); memcpy(eigenvec,eigenvec2,sizeof(double)*3); int nOutChannel = 4; for (int clusind=1; clusind<vcluster.size()-2; clusind++) { double dr[3],ddr[3]; if (kern==1) { for (int i=0; i<3; i++) { dr[i] = cubicFilter_G<double>(0, vcenterclus[(clusind-1)*3+i], vcenterclus[(clusind)*3+i], vcenterclus[(clusind+1)*3+i], vcenterclus[(clusind+2)*3+i]); ddr[i] = cubicFilter_GG<double>(0, vcenterclus[(clusind-1)*3+i], vcenterclus[(clusind)*3+i], vcenterclus[(clusind+1)*3+i], vcenterclus[(clusind+2)*3+i]); } } else { for (int i=0; i<3; i++) { dr[i] = ctmr_g(0, vcenterclus[(clusind-1)*3+i], vcenterclus[(clusind)*3+i], vcenterclus[(clusind+1)*3+i], vcenterclus[(clusind+2)*3+i]); ddr[i] = ctmr_gg(0, vcenterclus[(clusind-1)*3+i], vcenterclus[(clusind)*3+i], vcenterclus[(clusind+1)*3+i], vcenterclus[(clusind+2)*3+i]); } } //for (count = 1; count<countline-2; count++) for (int cluselem = 0; cluselem<vcluster[clusind].size(); cluselem++) { count = vcluster[clusind][cluselem]; curnameind = arr_nameid[count]; /* for (int i=0; i<3; i++) { center[i] = cubicFilter<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //center[i] = ctmr(0, 
arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); printf("ctmr computation: x=%f, a0=%f, a1=%f, a2=%f, a3=%f -> res=%f\n", 0.0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i],center[i]); } printf("center = %f %f %f\n", center[0],center[1],center[2]); */ double FT[3]; double FN[3],FB[3]; /* for (int i=0; i<3; i++) dr[i] = cubicFilter_G<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //dr[i] = ctmr_g(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); for (int i=0; i<3; i++) ddr[i] = cubicFilter_GG<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //ddr[i] = ctmr_gg(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); */ if (kern==1) { for (int i=0; i<3; i++) { center[i] = cubicFilter<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //dr[i] = cubicFilter_G<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //ddr[i] = cubicFilter_GG<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } else { for (int i=0; i<3; i++) { center[i] = ctmr(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //dr[i] = ctmr_g(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //ddr[i] = ctmr_gg(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); normalize(dr,3); normalize(ddr,3); printf("after normalizing\n"); printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); memcpy(FT,dr,sizeof(double)*3); //double crossddrdr[3]; //cross(ddr,dr,crossddrdr); //cross(dr,crossddrdr,FN); memcpy(FN,eigenvec,sizeof(double)*3); normalize(FN,3); cross(FT,FN,FB); cross(FB,FT,FN); normalize(FN,3); normalize(FT,3); normalize(FB,3); memcpy(dir1,FN,sizeof(double)*3); memcpy(dir2,FB,sizeof(double)*3); printf("N = %f %f %f, B = %f %f %f, T = %f %f %f, dotNB = %f, dotNT = %f, dotBT = %f\n",FN[0],FN[1],FN[2],FB[0],FB[1],FB[2],FT[0],FT[1],FT[2], dotProduct(FN,FB,3),dotProduct(FN,FT,3),dotProduct(FB,FT,3)); if (count>1) { printf("count = %d\n", count); printf("angle of FT: %f\n", computeAngle(FT,prevFT)); printf("angle of FN: %f\n", computeAngle(FN,prevFN)); printf("angle of FB: %f\n", computeAngle(FB,prevFB)); } memcpy(prevFB,FB,sizeof(double)*3); memcpy(prevFN,FN,sizeof(double)*3); memcpy(prevFT,FT,sizeof(double)*3); limnPolyData *lpld = limnPolyDataNew(); limnPolyDataSquare(lpld, 1 << limnPolyDataInfoNorm | 1 << limnPolyDataInfoTex2); printf("after initializing lpld\n"); Hale::Polydata *hpld = new Hale::Polydata(lpld, true, NULL, "square"); hpld->program(newprog); glm::mat4 tmat = glm::mat4(); tmat[0][0] = FN[0]; tmat[0][1] = FN[1]; tmat[0][2] = FN[2]; tmat[0][3] = 0; tmat[1][0] = FB[0]; tmat[1][1] = FB[1]; tmat[1][2] = FB[2]; tmat[1][3] = 0; tmat[2][0] = FT[0]; tmat[2][1] = FT[1]; tmat[2][2] = FT[2]; tmat[2][3] = 0; tmat[3][0] = center[0]; tmat[3][1] = center[1]; 
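      // (added note) tmat is filled column by column (glm::mat4 is column-major):
      // column 0 = FN, column 1 = FB, column 2 = FT, column 3 = slice center,
      // so the textured unit square is rotated into the FN/FB plane and moved to
      // 'center'; smat below scales it by 2 before fmat = tmat*smat is applied.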
tmat[3][2] = center[2]; tmat[3][3] = 1; glm::mat4 smat = glm::mat4(); smat[0][0] = 2; smat[1][1] = 2; glm::mat4 fmat = tmat*smat; hpld->model(fmat); //add a sphere limnPolyData *lpld2 = limnPolyDataNew(); limnPolyDataIcoSphere(lpld2, 1 << limnPolyDataInfoNorm, 3); Hale::Polydata *hpld2 = new Hale::Polydata(lpld2, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "IcoSphere"); hpld2->colorSolid(lerp(0,1,0,count,countline-1),lerp(1,0,0,count,countline-1),0.5); glm::mat4 fmat2 = glm::mat4(); fmat2[0][0] = spherescale; fmat2[1][1] = spherescale; fmat2[2][2] = spherescale; fmat2[3][0] = center[0]; fmat2[3][1] = center[1]; fmat2[3][2] = center[2]; fmat2[3][3] = 1; hpld2->model(fmat2); scene.add(hpld2); vsphere.push_back(hpld2); //adding sphere for original track path too limnPolyData *lpldorigsp = limnPolyDataNew(); limnPolyDataIcoSphere(lpldorigsp, 1 << limnPolyDataInfoNorm, 3); Hale::Polydata *hpldorigsp = new Hale::Polydata(lpldorigsp, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "IcoSphere"); hpldorigsp->colorSolid(lerp(0,1,0,count,countline-1),lerp(1,0,0,count,countline-1),0.5); fmat2[0][0] = spherescale; fmat2[1][1] = spherescale; fmat2[2][2] = spherescale; fmat2[3][0] = arr_center[(count)*3+0]; fmat2[3][1] = arr_center[(count)*3+1]; fmat2[3][2] = arr_center[(count)*3+2]; fmat2[3][3] = 1; hpldorigsp->model(fmat2); scene.add(hpldorigsp); vsphereorig.push_back(hpldorigsp); printf("after adding hpld to scene\n"); printf("added lpld\n"); cout<<"Before read in file, with curnameind = "<<curnameind<<", center = "<<center[0]<<" "<<center[1]<<" "<<center[2]<<endl; /* sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); cout<<"inname = "<<inname<<endl; if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); fprintf(stderr, "%s: trouble reading \"%s\":\n%s", me, inname, err); free(err); return; } cout<<"read file "<<inname<<endl; unsigned int pixSize; hipChannelFormatDesc channelDesc; pixSize = sizeof(float); channelDesc = hipCreateChannelDesc<float>(); if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } double mat_trans[4][4]; mat_trans[3][0] = mat_trans[3][1] = mat_trans[3][2] = 0; mat_trans[3][3] = 1; int dim[4]; if (nin->dim == 3) { dim[0] = 1; dim[1] = nin->axis[0].size; dim[2] = nin->axis[1].size; dim[3] = nin->axis[2].size; for (int i=0; i<3; i++) { for (int j=0; j<3; j++) { mat_trans[j][i] = nin->axis[i].spaceDirection[j]; } mat_trans[i][3] = nin->spaceOrigin[i]; } } else //4-channel { dim[0] = nin->axis[0].size; dim[1] = nin->axis[1].size; dim[2] = nin->axis[2].size; dim[3] = nin->axis[3].size; for (int i=0; i<3; i++) { for (int j=0; j<3; j++) { mat_trans[j][i] = nin->axis[i+1].spaceDirection[j]; } mat_trans[i][3] = nin->spaceOrigin[i]; } } int channel = 1; if (!initalized) { filemem0 = new float[dim[1]*dim[2]*dim[3]]; filemem1 = new float[dim[1]*dim[2]*dim[3]]; } for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } double mat_trans_inv[4][4]; invertMat44(mat_trans,mat_trans_inv); //tex3D stuff const hipExtent volumeSize = make_hipExtent(dim[1], dim[2], dim[3]); printf("Array size: %f MB\n", dim[1]*dim[2]*dim[3]*sizeof(float)/1024.0/1024.0); size_t free_byte ; size_t total_byte ; hipError_t errCu; errCu = hipMemGetInfo( &free_byte, &total_byte ) ; if ( hipSuccess != errCu ){ printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(errCu) ); exit(1); } double free_db = 
(double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; printf("GPU memory usage (before copying memory to Device): used = %f MB, free = %f MB, total = %f MB\n", used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); if (!initalized) { hipMalloc3DArray(&d_volumeArray[2], &channelDesc, volumeSize); } hipMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_hipPitchedPtr((void*)filemem0, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[2]; copyParams0.extent = volumeSize; copyParams0.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams0); */ size_t free_byte; size_t total_byte; double free_db; double total_db; double used_db; hipError_t errCu; hipChannelFormatDesc channelDesc; channelDesc = hipCreateChannelDesc<float>(); //hipArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); hipArray* d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex2.normalized = false; // access with normalized texture coordinates tex2.filterMode = hipFilterModeLinear; // linear interpolation tex2.addressMode[0] = hipAddressModeBorder; // wrap texture coordinates tex2.addressMode[1] = hipAddressModeBorder; tex2.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex2, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex5.normalized = false; // access with normalized texture coordinates tex5.filterMode = hipFilterModeLinear; // linear interpolation tex5.addressMode[0] = hipAddressModeBorder; // wrap texture coordinates tex5.addressMode[1] = hipAddressModeBorder; tex5.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex5, d_curvolarr, channelDesc); errCu = hipMemGetInfo( &free_byte, &total_byte ) ; if ( hipSuccess != errCu ){ printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(errCu) ); exit(1); } free_db = (double)free_byte ; total_db = (double)total_byte ; used_db = total_db - free_db ; printf("GPU memory usage (after copying memory to Device): used = %f MB, free = %f MB, total = %f MB\n", used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); nOutChannel = 4; //int dim[4]; //memcpy(dim,queue.getDataDim(),sizeof(int)*4); //printf("dim = %d,%d,%d,%d\n",dim[0],dim[1],dim[2],dim[3]); if (!initalized) { imageDouble = new double[size[0]*size[1]*nOutChannel]; errCu = hipMalloc(&d_dim, 4*sizeof(int)); if ( hipSuccess != errCu ){ printf("Error in Malloc of d_dim: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMemcpy(d_dim, queue.getDataDim(), 4*sizeof(int), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in memcpy of d_dim: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMalloc(&d_dir1, 3*sizeof(double)); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMalloc(&d_dir2, 3*sizeof(double)); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMalloc(&d_imageDouble,sizeof(double)*size[0]*size[1]*nOutChannel); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMalloc(&d_size,2*sizeof(int)); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMemcpy(d_size,size,2*sizeof(int), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", 
hipGetErrorString(errCu) ); exit(1); } errCu = hipMalloc(&d_center,3*sizeof(double)); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } } errCu = hipMemcpy(d_dir1, dir1, 3*sizeof(double), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMemcpy(d_dir2, dir2, 3*sizeof(double), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMemcpy(d_center,center,3*sizeof(double), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } int numThread1D = 16; dim3 threadsPerBlock(numThread1D,numThread1D); dim3 numBlocks((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D); hipLaunchKernelGGL(( kernel_cpr), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_dim, d_size, verextent, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); //kernel_cpr_2chan<<<numBlocks,threadsPerBlock>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error After first kernel_cpr: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); unsigned short width = size[0]; unsigned short height = size[1]; copyImageChannel<double,short>(imageDouble,4,size[0],size[1],1,outdata+count*size[0]*size[1],1,0); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); //drawNCircle(imageQuantized,4,size[0],size[1],0, count, countline/2,countline/2); hpld->setTexture((char*)"myTextureSampler",(unsigned char *)imageQuantized,size[0],size[1],4); scene.add(hpld); vtexture.push_back(hpld); drawCircle(imageQuantized,4,size[0],size[1],0,size[0]/2,size[1]/2,20); double trackedcenter[3]; trackedcenter[0] = arr_center[count*3]; trackedcenter[1] = arr_center[count*3+1]; trackedcenter[2] = arr_center[count*3+2]; double centerdiff[3]; subtractVec(trackedcenter,center,centerdiff,3); double coorfn, coorfb, coorft; coorfn = dotProduct(centerdiff,FN,3); coorfb = dotProduct(centerdiff,FB,3); coorft = dotProduct(centerdiff,FT,3); unsigned char color[3]; color[0] = color[1] = color[2] = 128; if (coorft<-swidth/2 || coorft>swidth/2) { color[0] = color[1] = color[2] = 0; } else { color[0] = lerp(255,0,-swidth/2,coorft,swidth/2); color[1] = lerp(0,0,-swidth/2,coorft,swidth/2); color[2] = lerp(0,255,-swidth/2,coorft,swidth/2); } drawCrossWithColor(imageQuantized,4,size[0],size[1],size[0]/2+coorfn*size[1]/verextent,size[1]/2+coorfb*size[1]/verextent,20,color); initalized = 1; sprintf(outnameslice,"cpr_seq_%d.png",curnameind); //if (nrrdWrap_va(ndblpng, imageQuantized, nrrdTypeUChar, 3, 4, width, height) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // printf("error in saving cpr_seq_X.png, width=%d, height=%d\n",width,height); // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } } } /* //testing synthetic data----------------------------- printf("before testing 
synthetic data\n"); { size_t free_byte; size_t total_byte; double free_db; double total_db; double used_db; int count = 5; dir1[0] = 1; dir1[1] = 0; dir1[2] = 0; dir2[0] = 0; dir2[1] = 1; dir2[2] = 0; center[0] = queue.getDataDim()[1]/2; center[1] = queue.getDataDim()[2]/2; center[2] = queue.getDataDim()[3]/2; hipError_t errCu; hipChannelFormatDesc channelDesc; channelDesc = hipCreateChannelDesc<float>(); //hipArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); hipArray* d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex2.normalized = false; // access with normalized texture coordinates tex2.filterMode = hipFilterModeLinear; // linear interpolation tex2.addressMode[0] = hipAddressModeBorder; // wrap texture coordinates tex2.addressMode[1] = hipAddressModeBorder; tex2.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex2, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex5.normalized = false; // access with normalized texture coordinates tex5.filterMode = hipFilterModeLinear; // linear interpolation tex5.addressMode[0] = hipAddressModeBorder; // wrap texture coordinates tex5.addressMode[1] = hipAddressModeBorder; tex5.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex5, d_curvolarr, channelDesc); errCu = hipMemGetInfo( &free_byte, &total_byte ) ; if ( hipSuccess != errCu ){ printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(errCu) ); exit(1); } free_db = (double)free_byte ; total_db = (double)total_byte ; used_db = total_db - free_db ; printf("GPU memory usage (after copying memory to Device): used = %f MB, free = %f MB, total = %f MB\n", used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); nOutChannel = 4; //int dim[4]; //memcpy(dim,queue.getDataDim(),sizeof(int)*4); //printf("dim = %d,%d,%d,%d\n",dim[0],dim[1],dim[2],dim[3]); if (!initalized) { imageDouble = new double[size[0]*size[1]*nOutChannel]; errCu = hipMalloc(&d_dim, 4*sizeof(int)); if ( hipSuccess != errCu ){ printf("Error in Malloc of d_dim: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMemcpy(d_dim, queue.getDataDim(), 4*sizeof(int), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in memcpy of d_dim: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMalloc(&d_dir1, 3*sizeof(double)); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMalloc(&d_dir2, 3*sizeof(double)); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMalloc(&d_imageDouble,sizeof(double)*size[0]*size[1]*nOutChannel); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMalloc(&d_size,2*sizeof(int)); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMemcpy(d_size,size,2*sizeof(int), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMalloc(&d_center,3*sizeof(double)); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } } errCu = hipMemcpy(d_dir1, dir1, 3*sizeof(double), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMemcpy(d_dir2, dir2, 3*sizeof(double), 
hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMemcpy(d_center,center,3*sizeof(double), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } int numThread1D = 16; dim3 threadsPerBlock(numThread1D,numThread1D); dim3 numBlocks((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D); //swidth = queue.getDataDim()[3]; //verextent-=100; kernel_peak<<<numBlocks,threadsPerBlock>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, queue.getDataDim()[3], 0.1, nOutChannel, d_imageDouble); //kernel_cpr<<<numBlocks,threadsPerBlock>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, queue.getDataDim()[3], 0.05, nOutChannel, d_imageDouble); //kernel_cpr_2chan<<<numBlocks,threadsPerBlock>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, queue.getDataDim()[3], 0.1, nOutChannel, d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error After first kernel_cpr: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); short width = size[0]; short height = size[1]; copyImageChannel<double,short>(imageDouble,4,size[0],size[1],1,outdata+count*size[0]*size[1],1,0); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); initalized = 1; sprintf(outnameslice,"synthetic.png"); if (nrrdWrap_va(ndblpng, imageQuantized, nrrdTypeUChar, 3, 4, width, height) || nrrdSave(outnameslice, ndblpng, NULL) ) { char *err = biffGetDone(NRRD); printf("%s: couldn't save output:\n%s", argv[0], err); free(err); nrrdNix(ndblpng); exit(1); } } printf("finished writing test result\n"); return; //end of testing for synthetic data------------------ */ cout<<"Before allocating output nrrd"<<endl; Nrrd *ndbl = nrrdNew(); //cout<<"Before saving output nrrd"<<endl; //if (nrrdWrap_va(ndbl, outdata, nrrdTypeShort, 3, size[0], size[1], countline) // || nrrdSave(outname,ndbl,NULL) // ) //{ // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndbl); // exit(1); //} viewer2.current(); printf("after setting viewer2.current()\n"); limnPolyData *lpldview2 = limnPolyDataNew(); limnPolyDataSquare(lpldview2, 1 << limnPolyDataInfoNorm | 1 << limnPolyDataInfoTex2); Hale::Polydata *hpldview2 = new Hale::Polydata(lpldview2, true, NULL,//Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "square"); Hale::Program *newprog2 = new Hale::Program("texdemo-vert.glsl","texdemo-frag.glsl"); //Hale::Program *newprog2 = new Hale::Program("tex-vert-cpr.glsl","texdemo-frag.glsl"); newprog2->compile(); newprog2->bindAttribute(Hale::vertAttrIdxXYZW, "positionVA"); newprog2->bindAttribute(Hale::vertAttrIdxRGBA, "colorVA"); newprog2->bindAttribute(Hale::vertAttrIdxNorm, "normalVA"); newprog2->bindAttribute(Hale::vertAttrIdxTex2, "tex2VA"); newprog2->link(); hpldview2->program(newprog2); //find lerping between 2 volumes count = 1; /* curnameind = arr_nameid[count]; sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); fprintf(stderr, "%s: trouble reading \"%s\":\n%s", me, inname, 
err); free(err); return; } cout<<"read file "<<inname<<endl; unsigned int pixSize; hipChannelFormatDesc channelDesc; pixSize = sizeof(float); channelDesc = hipCreateChannelDesc<float>(); if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } int dim[4]; if (nin->dim == 3) { dim[0] = 1; dim[1] = nin->axis[0].size; dim[2] = nin->axis[1].size; dim[3] = nin->axis[2].size; } else //4-channel { dim[0] = nin->axis[0].size; dim[1] = nin->axis[1].size; dim[2] = nin->axis[2].size; dim[3] = nin->axis[3].size; } int channel = 1; if (!filemem0) { filemem0 = new float[dim[1]*dim[2]*dim[3]]; filemem1 = new float[dim[1]*dim[2]*dim[3]]; } for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } //debug for (int k=0; k<=2; k++) for (int j=0; j<=2; j++) for (int i=0; i<=2; i++) printf("volume 1: at (%d,%d,%d) = %f\n", i,j,k,filemem0[k*dim[1]*dim[2]+j*dim[1]+i]); const hipExtent volumeSize = make_hipExtent(dim[1], dim[2], dim[3]); if (!d_volumeArray[0]) hipMalloc3DArray(&d_volumeArray[0], &channelDesc, volumeSize); hipMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_hipPitchedPtr((void*)filemem0, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[0]; copyParams0.extent = volumeSize; copyParams0.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams0); */ hipChannelFormatDesc channelDesc; channelDesc = hipCreateChannelDesc<float>(); //hipArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); hipArray* d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex0.normalized = false; tex0.filterMode = hipFilterModeLinear; tex0.addressMode[0] = hipAddressModeBorder; tex0.addressMode[1] = hipAddressModeBorder; tex0.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex0, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex3.normalized = false; tex3.filterMode = hipFilterModeLinear; tex3.addressMode[0] = hipAddressModeBorder; tex3.addressMode[1] = hipAddressModeBorder; tex3.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex3, d_curvolarr, channelDesc); //read second file count = 2; /* curnameind = arr_nameid[count]; sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); fprintf(stderr, "%s: trouble reading \"%s\":\n%s", me, inname, err); free(err); return; } cout<<"read file "<<inname<<endl; if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } if (!d_volumeArray[1]) hipMalloc3DArray(&d_volumeArray[1], &channelDesc, volumeSize); hipMemcpy3DParms copyParams1 = {0}; copyParams1.srcPtr = make_hipPitchedPtr((void*)filemem0, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray[1]; copyParams1.extent = volumeSize; copyParams1.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams1); */ //d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex1.normalized = false; tex1.filterMode = hipFilterModeLinear; tex1.addressMode[0] = 
hipAddressModeBorder; tex1.addressMode[1] = hipAddressModeBorder; tex1.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex1, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex4.normalized = false; tex4.filterMode = hipFilterModeLinear; tex4.addressMode[0] = hipAddressModeBorder; tex4.addressMode[1] = hipAddressModeBorder; tex4.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex4, d_curvolarr, channelDesc); int curinterp = (timetop[1]+timetop[2])/2; double alpha = ptofrac[curinterp]; hipError_t errCu; float *d_volmem, *d_volmem2; int *dim = queue.getDataDim(); const hipExtent volumeSize = make_hipExtent(dim[1], dim[2], dim[3]); int pixSize = sizeof(float); hipMalloc(&d_volmem,sizeof(float)*dim[1]*dim[2]*dim[3]); hipMalloc(&d_volmem2,sizeof(float)*dim[1]*dim[2]*dim[3]); int numThread1D = 8; dim3 threadsPerBlock(numThread1D,numThread1D,numThread1D); dim3 numBlocks((dim[1]+numThread1D-1)/numThread1D,(dim[2]+numThread1D-1)/numThread1D,(dim[3]+numThread1D-1)/numThread1D); //kernel_interpol<<<numBlocks,threadsPerBlock>>>(d_volmem,d_dim,alpha); hipLaunchKernelGGL(( kernel_interpol2), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_volmem,d_volmem2,d_dim,alpha); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); if (!d_volumeArray[NTEX]) { hipMalloc3DArray(&d_volumeArray[NTEX], &channelDesc, volumeSize); hipMalloc3DArray(&d_volumeArray1[NTEX], &channelDesc, volumeSize); printf("d_volumeArray[NTEX] allocated\n"); } errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error after allocating d_volumeArray[NTEX]: %s\n", hipGetErrorString(errCu)); hipMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_hipPitchedPtr((void*)d_volmem, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[NTEX]; copyParams0.extent = volumeSize; copyParams0.kind = hipMemcpyDeviceToDevice; hipMemcpy3D(&copyParams0); hipMemcpy3DParms copyParams1 = {0}; copyParams1.srcPtr = make_hipPitchedPtr((void*)d_volmem2, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray1[NTEX]; copyParams1.extent = volumeSize; copyParams1.kind = hipMemcpyDeviceToDevice; hipMemcpy3D(&copyParams1); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error after copying mem from d_volmem to d_volumeArray[NTEX]: %s\n", hipGetErrorString(errCu)); //hipArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); tex2.normalized = false; tex2.filterMode = hipFilterModeLinear; tex2.addressMode[0] = hipAddressModeBorder; tex2.addressMode[1] = hipAddressModeBorder; tex2.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex2, d_volumeArray[NTEX], channelDesc); tex5.normalized = false; tex5.filterMode = hipFilterModeLinear; tex5.addressMode[0] = hipAddressModeBorder; tex5.addressMode[1] = hipAddressModeBorder; tex5.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex5, d_volumeArray1[NTEX], channelDesc); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error after copying mem from d_volmem to tex2: %s\n", hipGetErrorString(errCu)); count = 1; /* for (int i=0; i<3; i++) center[i] = cubicFilter<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //center[i] = ctmr(alpha, arr_center[(count-1)*3+i], 
arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); printf("center = %f %f %f\n", center[0],center[1],center[2]); */ double FT[3]; double FN[3],FB[3]; double dr[3],ddr[3]; /* for (int i=0; i<3; i++) dr[i] = cubicFilter_G<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //dr[i] = ctmr_g(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); for (int i=0; i<3; i++) ddr[i] = cubicFilter_GG<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //ddr[i] = ctmr_gg(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); */ if (kern==1) { for (int i=0; i<3; i++) { center[i] = cubicFilter<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); dr[i] = cubicFilter_G<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); ddr[i] = cubicFilter_GG<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } else { for (int i=0; i<3; i++) { center[i] = ctmr(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); dr[i] = ctmr_g(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); ddr[i] = ctmr_gg(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); normalize(dr,3); normalize(ddr,3); printf("after normalizing\n"); printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); memcpy(FT,dr,sizeof(double)*3); memcpy(FN,eigenvec,sizeof(double)*3); normalize(FN,3); cross(FT,FN,FB); cross(FB,FT,FN); memcpy(dir1,FN,sizeof(double)*3); memcpy(dir2,FB,sizeof(double)*3); printf("Interpolation: N = %f %f %f, B = %f %f %f, T = %f %f %f, dotNB = %f, dotNT = %f, dotBT = %f\n",FN[0],FN[1],FN[2],FB[0],FB[1],FB[2],FT[0],FT[1],FT[2], dotProduct(FN,FB,3),dotProduct(FN,FT,3),dotProduct(FB,FT,3)); //hipMemcpy(d_dir1, dir1, 3*sizeof(double), hipMemcpyHostToDevice); //hipMemcpy(d_dir2, dir2, 3*sizeof(double), hipMemcpyHostToDevice); //hipMemcpy(d_center,center,3*sizeof(double), hipMemcpyHostToDevice); errCu = hipMemcpy(d_dir1, dir1, 3*sizeof(double), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMemcpy(d_dir2, dir2, 3*sizeof(double), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipMemcpy(d_center, center, 3*sizeof(double), hipMemcpyHostToDevice); if ( hipSuccess != errCu ){ printf("Error in memcpy: %s \n", hipGetErrorString(errCu) ); exit(1); } errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error before kernel_cpr of the first interpolated point: %s\n", hipGetErrorString(errCu)); numThread1D = 16; dim3 threadsPerBlock2(numThread1D,numThread1D); dim3 numBlocks2((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error before kernel_cpr of the first interpolated point, after 
allocating blocksize: %s\n", hipGetErrorString(errCu)); hipLaunchKernelGGL(( kernel_cpr), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); //kernel_cpr_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error in kernel_cpr of the first interpolated point: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); //save nrrd file to test sprintf(outnameslice,"test.nrrd"); //if (nrrdWrap_va(ndblpng, imageDouble, nrrdTypeDouble, 3, 4, size[0], size[1]) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } short width = size[0]; short height = size[1]; //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); hpldview2->setTexture((char*)"myTextureSampler",(unsigned char *)imageQuantized,size[0],size[1],4); //add the MIP in the interpolated position viewer.current(); limnPolyData *lpld_sq_inter = limnPolyDataNew(); limnPolyDataSquare(lpld_sq_inter, 1 << limnPolyDataInfoNorm | 1 << limnPolyDataInfoTex2); Hale::Polydata *hpld_sq_inter = new Hale::Polydata(lpld_sq_inter, true, NULL, "square"); Hale::Program *newprog3 = new Hale::Program("tex-vert-cpr.glsl","texdemo-frag.glsl"); //Hale::Program *newprog3 = new Hale::Program("texdemo-vert.glsl","texdemo-frag.glsl"); newprog3->compile(); newprog3->bindAttribute(Hale::vertAttrIdxXYZW, "positionVA"); newprog3->bindAttribute(Hale::vertAttrIdxRGBA, "colorVA"); newprog3->bindAttribute(Hale::vertAttrIdxNorm, "normalVA"); newprog3->bindAttribute(Hale::vertAttrIdxTex2, "tex2VA"); newprog3->link(); hpld_sq_inter->program(newprog3); glm::mat4 tmat_sq_inter = glm::mat4(); tmat_sq_inter[0][0] = FN[0]; tmat_sq_inter[0][1] = FN[1]; tmat_sq_inter[0][2] = FN[2]; tmat_sq_inter[0][3] = 0; tmat_sq_inter[1][0] = FB[0]; tmat_sq_inter[1][1] = FB[1]; tmat_sq_inter[1][2] = FB[2]; tmat_sq_inter[1][3] = 0; tmat_sq_inter[2][0] = FT[0]; tmat_sq_inter[2][1] = FT[1]; tmat_sq_inter[2][2] = FT[2]; tmat_sq_inter[2][3] = 0; tmat_sq_inter[3][0] = center[0]; tmat_sq_inter[3][1] = center[1]; tmat_sq_inter[3][2] = center[2]; tmat_sq_inter[3][3] = 1; glm::mat4 smat_sq_inter = glm::mat4(); smat_sq_inter[0][0] = 2; smat_sq_inter[1][1] = 2; glm::mat4 fmat_sq_inter = tmat_sq_inter*smat_sq_inter; hpld_sq_inter->model(fmat_sq_inter); hpld_sq_inter->setTexture((char*)"myTextureSampler",(unsigned char *)imageQuantized,size[0],size[1],4); scene.add(hpld_sq_inter); //add a sphere for the interpolated position limnPolyData *lpld2 = limnPolyDataNew(); limnPolyDataIcoSphere(lpld2, 1 << limnPolyDataInfoNorm, 3); Hale::Polydata *hpld_inter = new Hale::Polydata(lpld2, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "IcoSphere"); hpld_inter->colorSolid(0,0,1.0); glm::mat4 fmat2 = glm::mat4(); fmat2[0][0] = spherescale_inter; fmat2[1][1] = spherescale_inter; fmat2[2][2] = 
spherescale_inter; fmat2[3][0] = center[0]; fmat2[3][1] = center[1]; fmat2[3][2] = center[2]; fmat2[3][3] = 1; printf("center of the first interpolated point: %f,%f,%f\n", center[0],center[1],center[2]); hpld_inter->model(fmat2); scene.add(hpld_inter); viewer2.current(); curVolInMem = 1; scene2.add(hpldview2); //scene2.add(hpld3); scene2.drawInit(); printf("after adding to scene2 and drawInit()\n"); viewer2.verbose(3); render(&viewer2); printf("after rendering viewer2\n"); viewer.current(); viewer.verbose(3); //add a testing sphere /* viewer.current(); limnPolyData *lpldtestsphere = limnPolyDataNew(); limnPolyDataIcoSphere(lpldtestsphere, 1 << limnPolyDataInfoNorm, 3); Hale::Polydata *hpldtestsphere = new Hale::Polydata(lpldtestsphere, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "IcoSphere"); hpldtestsphere->colorSolid(1.0,0,0); glm::mat4 fmat3 = glm::mat4(); fmat3[0][0] = 1; fmat3[1][1] = 1; fmat3[2][2] = 1; fmat3[3][0] = 4; fmat3[3][1] = 5; fmat3[3][2] = 5; fmat3[3][3] = 1; hpldtestsphere->model(fmat3); scene.add(hpldtestsphere); */ ///////////////////////////////// cout<<"After saving output nrrd"<<endl; scene.drawInit(); printf("after scene.drawInit()\n"); render(&viewer); printf("after render(&viewer)\n"); //getting Z-buffer printf("viewer: width = %d, height = %d, widthBuffer = %d, heightBuffer = %d\n", viewer.width(),viewer.height(),viewer.widthBuffer(),viewer.heightBuffer()); GLfloat* zbuffer = new GLfloat[viewer.widthBuffer()*viewer.heightBuffer()]; glReadPixels(0,0,viewer.widthBuffer(),viewer.heightBuffer(),GL_DEPTH_COMPONENT,GL_FLOAT,zbuffer); printf("Z-buffer\n"); int wposw = 56; int hposw = 62; double dposw = zbuffer[hposw*viewer.widthBuffer()+wposw]; printf("Before converting: wpos = %d, hpos = %d, dpos = %f\n", wposw,hposw,dposw); glm::vec4 wposworld = convertDepthBuffToWorldPos(wposw,hposw,dposw,&viewer); printf("After converting: x,y,z = %f,%f,%f\n", wposworld.x,wposworld.y,wposworld.z); glm::vec4 vpostest,wpostest; wpostest.x = arr_center[3]; wpostest.y = arr_center[4]; wpostest.z = arr_center[5]; wpostest.w = 1; vpostest = viewer.camera.view()*wpostest; printf("World Pos Test = %f,%f,%f; View Pos Test = %f,%f,%f\n", wpostest.x,wpostest.y,wpostest.z,vpostest.x,vpostest.y,vpostest.z); float minz=1000,maxz=-1000; for (int i=0; i<viewer.widthBuffer()*viewer.heightBuffer(); i++) { zbuffer[i] = linearizeDepthOrtho(lerp(-1,1,0,zbuffer[i],1),viewer.camera.clipNear(),viewer.camera.clipFar()); if (zbuffer[i]<minz) minz = zbuffer[i]; if (zbuffer[i]>maxz) maxz = zbuffer[i]; } printf("minmaxz = (%f,%f)\n",minz,maxz ); saveImage<GLfloat>(viewer.widthBuffer(),viewer.heightBuffer(),1,zbuffer,"depth.tga"); bool stateBKey = false; bool stateMKey = false; bool stateNKey = false; bool stateZoom = false; bool stateXKey = false; bool statePKey = false; bool stateDKey = false; bool stateFKey = false; bool stateIKey = false; double lastX, lastY; double verextent2 = verextent; bool isHoldOn = false; bool checkPath = false; int stateBKeyInt = 0; GLfloat* zbufferC = new GLfloat[viewer.widthBuffer()*viewer.heightBuffer()]; unsigned char *zbufferM = new unsigned char[viewer.widthBuffer()*viewer.heightBuffer()]; Nrrd *zbufferNin = nrrdNew(); Nrrd *zbufferDis = nrrdNew(); //saving approximately equidistant images for constructing space-time visualization /* for (int curptmp = 0; curptmp<countls; curptmp++) { interpolVolAndRender(curVolInMem, ptotime[curptmp], ptofrac[curptmp], queue, arr_nameid, arr_center, pathprefix, mop, pixSize, dim, size, eigenvec, verextent2, swidth, sstep, 
nOutChannel, d_volmem, d_dim, d_size, d_dir1, d_dir2, d_center, imageDouble, d_imageDouble, imageQuantized, viewer, viewer2, hpldview2, hpld_inter, spherescale_inter, hpld_sq_inter); sprintf(outnameslice,"spacetime_hp/im_%d.png",curptmp); if (nrrdWrap_va(ndblpng, imageQuantized, nrrdTypeUChar, 3, 4, width, height) || nrrdSave(outnameslice, ndblpng, NULL) ) { char *err = biffGetDone(NRRD); printf("%s: couldn't save output:\n%s", argv[0], err); free(err); nrrdNix(ndblpng); exit(1); } } */ int tmpcount = 0; //clock_t begin = clock(); time_t start,end,starti,endi; time (&start); time (&starti); //used for rotating the axis in the second window glm::vec3 preFrom = viewer2.camera.from(); glm::vec3 preAt = viewer2.camera.at(); glm::vec3 preUp = viewer2.camera.up(); //considers (from,at) as y, up as z, and x is side vector (right-handed coordinates) glm::vec3 preZ = glm::normalize(preUp); glm::vec3 preY = glm::normalize(preAt-preFrom); glm::vec3 preX = glm::cross(preY,preZ); //inverse (transpose) of the coordinates (glm matrix is initialized by column-major) glm::mat3 preCoI = glm::mat3(preX[0],preY[0],preZ[0], preX[1],preY[1],preZ[1], preX[2],preY[2],preZ[2]); while(!Hale::finishing){ glfwWaitEvents(); int keyPressed = viewer.getKeyPressed(); int keyPressed2 = viewer2.getKeyPressed(); if (keyPressed == GLFW_KEY_LEFT) { if (curinterp>0) { curinterp--; interpolVolAndRender(curVolInMem, ptotime[curinterp], ptofrac[curinterp], queue, arr_nameid, arr_center, pathprefix, mop, pixSize, dim, size, eigenvec, verextent2, swidth, sstep, nOutChannel, d_volmem, d_volmem2, d_dim, d_size, d_dir1, d_dir2, d_center, imageDouble, d_imageDouble, imageQuantized, viewer, viewer2, hpldview2, hpld_inter, spherescale_inter, hpld_sq_inter, statePKey, kern, stateIKey); } } else if (keyPressed == GLFW_KEY_RIGHT) { if (curinterp<countls-1) { curinterp++; interpolVolAndRender(curVolInMem, ptotime[curinterp], ptofrac[curinterp], queue, arr_nameid, arr_center, pathprefix, mop, pixSize, dim, size, eigenvec, verextent2, swidth, sstep, nOutChannel, d_volmem, d_volmem2, d_dim, d_size, d_dir1, d_dir2, d_center, imageDouble, d_imageDouble, imageQuantized, viewer, viewer2, hpldview2, hpld_inter, spherescale_inter, hpld_sq_inter, statePKey, kern, stateIKey); } } if (stateBKey!=viewer.getStateBKey()) { stateBKey = viewer.getStateBKey(); stateBKeyInt = (stateBKeyInt+1)%3; if (stateBKeyInt == 1) { scene.remove(hpld4); scene.add(hpld3); } else if (stateBKeyInt == 2) { scene.remove(hpld3); } else { //scene.remove(hpld3); scene.add(hpld4); } } if (keyPressed == 'M') { stateMKey = !stateMKey; if (stateMKey) { for (int i=0; i<vtexture.size(); i++) scene.remove(vtexture[i]); } else { for (int i=0; i<vtexture.size(); i++) scene.add(vtexture[i]); } } if (keyPressed == 'D') { stateDKey = !stateDKey; if (stateDKey) { scene.remove(hpld_inter); } else { scene.add(hpld_inter); } } if (keyPressed == 'F') { stateFKey = !stateFKey; if (stateFKey) { scene.remove(hpld_sq_inter); } else { scene.add(hpld_sq_inter); } } if (keyPressed == 'N') { stateNKey = !stateNKey; if (stateNKey) { for (int i=0; i<vsphereorig.size(); i++) scene.remove(vsphereorig[i]); scene.remove(hpldorig); } else { for (int i=0; i<vsphereorig.size(); i++) scene.add(vsphereorig[i]); scene.add(hpldorig); } } if (keyPressed == 'X') { stateXKey = !stateXKey; if (stateXKey) { for (int i=0; i<vsphere.size(); i++) scene.remove(vsphere[i]); } else { for (int i=0; i<vsphere.size(); i++) scene.add(vsphere[i]); } } if (keyPressed2 == 'I') { time (&endi); double dif = difftime (endi,starti); if 
(dif>0.3) { time (&starti); stateIKey = !stateIKey; if (statePKey) { printf("statePKey = True, doing kernel_peak, tmpcount = %d\n",tmpcount); if (stateIKey) hipLaunchKernelGGL(( kernel_peak_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_peak), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); //debug //sprintf(outnameslice,"test_peak.nrrd"); //if (nrrdWrap_va(ndblpng, imageDouble, nrrdTypeDouble, 3, 4, size[0], size[1]) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } else { printf("statePKey = false, doing kernel_cpr, tmpcount = %d\n",tmpcount); if (stateIKey) hipLaunchKernelGGL(( kernel_cpr_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_cpr), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } /* if (!statePKey) { if (stateIKey) hipLaunchKernelGGL(( kernel_cpr_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_cpr), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } else { if (stateIKey) hipLaunchKernelGGL(( kernel_peak_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, 
d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_peak), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); //} */ } } //switching between maxima and MIP in window2 if (keyPressed2 == 'P') { tmpcount++; //clock_t end = clock(); //double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; time (&end); double dif = difftime (end,start); printf("dif time = %f\n", dif); if (dif>0.3) { time (&start); statePKey = !statePKey; printf("statePKey = %d\n",statePKey); if (statePKey) { printf("statePKey = True, doing kernel_peak, tmpcount = %d\n",tmpcount); if (stateIKey) hipLaunchKernelGGL(( kernel_peak_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_peak), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); //debug //sprintf(outnameslice,"test_peak.nrrd"); //if (nrrdWrap_va(ndblpng, imageDouble, nrrdTypeDouble, 3, 4, size[0], size[1]) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } else { printf("statePKey = false, doing kernel_cpr, tmpcount = %d\n",tmpcount); if (stateIKey) hipLaunchKernelGGL(( kernel_cpr_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_cpr), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, 
d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } } } //increasing & decreasing width of the slab if (keyPressed2 == GLFW_KEY_MINUS || keyPressed2 == GLFW_KEY_EQUAL) { tmpcount++; //clock_t end = clock(); //double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; time (&end); double dif = difftime (end,start); printf("dif time = %f\n", dif); if (dif>0.3) { time (&start); if (keyPressed2 == GLFW_KEY_MINUS) swidth-=2; else swidth+=2; if (statePKey) { if (stateIKey) hipLaunchKernelGGL(( kernel_peak_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_peak), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); //debug sprintf(outnameslice,"test_peak.nrrd"); //if (nrrdWrap_va(ndblpng, imageDouble, nrrdTypeDouble, 3, 4, size[0], size[1]) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } else { if (stateIKey) hipLaunchKernelGGL(( kernel_cpr_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_cpr), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); 
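        /*
         * Re-render pattern used throughout this event loop: launch
         * kernel_cpr / kernel_peak (or their *_2chan variants), check the
         * launch with hipGetLastError(), wait with hipDeviceSynchronize(),
         * then copy d_imageDouble back to the host before quantizing.
         * A minimal sketch of that pattern, assuming a kernel `k` with the
         * same parameter list (kept in a comment, not compiled here):
         *
         *   hipLaunchKernelGGL((k), dim3(numBlocks2), dim3(threadsPerBlock2), 0, 0,
         *                      d_dim, d_size, verextent2, d_center, d_dir1, d_dir2,
         *                      swidth, sstep, nOutChannel, d_imageDouble);
         *   hipError_t e = hipGetLastError();              // launch-time errors
         *   if (e != hipSuccess) printf("launch: %s\n", hipGetErrorString(e));
         *   e = hipDeviceSynchronize();                    // execution-time errors
         *   if (e != hipSuccess) printf("exec: %s\n", hipGetErrorString(e));
         *   hipMemcpy(imageDouble, d_imageDouble,
         *             sizeof(double)*size[0]*size[1]*nOutChannel,
         *             hipMemcpyDeviceToHost);
         */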
//quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } } } //processing zooming in the second window (MIP image) if (stateZoom) { if (!viewer2.getButton(0)) { /* double curY = viewer2.getLastY(); int heightBuff = viewer2.heightBuffer(); double pcent = (curY-lastY)/heightBuff; printf("percent zoom = %f (curY = %f, lastY = %f, heightBuff = %d)\n", pcent, curY,lastY,heightBuff); stateZoom = false; verextent2 = verextent2*(1+pcent); hipLaunchKernelGGL(( kernel_cpr), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); */ stateZoom = false; } else if (std::isnan(lastY)) lastY = viewer2.getLastY(); else { double curY = viewer2.getLastY(); int heightBuff = viewer2.heightBuffer(); double pcent = (curY-lastY)/heightBuff; printf("percent zoom = %f (curY = %f, lastY = %f, heightBuff = %d)\n", pcent, curY,lastY,heightBuff); //stateZoom = false; verextent2 = verextent2*(1+pcent); if (statePKey) { // hipLaunchKernelGGL(( kernel_peak), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); if (stateIKey) hipLaunchKernelGGL(( kernel_peak_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_peak), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } else { if (stateIKey) hipLaunchKernelGGL(( kernel_cpr_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_cpr), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); if (statePKey) quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); else quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); 
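        /*
         * Quantization note: the choice just above mirrors the display mode.
         * `range_p` is used for the peak-detection image and `range` for the
         * MIP image; both are global arrays of per-channel {min,max} pairs
         * for the four output channels. Assuming quantizeImageDouble3D_Range
         * does the usual clamp-and-scale to 8 bits, the per-channel mapping
         * is roughly (sketch only, using this file's CLIP macro):
         *
         *   // v: double value of channel c; lo = range[2*c], hi = range[2*c+1]
         *   unsigned char q = (unsigned char)(255.0 * CLIP((v - lo) / (hi - lo), 0.0, 1.0));
         */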
setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); lastY = curY; } } else if (viewer2.getButton(0) && viewer2.getMode()==Hale::viewerModeZoom) { stateZoom = true; lastY = viewer2.getLastY(); printf("Begin zooming: lastY = %f\n",lastY); } if (viewer.getButton(0)) { if (!isHoldOn) { glReadPixels(0,0,viewer.widthBuffer(),viewer.heightBuffer(),GL_DEPTH_COMPONENT,GL_FLOAT,zbufferC); //testing // sprintf(outnameslice,"depth.nrrd"); // if (nrrdWrap_va(ndblpng, zbufferC, nrrdTypeFloat, 2, viewer.widthBuffer(), viewer.heightBuffer()) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } //end of testing int wposwC = viewer.getClickedX(); int hposwC = viewer.heightBuffer()-viewer.getClickedY(); double dposwC = zbufferC[hposwC*viewer.widthBuffer()+wposwC]; printf("First Clicked (w,h,depth) = %d,%d,%f\n", wposwC,hposwC,dposwC); glm::vec4 wposviewC = convertDepthBuffToViewPos(wposwC,hposwC,dposwC,&viewer); printf("First Clicked View Pos = %f,%f,%f\n", wposviewC.x,wposviewC.y,wposviewC.z); //debug //glm::vec4 testpostview = convertWorldToViewPos(4,5,4,&viewer); //printf("View Pos of World Pos (4,5,4) is = %f,%f,%f\n", testpostview.x,testpostview.y,testpostview.z); //// isHoldOn = true; checkPath = (dposwC<1.0); viewer.setPaused(checkPath); if (checkPath) { printf("Inside checkPath of first click\n"); for (int ii=0; ii<viewer.widthBuffer()*viewer.heightBuffer(); ii++) { if (zbufferC[ii]!=1.0) zbufferM[ii] = 255; else zbufferM[ii] = 0; } printf("after assigning zbufferM\n"); if (nrrdWrap_va(zbufferNin, zbufferM, nrrdTypeUChar, 2, viewer.widthBuffer(), viewer.heightBuffer()) ) { char *err = biffGetDone(NRRD); printf("%s: Error wrapping Nrrd:\n%s", argv[0], err); free(err); nrrdNix(zbufferNin); exit(1); } if (nrrdDistanceL2Signed(zbufferDis, zbufferNin, nrrdTypeFloat, NULL, 128, AIR_FALSE)) { char *err = biffGetDone(NRRD); printf("%s: Error doing distance transform:\n%s", argv[0], err); free(err); nrrdNix(zbufferDis); exit(1); } sprintf(outnameslice,"dis_trans.png"); //if (nrrdSave(outnameslice, zbufferDis, NULL)) { // char *err = biffGetDone(NRRD); // printf("%s: Error saving distance transform:\n%s", argv[0], err); // free(err); nrrdNix(zbufferDis); // exit(1); //} } } if (checkPath) { //GLfloat* zbuffer = new GLfloat[viewer.widthBuffer()*viewer.heightBuffer()]; //glReadPixels(0,0,viewer.widthBuffer(),viewer.heightBuffer(),GL_DEPTH_COMPONENT,GL_FLOAT,zbuffer); int wposw = viewer.getLastX(); int hposw = viewer.heightBuffer()-viewer.getLastY(); double disgrad[2]; disgrad[0] = disgrad[1] = 0; if (wposw<viewer.widthBuffer()-1 && wposw>0 && hposw<viewer.heightBuffer()-1 && hposw>0) { disgrad[0] = -(((float*)zbufferDis->data)[hposw*viewer.widthBuffer()+wposw+1]-((float*)zbufferDis->data)[hposw*viewer.widthBuffer()+wposw-1]); disgrad[1] = -(((float*)zbufferDis->data)[(hposw+1)*viewer.widthBuffer()+wposw]-((float*)zbufferDis->data)[(hposw-1)*viewer.widthBuffer()+wposw]); if (disgrad[0] || disgrad[1]) { printf("after assigning, disgrad = %f,%f\n",disgrad[0],disgrad[1]); normalize(disgrad,2); printf("after normalizing, disgrad = %f,%f\n",disgrad[0],disgrad[1]); double disval = ((float*)zbufferDis->data)[hposw*viewer.widthBuffer()+wposw]; printf("old wposw,hposw = 
%d,%d; disval = %f\n", wposw,hposw,disval); wposw += (disval*disgrad[0]); hposw += (disval*disgrad[1]); printf("jumped wposw,hposw = %d,%d\n", wposw,hposw); } } double dposw = zbufferC[hposw*viewer.widthBuffer()+wposw]; printf("Drag Clicked (w,h,depth) = %d,%d,%f\n", wposw,hposw,dposw); glm::vec4 wposview = convertDepthBuffToViewPos(wposw,hposw,dposw,&viewer); printf("Drag Clicked View Pos = %f,%f,%f\n", wposview.x,wposview.y,wposview.z); if (dposw<1.0 || disgrad[0] || disgrad[1]) { double dismin = INT_MAX; int mini = -1; for (int i=1; i<countline-3; i++) { glm::vec4 curposview = convertWorldToViewPos(arr_center[i*3+0],arr_center[i*3+1],arr_center[i*3+2],&viewer); //test using on x and y in view coordinate to find the closest point (not using z) //double dis1 = diss2P(wposview.x,wposview.y,wposview.z,curposview.x,curposview.y,curposview.z); glm::vec4 curposview2 = convertWorldToViewPos(arr_center[(i+1)*3+0],arr_center[(i+1)*3+1],arr_center[(i+1)*3+2],&viewer); double vec1[3],vec2[3]; vec1[0] = curposview.x-wposview.x; vec1[1] = curposview.y-wposview.y; vec1[2] = 0; vec2[0] = curposview2.x-wposview.x; vec2[1] = curposview2.y-wposview.y; vec2[2] = 0; normalize(vec1,3); normalize(vec2,3); double angle = computeAngle(vec1,vec2); printf("Point %d (%d), View pos 1 = %f,%f,%f; View pos 2 = %f,%f,%f; angle = %f\n",i,arr_nameid[i],curposview.x,curposview.y,curposview.z,curposview2.x,curposview2.y,curposview2.z, angle); if (angle > 100) { double dis1 = diss2P(wposview.x,wposview.y,0,curposview.x,curposview.y,0); //double dis2 = diss2P(wposview.x,wposview.y,wposview.z,curposview.x,curposview.y,curposview.z); double dis2 = diss2P(wposview.x,wposview.y,0,curposview2.x,curposview2.y,0); printf("Point %d (%d) View Pos = %f,%f,%f, dis = %f\n",i,arr_nameid[i], curposview.x,curposview.y,curposview.z,dis1); if (dis1+dis2<dismin) { dismin = dis1+dis2; mini = i; } } } if (mini>0) { //int numsample = 20; dismin = INT_MAX; double mint = -1; /* for (int i=0; i<=numsample; i++) { double t = (double)i/(double)numsample; double center[3]; for (int j=0; j<3; j++) center[j] = cubicFilter<double>(t, arr_center[(mini-1)*3+j], arr_center[(mini)*3+j], arr_center[(mini+1)*3+j], arr_center[(mini+2)*3+j]); glm::vec4 curposview = convertWorldToViewPos(center[0],center[1],center[2],&viewer); //double dis = diss2P(wposview.x,wposview.y,wposview.z,curposview.x,curposview.y,curposview.z); double dis = diss2P(wposview.x,wposview.y,1,curposview.x,curposview.y,1); if (dis<dismin) { dismin = dis; mint = t; } } */ int curp = timetop[mini]; while (curp<countls && ptotime[curp]==mini) { glm::vec4 curposview = convertWorldToViewPos(lpld3->xyzw[curp*4+0],lpld3->xyzw[curp*4+1],lpld3->xyzw[curp*4+2],&viewer); double dis = diss2P(wposview.x,wposview.y,1,curposview.x,curposview.y,1); if (dis<dismin) { dismin = dis; mint = ptofrac[curp]; curinterp = curp; } curp++; } if (curVolInMem != mini) { curVolInMem = mini; //find lerping between 2 volumes count = mini; /* curnameind = arr_nameid[count]; sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); fprintf(stderr, "%s: trouble reading \"%s\":\n%s", me, inname, err); free(err); return; } cout<<"read file "<<inname<<endl; unsigned int pixSize; hipChannelFormatDesc channelDesc; pixSize = sizeof(float); channelDesc = hipCreateChannelDesc<float>(); if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } int dim[4]; if 
(nin->dim == 3) { dim[0] = 1; dim[1] = nin->axis[0].size; dim[2] = nin->axis[1].size; dim[3] = nin->axis[2].size; } else //4-channel { dim[0] = nin->axis[0].size; dim[1] = nin->axis[1].size; dim[2] = nin->axis[2].size; dim[3] = nin->axis[3].size; } int channel = 1; for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } //debug for (int k=0; k<=2; k++) for (int j=0; j<=2; j++) for (int i=0; i<=2; i++) printf("volume 1: at (%d,%d,%d) = %f\n", i,j,k,filemem0[k*dim[1]*dim[2]+j*dim[1]+i]); const hipExtent volumeSize = make_hipExtent(dim[1], dim[2], dim[3]); if (!d_volumeArray[0]) hipMalloc3DArray(&d_volumeArray[0], &channelDesc, volumeSize); hipMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_hipPitchedPtr((void*)filemem0, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[0]; copyParams0.extent = volumeSize; copyParams0.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams0); */ hipError_t errCu; hipChannelFormatDesc channelDesc; channelDesc = hipCreateChannelDesc<float>(); //hipArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); hipArray* d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex0.normalized = false; tex0.filterMode = hipFilterModeLinear; tex0.addressMode[0] = hipAddressModeBorder; tex0.addressMode[1] = hipAddressModeBorder; tex0.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex0, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex3.normalized = false; tex3.filterMode = hipFilterModeLinear; tex3.addressMode[0] = hipAddressModeBorder; tex3.addressMode[1] = hipAddressModeBorder; tex3.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex3, d_curvolarr, channelDesc); //read second file count = mini+1; /* curnameind = arr_nameid[count]; sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); fprintf(stderr, "%s: trouble reading \"%s\":\n%s", me, inname, err); free(err); return; } cout<<"read file "<<inname<<endl; if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } if (!d_volumeArray[1]) hipMalloc3DArray(&d_volumeArray[1], &channelDesc, volumeSize); hipMemcpy3DParms copyParams1 = {0}; copyParams1.srcPtr = make_hipPitchedPtr((void*)filemem0, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray[1]; copyParams1.extent = volumeSize; copyParams1.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams1); */ //d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex1.normalized = false; tex1.filterMode = hipFilterModeLinear; tex1.addressMode[0] = hipAddressModeBorder; tex1.addressMode[1] = hipAddressModeBorder; tex1.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex1, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex4.normalized = false; tex4.filterMode = hipFilterModeLinear; tex4.addressMode[0] = hipAddressModeBorder; tex4.addressMode[1] = hipAddressModeBorder; tex4.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex4, d_curvolarr, 
channelDesc); } int numThread1D; alpha = mint; numThread1D = 8; dim3 threadsPerBlock(numThread1D,numThread1D,numThread1D); dim3 numBlocks((dim[1]+numThread1D-1)/numThread1D,(dim[2]+numThread1D-1)/numThread1D,(dim[3]+numThread1D-1)/numThread1D); double alpha = mint; //kernel_interpol<<<numBlocks,threadsPerBlock>>>(d_volmem,d_dim,alpha); hipLaunchKernelGGL(( kernel_interpol2), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_volmem,d_volmem2,d_dim,alpha); hipError_t errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error After kernel_nterpol when clicking: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync After kernel_nterpol when clicking: %s\n", hipGetErrorString(errCu)); //copy from device's global mem to texture mem copyParams0.srcPtr = make_hipPitchedPtr((void*)d_volmem, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[NTEX]; copyParams0.extent = volumeSize; copyParams0.kind = hipMemcpyDeviceToDevice; hipMemcpy3D(&copyParams0); tex2.normalized = false; tex2.filterMode = hipFilterModeLinear; tex2.addressMode[0] = hipAddressModeBorder; tex2.addressMode[1] = hipAddressModeBorder; tex2.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex2, d_volumeArray[NTEX], channelDesc); copyParams1.srcPtr = make_hipPitchedPtr((void*)d_volmem2, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray1[NTEX]; copyParams1.extent = volumeSize; copyParams1.kind = hipMemcpyDeviceToDevice; hipMemcpy3D(&copyParams1); tex5.normalized = false; tex5.filterMode = hipFilterModeLinear; tex5.addressMode[0] = hipAddressModeBorder; tex5.addressMode[1] = hipAddressModeBorder; tex5.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex5, d_volumeArray1[NTEX], channelDesc); //after that call the normal kernel to do MIP count = mini; /* for (int i=0; i<3; i++) center[i] = cubicFilter<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //center[i] = ctmr(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); printf("center = %f %f %f\n", center[0],center[1],center[2]); */ double FT[3]; double FN[3],FB[3]; double dr[3],ddr[3]; /* for (int i=0; i<3; i++) dr[i] = cubicFilter_G<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //dr[i] = ctmr_g(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); for (int i=0; i<3; i++) ddr[i] = cubicFilter_GG<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //ddr[i] = ctmr_gg(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); */ if (kern==1) { for (int i=0; i<3; i++) { center[i] = cubicFilter<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); dr[i] = cubicFilter_G<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); ddr[i] = cubicFilter_GG<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } else { for (int i=0; i<3; i++) { center[i] = ctmr(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], 
arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); dr[i] = ctmr_g(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); ddr[i] = ctmr_gg(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); normalize(dr,3); normalize(ddr,3); printf("after normalizing\n"); printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); memcpy(FT,dr,sizeof(double)*3); memcpy(FN,eigenvec,sizeof(double)*3); normalize(FN,3); cross(FT,FN,FB); cross(FB,FT,FN); memcpy(dir1,FN,sizeof(double)*3); memcpy(dir2,FB,sizeof(double)*3); printf("Interpolation: N = %f %f %f, B = %f %f %f, T = %f %f %f, dotNB = %f, dotNT = %f, dotBT = %f\n",FN[0],FN[1],FN[2],FB[0],FB[1],FB[2],FT[0],FT[1],FT[2], dotProduct(FN,FB,3),dotProduct(FN,FT,3),dotProduct(FB,FT,3)); hipMemcpy(d_dir1, dir1, 3*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_dir2, dir2, 3*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_center,center,3*sizeof(double), hipMemcpyHostToDevice); numThread1D = 16; dim3 threadsPerBlock2(numThread1D,numThread1D); dim3 numBlocks2((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D); //if (statePKey) // hipLaunchKernelGGL(( kernel_peak), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); //else //kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); // hipLaunchKernelGGL(( kernel_cpr_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); if (statePKey) { if (stateIKey) hipLaunchKernelGGL(( kernel_peak_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_peak), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } else { if (stateIKey) hipLaunchKernelGGL(( kernel_cpr_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_cpr), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error After kernel_cpr when clicking: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync After kernel_cpr when clicking: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); short width = size[0]; short height = size[1]; //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); if (statePKey) quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); else quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); 
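        /*
         * Frame used for the re-rendered cross-section: FT is the normalized
         * path tangent dr, FN starts from `eigenvec` and is re-orthogonalized
         * against FT via two cross products (FB = FT x FN, then FN = FB x FT),
         * giving an orthonormal N/B/T frame with dir1 = FN and dir2 = FB.
         * The textured square's model matrix below packs that frame column by
         * column (glm is column-major); the element-wise assignments are
         * equivalent to:
         *
         *   glm::mat4 M(FN[0], FN[1], FN[2], 0,              // column 0: N
         *               FB[0], FB[1], FB[2], 0,              // column 1: B
         *               FT[0], FT[1], FT[2], 0,              // column 2: T
         *               center[0], center[1], center[2], 1); // column 3: position
         */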
hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); printf("alpha = %f",alpha); printf("center of the previous interpolated point: %f,%f,%f\n", fmat2[3][0],fmat2[3][1],fmat2[3][2]); printf("center of the last interpolated point: %f,%f,%f\n", center[0],center[1],center[2]); fmat2[3][0] = center[0]; fmat2[3][1] = center[1]; fmat2[3][2] = center[2]; hpld_inter->model(fmat2); //update the local texture frame in first viewer viewer.current(); glm::mat4 tmat_sq_inter = glm::mat4(); tmat_sq_inter[0][0] = FN[0]; tmat_sq_inter[0][1] = FN[1]; tmat_sq_inter[0][2] = FN[2]; tmat_sq_inter[0][3] = 0; tmat_sq_inter[1][0] = FB[0]; tmat_sq_inter[1][1] = FB[1]; tmat_sq_inter[1][2] = FB[2]; tmat_sq_inter[1][3] = 0; tmat_sq_inter[2][0] = FT[0]; tmat_sq_inter[2][1] = FT[1]; tmat_sq_inter[2][2] = FT[2]; tmat_sq_inter[2][3] = 0; tmat_sq_inter[3][0] = center[0]; tmat_sq_inter[3][1] = center[1]; tmat_sq_inter[3][2] = center[2]; tmat_sq_inter[3][3] = 1; glm::mat4 smat_sq_inter = glm::mat4(); smat_sq_inter[0][0] = 2; smat_sq_inter[1][1] = 2; glm::mat4 fmat_sq_inter = tmat_sq_inter*smat_sq_inter; hpld_sq_inter->model(fmat_sq_inter); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } } } } else { isHoldOn = false; checkPath = false; viewer.setPaused(false); } //rotation in the second window printf("testing view params from window 1\n"); printf("curFrom = %f,%f,%f\n",viewer.camera.from()[0],viewer.camera.from()[1],viewer.camera.from()[2]); printf("curAt = %f,%f,%f\n",viewer.camera.at()[0],viewer.camera.at()[1],viewer.camera.at()[2]); printf("curUp = %f,%f,%f\n",viewer.camera.up()[0],viewer.camera.up()[1],viewer.camera.up()[2]); printf("before processing the rotation in second window++++++++++++++++++++++++\n"); printf("preFrom = %f,%f,%f\n",preFrom[0],preFrom[1],preFrom[2]); printf("curFrom = %f,%f,%f\n",viewer2.camera.from()[0],viewer2.camera.from()[1],viewer2.camera.from()[2]); printf("preAt = %f,%f,%f\n",preAt[0],preAt[1],preAt[2]); printf("curAt = %f,%f,%f\n",viewer2.camera.at()[0],viewer2.camera.at()[1],viewer2.camera.at()[2]); printf("preUp = %f,%f,%f\n",preUp[0],preUp[1],preUp[2]); printf("curUp = %f,%f,%f\n",viewer2.camera.up()[0],viewer2.camera.up()[1],viewer2.camera.up()[2]); if ((viewer2.camera.from() != preFrom || viewer2.camera.at()!=preAt || viewer2.camera.up()!=preUp)) //&& (viewer2.isMouseReleased())) { printf("changed viewing angles in view2------------------------------------\n"); glm::vec3 curZ = glm::normalize(viewer2.camera.up()); glm::vec3 curY = glm::normalize(viewer2.camera.at()-viewer2.camera.from()); glm::vec3 curX = glm::cross(curY,curZ); glm::mat3 curCo = glm::mat3(curX[0],curX[1],curX[2], curY[0],curY[1],curY[2], curZ[0],curZ[1],curZ[2]); glm::mat3 curTrans = curCo*preCoI; preCoI = glm::transpose(curCo); preFrom = viewer2.camera.from(); preAt = viewer2.camera.at(); preUp = viewer2.camera.up(); double FT[3]; cross(dir1,dir2,FT); glm::mat3 preCo2(FT[0],FT[1],FT[2], dir1[0],dir1[1],dir1[2], dir2[0],dir2[1],dir2[2]); glm::mat3 curCo2 = curTrans*preCo2; dir1[0] = curCo2[1][0]; dir1[1] = curCo2[1][1]; dir1[2] = curCo2[1][2]; dir2[0] = curCo2[2][0]; dir2[1] = curCo2[2][1]; dir2[2] = curCo2[2][2]; FT[0] = curCo2[0][0]; FT[1] = curCo2[0][1]; FT[2] = curCo2[0][2]; hipMemcpy(d_dir1, dir1, 3*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_dir2, dir2, 3*sizeof(double), hipMemcpyHostToDevice); if (statePKey) { if (stateIKey) hipLaunchKernelGGL(( kernel_peak_2chan), 
dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_peak), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); //debug //sprintf(outnameslice,"test_peak.nrrd"); //if (nrrdWrap_va(ndblpng, imageDouble, nrrdTypeDouble, 3, 4, size[0], size[1]) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); //viewer.current(); //hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } else { if (stateIKey) hipLaunchKernelGGL(( kernel_cpr_2chan), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else hipLaunchKernelGGL(( kernel_cpr), dim3(numBlocks2),dim3(threadsPerBlock2), 0, 0, d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = hipGetLastError(); if (errCu != hipSuccess) printf("Error: %s\n", hipGetErrorString(errCu)); errCu = hipDeviceSynchronize(); if (errCu != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(errCu)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); //viewer.current(); //hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } viewer.current(); glm::mat4 tmat_sq_inter = glm::mat4(); tmat_sq_inter[0][0] = dir1[0]; tmat_sq_inter[0][1] = dir1[1]; tmat_sq_inter[0][2] = dir1[2]; tmat_sq_inter[0][3] = 0; tmat_sq_inter[1][0] = dir2[0]; tmat_sq_inter[1][1] = dir2[1]; tmat_sq_inter[1][2] = dir2[2]; tmat_sq_inter[1][3] = 0; tmat_sq_inter[2][0] = FT[0]; tmat_sq_inter[2][1] = FT[1]; tmat_sq_inter[2][2] = FT[2]; tmat_sq_inter[2][3] = 0; tmat_sq_inter[3][0] = center[0]; tmat_sq_inter[3][1] = center[1]; tmat_sq_inter[3][2] = center[2]; tmat_sq_inter[3][3] = 1; glm::mat4 smat_sq_inter = glm::mat4(); smat_sq_inter[0][0] = 2; smat_sq_inter[1][1] = 2; glm::mat4 fmat_sq_inter = tmat_sq_inter*smat_sq_inter; hpld_sq_inter->model(fmat_sq_inter); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char 
*)imageQuantized,size[0],size[1],4);
    }

    viewer.current();
    render(&viewer);
    viewer2.current();
    render(&viewer2);
    viewer.current();

    printf("end of an event loop\n");
    printf("viewer: buffer = %d %d, window = %d %d\n", viewer.widthBuffer(), viewer.heightBuffer(), viewer.width(), viewer.height());
    printf("viewer2: buffer = %d %d, window = %d %d\n", viewer2.widthBuffer(), viewer2.heightBuffer(), viewer2.width(), viewer2.height());
  }

  /* clean exit; all okay */
  Hale::done();
  airMopOkay(mop);
  return 0;
}
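/*
 * Interaction summary for the event loop above:
 *   LEFT / RIGHT        step the interpolated sample point along the path
 *   B                   cycles between showing hpld3, hpld4, or neither
 *   M / N / X / D / F   toggle the textured slices (vtexture), the original
 *                       spheres (vsphereorig + hpldorig), the sample spheres
 *                       (vsphere), the interpolated sphere (hpld_inter), and
 *                       the moving textured square (hpld_sq_inter)
 *   P  (window 2)       switch between MIP (kernel_cpr*) and peak detection
 *                       (kernel_peak*), rate-limited to ~0.3 s
 *   I  (window 2)       switch between the one- and two-channel kernels
 *   - / =  (window 2)   shrink / grow the slab width `swidth` by 2
 *   zoom drag (win 2)   rescales `verextent2` and re-renders
 *   click+drag (win 1)  snaps toward the path using the depth buffer and an
 *                       L2 distance transform, then re-interpolates the
 *                       volume and the sampling frame at the picked point
 */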
f7e36f32f2c2f8092a69670ad839df048545d88e.cu
#include <iostream> #include <Hale.h> #include <glm/glm.hpp> #include "unistd.h" // for sleep() #include <fstream> #include <cuda_runtime.h> #include <cuda.h> #include "lib/Image.h" #include <vector> #include <unordered_map> #include <time.h> using namespace std; //from cuda_volume_rendering #define PI 3.14159265 #define MAX(a,b) ((a)>(b)?(a):(b)) texture<float, 3, cudaReadModeElementType> tex0; // 3D texture texture<float, 3, cudaReadModeElementType> tex1; // 3D texture texture<float, 3, cudaReadModeElementType> tex2; // 3D texture texture<float, 3, cudaReadModeElementType> tex3; // 3D texture texture<float, 3, cudaReadModeElementType> tex4; // 3D texture texture<float, 3, cudaReadModeElementType> tex5; // 3D texture /* cudaArray *d_volumeArray0 = 0; cudaArray *d_volumeArray1 = 0; cudaArray *d_volumeArray2 = 0; */ #define NTEX 3 //texture<float, 3, cudaReadModeElementType> tex[NTEX]; //+1 for an extra volume for interpolation cudaArray *d_volumeArray[NTEX+1]; cudaArray *d_volumeArray1[NTEX+1]; //range for quantizing double range[] = {0,1,0,1600,0,3300,0,1}; double range_p[] = {0,1,0,1,0,1,0,1}; #define CLIP(x,a,b) ((x)<(a)?(a):((x)>(b)?(b):(x))) //ctmr filter double ctmr_kern(double x) { if (x<-2) return 0; if (x<-1) return 0.5*(4+8*x+5*x*x+x*x*x); if (x<0) return 0.5*(2-5*x*x-3*x*x*x); if (x<1) return 0.5*(2-5*x*x+3*x*x*x); if (x<2) return 0.5*(4-8*x+5*x*x-x*x*x); return 0; } double ctmr_kern_g(double x) { if (x<-2) return 0; if (x<-1) return 0.5*(3*x*x+10*x+8); if (x<0) return 0.5*(-9*x*x-10*x); if (x<1) return 0.5*(9*x*x-10*x); if (x<2) return 0.5*(-3*x*x+10*x-8); return 0; } double ctmr_kern_gg(double x) { if (x<-2) return 0; if (x<-1) return 0.5*(6*x+10); if (x<0) return 0.5*(-18*x-10); if (x<1) return 0.5*(18*x-10); if (x<2) return 0.5*(-6*x+10); return 0; } //centered at a1, x in [0,1) double ctmr(double x, double a0, double a1, double a2, double a3) { double res = 0; res = a0*ctmr_kern(x+1)+a1*ctmr_kern(x)+a2*ctmr_kern(x-1)+a3*ctmr_kern(x-2); return res; } double ctmr_g(double x, double a0, double a1, double a2, double a3) { double res = 0; res = a0*ctmr_kern_g(x+1)+a1*ctmr_kern_g(x)+a2*ctmr_kern_g(x-1)+a3*ctmr_kern_g(x-2); return res; } double ctmr_gg(double x, double a0, double a1, double a2, double a3) { double res = 0; res = a0*ctmr_kern_gg(x+1)+a1*ctmr_kern_gg(x)+a2*ctmr_kern_gg(x-1)+a3*ctmr_kern_gg(x-2); return res; } // w0, w1, w2, and w3 are the four cubic B-spline basis functions __host__ __device__ float w0(float a) { return (1.0f/6.0f)*(a*(a*(-a + 3.0f) - 3.0f) + 1.0f); } __host__ __device__ float w1(float a) { return (1.0f/6.0f)*(a*a*(3.0f*a - 6.0f) + 4.0f); } __host__ __device__ float w2(float a) { return (1.0f/6.0f)*(a*(a*(-3.0f*a + 3.0f) + 3.0f) + 1.0f); } __host__ __device__ float w3(float a) { return (1.0f/6.0f)*(a*a*a); } //derivatives of basic functions __host__ __device__ float w0g(float a) { return -(1.0f/2.0f)*a*a + a - (1.0f/2.0f); } __host__ __device__ float w1g(float a) { return (3.0f/2.0f)*a*a - 2*a; } __host__ __device__ float w2g(float a) { return -(3.0f/2.0f)*a*a + a + (1.0/2.0); } __host__ __device__ float w3g(float a) { return (1.0f/2.0f)*a*a; } //second derivatives of basic functions __host__ __device__ float w0gg(float a) { return 1-a; } __host__ __device__ float w1gg(float a) { return 3*a-2; } __host__ __device__ float w2gg(float a) { return 1-3*a; } __host__ __device__ float w3gg(float a) { return a; } // filter 4 values using cubic splines template<class T> __host__ __device__ T cubicFilter(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * w0(x); 
r += c1 * w1(x); r += c2 * w2(x); r += c3 * w3(x); return r; } //filtering with derivative of basic functions template<class T> __host__ __device__ T cubicFilter_G(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * w0g(x); r += c1 * w1g(x); r += c2 * w2g(x); r += c3 * w3g(x); return r; } //filtering with second derivative of basic functions template<class T> __host__ __device__ T cubicFilter_GG(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * w0gg(x); r += c1 * w1gg(x); r += c2 * w2gg(x); r += c3 * w3gg(x); return r; } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter<R>(fy, cubicFilter<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } //gradient in X direction template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GX(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter<R>(fy, cubicFilter_G<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GY(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter_G<R>(fy, cubicFilter<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> __device__ R tex3DBicubic(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY<T,R>(texref,x,y,pz-1), tex3DBicubicXY<T,R>(texref,x,y,pz), tex3DBicubicXY<T,R>(texref,x,y,pz+1), tex3DBicubicXY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GX(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, 
tex3DBicubicXY_GX<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GY(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GY<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GZ(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter_G<R>(fz, tex3DBicubicXY<T,R>(texref,x,y,pz-1), tex3DBicubicXY<T,R>(texref,x,y,pz), tex3DBicubicXY<T,R>(texref,x,y,pz+1), tex3DBicubicXY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GGX(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter<R>(fy, cubicFilter_GG<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter_GG<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter_GG<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter_GG<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GGY(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter_GG<R>(fy, cubicFilter<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } //derivative through X, then through Y template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GYGX(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter_G<R>(fy, cubicFilter_G<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> __device__ R tex3DBicubic_GGX(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float pz = 
floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GGX<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GGX<T,R>(texref,x,y,pz), tex3DBicubicXY_GGX<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GGX<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GGY(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GGY<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GGY<T,R>(texref,x,y,pz), tex3DBicubicXY_GGY<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GGY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GGZ(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter_GG<R>(fz, tex3DBicubicXY<T,R>(texref,x,y,pz-1), tex3DBicubicXY<T,R>(texref,x,y,pz), tex3DBicubicXY<T,R>(texref,x,y,pz+1), tex3DBicubicXY<T,R>(texref,x,y,pz+2) ); } //derivative through X, then through Y template<class T, class R> __device__ R tex3DBicubic_GYGX(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GYGX<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GYGX<T,R>(texref,x,y,pz), tex3DBicubicXY_GYGX<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GYGX<T,R>(texref,x,y,pz+2) ); } //derivative through X, then through Z template<class T, class R> __device__ R tex3DBicubic_GZGX(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter_G<R>(fz, tex3DBicubicXY_GX<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+2) ); } //derivative through Y, then through Z template<class T, class R> __device__ R tex3DBicubic_GZGY(const texture<T, 3, cudaReadModeElementType> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter_G<R>(fz, tex3DBicubicXY_GY<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+2) ); } __host__ __device__ int cu_getIndex2(int i, int j, int s1, int s2) { return i*s2+j; } __host__ __device__ double dotProduct(double *u, double *v, int s) { double result = 0; for (int i=0; i<s; i++) result += (u[i]*v[i]); return result; } __host__ __device__ double lenVec(double *a, int s) { double len = 0; for (int i=0; i<s; i++) len += (a[i]*a[i]); len = sqrt(len); return len; } __host__ __device__ void addVector(double *a, double *b, double *c, int len) { for (int i=0; i<len; i++) c[i] = a[i]+b[i]; } __host__ __device__ void scaleVector(double *a, int len, double scale) { for (int i=0; i<len; i++) a[i]*=scale; } void mulMatPoint(double X[4][4], double Y[4], double Z[4]) { for (int i=0; i<4; i++) Z[i] = 0; for (int i=0; i<4; i++) for (int k=0; k<4; k++) Z[i] += (X[i][k]*Y[k]); } __device__ void cu_mulMatPoint(double* X, double* Y, double* Z) { for (int i=0; i<4; i++) Z[i] = 0; for (int i=0; i<4; i++) for (int k=0; k<4; k++) Z[i] += (X[cu_getIndex2(i,k,4,4)]*Y[k]); } __device__ void cu_mulMatPoint3(double* X, double* Y, double* Z) { for (int i=0; i<3; i++) Z[i] = 0; for (int i=0; i<3; i++) for (int k=0; k<3; k++) Z[i] += (X[cu_getIndex2(i,k,3,3)]*Y[k]); } __host__ __device__ void advancePoint(double* point, double* dir, double scale, double* newpos) { for (int i=0; i<3; i++) newpos[i] = point[i]+dir[i]*scale; } __device__ 
bool cu_isInsideDouble(double i, double j, double k, int dim1, int dim2, int dim3) { return ((i>=0)&&(i<=(dim1-1))&&(j>=0)&&(j<=(dim2-1))&&(k>=0)&&(k<=(dim3-1))); } __device__ double cu_computeAlpha(double val, double grad_len, double isoval, double alphamax, double thickness) { if ((grad_len == 0.0) && (val == isoval)) return alphamax; else if ((grad_len>0.0) && (isoval >= (val-thickness*grad_len)) && (isoval <= (val+thickness*grad_len))) return alphamax*(1-abs(isoval-val)/(grad_len*thickness)); else return 0.0; } __device__ double cu_inAlpha(double val, double grad_len, double isoval, double thickness) { if (val >= isoval) return 1.0; else { return max(0.0,(1-abs(isoval-val)/(grad_len*thickness))); } } __device__ double cu_inAlphaX(double dis, double thickness) { if (dis<0) return 1.0; //return max(0.0,min(1.0,1.4-fabs(dis)/thickness)); return max(0.0,min(1.0,1.0-fabs(dis)/thickness)); } __device__ double cu_inAlphaX2(double dis, double thickness) { return max(0.0,1.0-fabs(dis)/thickness); } __host__ __device__ void normalize(double *a, int s) { double len = lenVec(a,s); for (int i=0; i<s; i++) a[i] = a[i]/len; } __host__ __device__ double diss2P(double x1,double y1,double z1,double x2,double y2,double z2) { double dis1 = x2-x1; double dis2 = y2-y1; double dis3 = z2-z1; return (dis1*dis1+dis2*dis2+dis3*dis3); } __host__ __device__ void mulMat3(double* X, double* Y, double* Z) { for (int i=0; i<3; i++) for (int j=0; j<3; j++) { for (int k=0; k<3; k++) { Z[cu_getIndex2(i,j,3,3)] += (X[cu_getIndex2(i,k,3,3)]*Y[cu_getIndex2(k,j,3,3)]); } } } __host__ __device__ void invertMat33(double X[][3], double Y[][3]) { double det = X[0][0]* (X[1][1]* X[2][2]- X[2][1]* X[1][2])- X[0][1]* (X[1][0]* X[2][2]- X[1][2]* X[2][0])+ X[0][2]* (X[1][0]* X[2][1]- X[1][1]* X[2][0]); double invdet = 1 / det; Y[0][0]= (X[1][1]* X[2][2]- X[2][1]* X[1][2]) * invdet; Y[0][1]= (X[0][2]* X[2][1]- X[0][1]* X[2][2]) * invdet; Y[0][2]= (X[0][1]* X[1][2]- X[0][2]* X[1][1])* invdet; Y[1][0]= (X[1][2]* X[2][0]- X[1][0]* X[2][2])* invdet; Y[1][1]= (X[0][0]* X[2][2]- X[0][2]* X[2][0])* invdet; Y[1][2]= (X[1][0]* X[0][2]- X[0][0]* X[1][2])* invdet; Y[2][0]= (X[1][0]* X[2][1]- X[2][0]* X[1][1])* invdet; Y[2][1]= (X[2][0]* X[0][1]- X[0][0]* X[2][1])* invdet; Y[2][2]= (X[0][0]* X[1][1]- X[1][0]* X[0][1]) * invdet; } __host__ __device__ void eigenOfHess(double* hessian, double *eigval) { double Dxx = hessian[cu_getIndex2(0,0,3,3)]; double Dyy = hessian[cu_getIndex2(1,1,3,3)]; double Dzz = hessian[cu_getIndex2(2,2,3,3)]; double Dxy = hessian[cu_getIndex2(0,1,3,3)]; double Dxz = hessian[cu_getIndex2(0,2,3,3)]; double Dyz = hessian[cu_getIndex2(1,2,3,3)]; double J1 = Dxx + Dyy + Dzz; double J2 = Dxx*Dyy + Dxx*Dzz + Dyy*Dzz - Dxy*Dxy - Dxz*Dxz - Dyz*Dyz; double J3 = 2*Dxy*Dxz*Dyz + Dxx*Dyy*Dzz - Dxz*Dxz*Dyy - Dxx*Dyz*Dyz - Dxy*Dxy*Dzz; double Q = (J1*J1-3*J2)/9; double R = (-9*J1*J2+27*J3+2*J1*J1*J1)/54; double theta = (1.0/3.0)*acos(R/sqrt(Q*Q*Q)); double sqrtQ = sqrt(Q); double twosqrtQ = 2*sqrtQ; double J1o3 = J1/3; eigval[0] = J1o3 + twosqrtQ*cos(theta); eigval[1] = J1o3 + twosqrtQ*cos(theta-2*M_PI/3); eigval[2] = J1o3 + twosqrtQ*cos(theta+2*M_PI/3); } __device__ void computeHessian(double *hessian, double *p) { hessian[cu_getIndex2(0,0,3,3)]=tex3DBicubic_GGX<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(0,1,3,3)]=tex3DBicubic_GYGX<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(0,2,3,3)]=tex3DBicubic_GZGX<float,float>(tex0,p[0],p[1],p[2]); 
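    // The reconstructed field sampled through tex0 is smooth, so its Hessian is
    // symmetric: only the six unique second partials are fetched (xx, xy, xz
    // above; yy, yz, zz below) and the lower triangle is filled by mirroring.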
hessian[cu_getIndex2(1,1,3,3)]=tex3DBicubic_GGY<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(1,2,3,3)]=tex3DBicubic_GZGY<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(2,2,3,3)]=tex3DBicubic_GGZ<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(1,0,3,3)] = hessian[cu_getIndex2(0,1,3,3)]; hessian[cu_getIndex2(2,0,3,3)] = hessian[cu_getIndex2(0,2,3,3)]; hessian[cu_getIndex2(2,1,3,3)] = hessian[cu_getIndex2(1,2,3,3)]; } __device__ void computeHessian(double *hessian, double *p,const texture<float, 3, cudaReadModeElementType> tex0) { hessian[cu_getIndex2(0,0,3,3)]=tex3DBicubic_GGX<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(0,1,3,3)]=tex3DBicubic_GYGX<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(0,2,3,3)]=tex3DBicubic_GZGX<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(1,1,3,3)]=tex3DBicubic_GGY<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(1,2,3,3)]=tex3DBicubic_GZGY<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(2,2,3,3)]=tex3DBicubic_GGZ<float,float>(tex0,p[0],p[1],p[2]); hessian[cu_getIndex2(1,0,3,3)] = hessian[cu_getIndex2(0,1,3,3)]; hessian[cu_getIndex2(2,0,3,3)] = hessian[cu_getIndex2(0,2,3,3)]; hessian[cu_getIndex2(2,1,3,3)] = hessian[cu_getIndex2(1,2,3,3)]; } __host__ __device__ void cross(double *u, double *v, double *w) { w[0] = u[1]*v[2]-u[2]*v[1]; w[1] = u[2]*v[0]-u[0]*v[2]; w[2] = u[0]*v[1]-u[1]*v[0]; } __host__ __device__ float lerp(float y0, float y1, float x0, float x, float x1) { float alpha = (x-x0)/(x1-x0); return y0*(1-alpha)+alpha*y1; } __host__ __device__ float lerp(float y0, float y1, float alpha) { return y0*(1-alpha)+alpha*y1; } __device__ double max3(double x, double y, double z) { double max2 = (x>y?x:y); return max2>z?max2:z; } __device__ double clamp(double x0, double x1, double x) { return (x<x0)?x0:((x>x1)?x1:x); } //interpolate the volume in between __global__ void kernel_interpol(float *intervol, int* dim, float alpha) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; int k = (blockIdx.z * blockDim.z) + threadIdx.z; if ((i>=dim[1]) || (j>=dim[2]) || (k>=dim[3])) return; intervol[k*dim[2]*dim[1] + j*dim[1] + i] = lerp(tex3D(tex0,i,j,k),tex3D(tex1,i,j,k),alpha); if (i<=2 && j<=2 && k<=2) { printf("inside kernel_interpol, val at (%d,%d,%d) = %f\n", i,j,k,intervol[k*dim[2]*dim[1] + j*dim[1] + i]); printf("inside kernel_interpol, tex0 at (%d,%d,%d) = %f\n",i,j,k, tex3D(tex0,i,j,k)); } } __global__ void kernel_interpol2(float *intervol, float *intervol2, int* dim, float alpha) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; int k = (blockIdx.z * blockDim.z) + threadIdx.z; if ((i>=dim[1]) || (j>=dim[2]) || (k>=dim[3])) return; intervol[k*dim[2]*dim[1] + j*dim[1] + i] = lerp(tex3D(tex0,i,j,k),tex3D(tex1,i,j,k),alpha); intervol2[k*dim[2]*dim[1] + j*dim[1] + i] = lerp(tex3D(tex3,i,j,k),tex3D(tex4,i,j,k),alpha); } //test function __global__ void kernel_peak_test(int* dim, int *size, double verextent, double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; //temporary test double refstep=sstep, thickness=0.5; double phongKa=0.2, phongKd=0.8; double light_dir[3]={0,0,1}; normalize(light_dir,3); double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; 
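    // Pixel (i,j) is mapped onto the sampling plane: pointi starts at the plane
    // center and is offset along dir1/dir2 by the centered pixel coordinates
    // (ni, nj) scaled by pixsize (verextent spread over the image height). The
    // projection direction is dir1 x dir2, and samples are taken through a slab
    // of width swidth centered on the plane, at spacing sstep. The same setup
    // appears in the other kernel_* renderers below.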
advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; double indPoint[4]; double gradgfpi[3]; double pointColor; double alpha; double valgfp; double hessian[9]; double hessian_33[3][3]; double hessian_33inv[3][3]; double hessian_inv[9]; double peakdis[3]; double len_peakdis; double pointColorGFP; double alphaGFP; double transpGFP = 1; double accColorGFP = 0; for (k=0; k<ceil(swidth/sstep); k++) { if (cu_isInsideDouble(curpoint[0],curpoint[1],curpoint[2],dim[1],dim[2],dim[3])) { computeHessian(hessian,curpoint,tex2); memcpy(hessian_33,hessian,sizeof(double)*9); invertMat33(hessian_33,hessian_33inv); memcpy(hessian_inv,hessian_33inv,sizeof(double)*9); gradgfpi[0] = tex3DBicubic_GX<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[1] = tex3DBicubic_GY<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[2] = tex3DBicubic_GZ<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); cu_mulMatPoint3(hessian_inv,gradgfpi,peakdis); scaleVector(peakdis,3,-1); len_peakdis = lenVec(peakdis,3); double eigenval[3]; eigenOfHess(hessian,eigenval); //printf("Inside kernel_peak, before checking if eigenval < 0\n"); if (eigenval[0]<0 && eigenval[1]<0 && eigenval[2]<0) //if (1) { //printf("there is something with eigenval < 0, len_peakdis = %f\n",len_peakdis); normalize(peakdis,3); double maxev = max3(eigenval[0],eigenval[1],eigenval[2]); pointColorGFP = phongKa + phongKd*max(0.0f,dotProduct(peakdis,light_dir,3)); alphaGFP = cu_inAlphaX(len_peakdis-100,thickness); //printf("(i,j,k)=(%d,%d,%d); len_peakdis = %f, alphaGFP = %f\n", i,j,k, len_peakdis, alphaGFP); //temporary disactivated for testing //alphaGFP *= clamp(0,1,lerp(0,1,8.0,-maxev,10.0)); //printf("(i,j,k)=(%d,%d,%d); -maxev = %f, after clamp(0,1,lerp(0,1,40.0,-maxev,41.0)): alphaGFP = %f\n", i,j,k,-maxev, alphaGFP); alphaGFP = 1 - pow(1-alphaGFP,sstep/refstep); //if (alphaGFP>0) // printf("alphaGFP > 0\n"); //debug purpose //alphaGFP = 1.0; //printf("(i,j,k)=(%d,%d,%d); after (1 - pow(1-alphaGFP,sstep/refstep)): alphaGFP = %f\n",i,j,k, alphaGFP); //transpGFP *= (1-alphaGFP); transpGFP = 0; //accColorGFP = accColorGFP*(1-alphaGFP) + pointColorGFP*alphaGFP; if (accColorGFP==0) accColorGFP = len_peakdis; else //if (len_peakdis>0) accColorGFP = min(accColorGFP,len_peakdis); //printf("(i,j,k)=(%d,%d,%d); accColorGFP = %f\n", accColorGFP); } } curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } double accAlphaGFP = 1 - transpGFP; imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = 0; if (accAlphaGFP>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP/accAlphaGFP; if (accColorGFP/accAlphaGFP>0) printf("accColorGFP/accAlphaGFP = %f, accAlphaGFP = %f\n",accColorGFP/accAlphaGFP,accAlphaGFP); } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlphaGFP; } //finding peak __global__ void kernel_peak(int* dim, int *size, double verextent, double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; //temporary 
test double refstep=sstep, thickness=1.0; double phongKa=0.2, phongKd=0.8; double light_dir[3]={0,0,1}; //double light_dir[3]={-1,-1,1}; normalize(light_dir,3); double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; double indPoint[4]; double gradgfpi[3]; double pointColor; double alpha; double valgfp; double hessian[9]; double hessian_33[3][3]; double hessian_33inv[3][3]; double hessian_inv[9]; double peakdis[3]; double len_peakdis; double pointColorGFP; double alphaGFP; double transpGFP = 1; double accColorGFP = 0; for (k=0; k<ceil(swidth/sstep); k++) { if (cu_isInsideDouble(curpoint[0],curpoint[1],curpoint[2],dim[1],dim[2],dim[3])) { computeHessian(hessian,curpoint,tex2); memcpy(hessian_33,hessian,sizeof(double)*9); invertMat33(hessian_33,hessian_33inv); memcpy(hessian_inv,hessian_33inv,sizeof(double)*9); gradgfpi[0] = tex3DBicubic_GX<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[1] = tex3DBicubic_GY<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[2] = tex3DBicubic_GZ<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); cu_mulMatPoint3(hessian_inv,gradgfpi,peakdis); //scaleVector(peakdis,3,-1); len_peakdis = lenVec(peakdis,3); double eigenval[3]; eigenOfHess(hessian,eigenval); //printf("Inside kernel_peak, before checking if eigenval < 0\n"); if (eigenval[0]<0 && eigenval[1]<0 && eigenval[2]<0) //if (1) { //printf("there is something with eigenval < 0, len_peakdis = %f\n",len_peakdis); normalize(peakdis,3); double maxev = max3(eigenval[0],eigenval[1],eigenval[2]); pointColorGFP = phongKa + phongKd*max(0.0f,dotProduct(peakdis,light_dir,3)); alphaGFP = cu_inAlphaX(len_peakdis-8,thickness); //alphaGFP = cu_computeAlpha(len_peakdis, len_peakdis, 50, 1, thickness); //printf("(i,j,k)=(%d,%d,%d); len_peakdis = %f, alphaGFP = %f\n", i,j,k, len_peakdis, alphaGFP); //temporary deactivated for testing alphaGFP *= clamp(0,1,lerp(0,1,8.0,-maxev,10.0)); //alphaGFP *= clamp(0,1,lerp(0,1,6.0,-maxev,10.0)); //printf("(i,j,k)=(%d,%d,%d); -maxev = %f, after clamp(0,1,lerp(0,1,40.0,-maxev,41.0)): alphaGFP = %f\n", i,j,k,-maxev, alphaGFP); alphaGFP = 1 - pow(1-alphaGFP,sstep/refstep); //if (alphaGFP>0) // printf("alphaGFP > 0\n"); //debug purpose //alphaGFP = 1.0; //printf("(i,j,k)=(%d,%d,%d); after (1 - pow(1-alphaGFP,sstep/refstep)): alphaGFP = %f\n",i,j,k, alphaGFP); transpGFP *= (1-alphaGFP); accColorGFP = accColorGFP*(1-alphaGFP) + pointColorGFP*alphaGFP; //printf("(i,j,k)=(%d,%d,%d); accColorGFP = %f\n", accColorGFP); } } curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } double accAlphaGFP = 1 - transpGFP; imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = 0; if (accAlphaGFP>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP/accAlphaGFP; if (accColorGFP/accAlphaGFP>0) printf("accColorGFP/accAlphaGFP = %f, accAlphaGFP = %f\n",accColorGFP/accAlphaGFP,accAlphaGFP); } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlphaGFP; } //peak with RFP constraint __global__ void kernel_peak_2chan(int* dim, int *size, double verextent, 
double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; //temporary test double refstep=sstep, thickness=1.0; double phongKa=0.2, phongKd=0.8; double light_dir[3]={0,0,1}; normalize(light_dir,3); double isoval = 800; double alphamax = 1; double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; double indPoint[4]; double gradgfpi[3]; double pointColor; double alpha; double valgfp; double hessian[9]; double hessian_33[3][3]; double hessian_33inv[3][3]; double hessian_inv[9]; double peakdis[3]; double len_peakdis; double pointColorGFP; double alphaGFP; double transpGFP = 1; double accColorGFP = 0; double gradi[3]; double gradi_len; double val; double accColor = 0; double mipVal = -1; double transp = 1; for (k=0; k<ceil(swidth/sstep); k++) { if (cu_isInsideDouble(curpoint[0],curpoint[1],curpoint[2],dim[1],dim[2],dim[3])) { val = tex3DBicubic<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); gradi[0] = tex3DBicubic_GX<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); gradi[1] = tex3DBicubic_GY<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); gradi[2] = tex3DBicubic_GZ<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); //cu_mulMatPoint3(MT_BE_inv, gradi, gradw); gradi_len = lenVec(gradi,3); //negating and normalizing for (int l=0; l<3; l++) gradi[l] = -gradi[l]/gradi_len; //depth = (k*1.0+1)/(fc*1.0-nc); pointColor = phongKa + phongKd*max(0.0f,dotProduct(gradi,light_dir,3)); alpha = cu_computeAlpha(val, gradi_len, isoval, alphamax, thickness); //alpha = 0.5; alpha = 1 - pow(1-alpha,sstep/refstep); transp *= (1-alpha); accColor = accColor*(1-alpha) + pointColor*alpha; //valgfp = tex3DBicubic<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); double inalpha = cu_inAlpha(val,gradi_len,isoval,thickness); if (inalpha>0) { computeHessian(hessian,curpoint,tex2); memcpy(hessian_33,hessian,sizeof(double)*9); invertMat33(hessian_33,hessian_33inv); memcpy(hessian_inv,hessian_33inv,sizeof(double)*9); gradgfpi[0] = tex3DBicubic_GX<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[1] = tex3DBicubic_GY<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); gradgfpi[2] = tex3DBicubic_GZ<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); cu_mulMatPoint3(hessian_inv,gradgfpi,peakdis); //scaleVector(peakdis,3,-1); len_peakdis = lenVec(peakdis,3); double eigenval[3]; eigenOfHess(hessian,eigenval); //printf("Inside kernel_peak, before checking if eigenval < 0\n"); if (eigenval[0]<0 && eigenval[1]<0 && eigenval[2]<0) //if (1) { //printf("there is something with eigenval < 0\n"); normalize(peakdis,3); double maxev = max3(eigenval[0],eigenval[1],eigenval[2]); pointColorGFP = phongKa + phongKd*max(0.0f,dotProduct(peakdis,light_dir,3)); alphaGFP = cu_inAlphaX(len_peakdis-8,thickness); //printf("(i,j,k)=(%d,%d,%d); len_peakdis = %f, alphaGFP = %f\n", i,j,k, len_peakdis, alphaGFP); alphaGFP *= clamp(0,1,lerp(0,1,8.0,-maxev,10.0)); //printf("(i,j,k)=(%d,%d,%d); -maxev = %f, after clamp(0,1,lerp(0,1,40.0,-maxev,41.0)): alphaGFP = %f\n", i,j,k,-maxev, alphaGFP); alphaGFP = 1 - 
pow(1-alphaGFP,sstep/refstep); //debug purpose //alphaGFP = 1.0; //printf("(i,j,k)=(%d,%d,%d); after (1 - pow(1-alphaGFP,sstep/refstep)): alphaGFP = %f\n",i,j,k, alphaGFP); transpGFP *= (1-alphaGFP); accColorGFP = accColorGFP*(1-alphaGFP) + pointColorGFP*alphaGFP; //printf("(i,j,k)=(%d,%d,%d); accColorGFP = %f\n", accColorGFP); accColorGFP*=inalpha; } } } curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } double accAlphaGFP = 1 - transpGFP; double accAlpha = 1 - transp; //imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = 0; if (accAlpha>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor/accAlpha; } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor; } if (accAlphaGFP>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP/accAlphaGFP; } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = accColorGFP; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; //imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlphaGFP; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlpha; } //currently working in index-space //do MIP for a small slice around each point __global__ void kernel_cpr(int* dim, int *size, double verextent, double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); double mipval = INT_MIN; double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; for (k=0; k<ceil(swidth/sstep); k++) { double curval; //curval = tex3DBicubic<float,float>(tex0,curpoint[0],curpoint[1],curpoint[2]); curval = tex3DBicubic<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); mipval = MAX(mipval,curval); curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipval; for (int k=2; k<nOutChannel-1; k++) imageDouble[j*size[0]*nOutChannel+i*nOutChannel+k] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = 1; } __global__ void kernel_cpr_2chan(int* dim, int *size, double verextent, double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; //temporary test double refstep=sstep, thickness=1.0; double phongKa=0.2, phongKd=0.8; double light_dir[3]={0,0,1}; //double light_dir[3]={1,1,1}; normalize(light_dir,3); double isoval = 800; //double isoval = 400; //double isoval = 1800; double alphamax = 1; double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); //double mipval = INT_MIN; double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; 
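    // Two-channel compositing along the slab (naming follows the variables
    // below): the RFP channel (tex5) is rendered as a shaded isosurface-like
    // layer, with Phong shading from its negated, normalized gradient, opacity
    // from cu_computeAlpha() around isoval, and step-size opacity correction
    // 1 - pow(1-alpha, sstep/refstep); the GFP channel (tex2) contributes a MIP
    // that is gated by cu_inAlpha() of the RFP value, so GFP intensity is only
    // kept where RFP is near or above the isovalue.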
double gradi[3]; double gradi_len; double alpha; double val, valgfp; double pointColor; double accColor = 0; double mipVal = -1; double transp = 1; double mipRFP = -1; for (k=0; k<ceil(swidth/sstep); k++) { val = tex3DBicubic<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); mipRFP = MAX(val,mipRFP); gradi[0] = tex3DBicubic_GX<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); gradi[1] = tex3DBicubic_GY<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); gradi[2] = tex3DBicubic_GZ<float,float>(tex5,curpoint[0],curpoint[1],curpoint[2]); //cu_mulMatPoint3(MT_BE_inv, gradi, gradw); gradi_len = lenVec(gradi,3); //negating and normalizing for (int l=0; l<3; l++) gradi[l] = -gradi[l]/gradi_len; //depth = (k*1.0+1)/(fc*1.0-nc); pointColor = phongKa + phongKd*max(0.0f,dotProduct(gradi,light_dir,3)); alpha = cu_computeAlpha(val, gradi_len, isoval, alphamax, thickness); //alpha = 0.5; alpha = 1 - pow(1-alpha,sstep/refstep); transp *= (1-alpha); accColor = accColor*(1-alpha) + pointColor*alpha; valgfp = tex3DBicubic<float,float>(tex2,curpoint[0],curpoint[1],curpoint[2]); mipVal = max(mipVal,valgfp*cu_inAlpha(val,gradi_len,isoval,thickness)); curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } double accAlpha = 1 - transp; if (accAlpha>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor/accAlpha; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipVal; //imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = mipRFP; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipVal; //imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = mipRFP; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlpha; } __global__ void kernel_cprinter(double alpha, int* dim, int *size, double verextent, double *center, double *dir1, double *dir2, double swidth, double sstep, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; double pixsize = verextent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double pointi[3]; advancePoint(center,dir1,ni*pixsize,pointi); advancePoint(pointi,dir2,nj*pixsize,pointi); double mipdir[3]; cross(dir1,dir2,mipdir); normalize(mipdir,3); double mipval = INT_MIN; double curpoint[3]; int k; for (k=0; k<3; k++) curpoint[k] = pointi[k] - mipdir[k]*swidth/2; for (k=0; k<ceil(swidth/sstep); k++) { double curval; curval = lerp(tex3DBicubic<float,float>(tex0,curpoint[0],curpoint[1],curpoint[2]),tex3DBicubic<float,float>(tex1,curpoint[0],curpoint[1],curpoint[2]),alpha); mipval = MAX(mipval,curval); curpoint[0] = curpoint[0] + mipdir[0]*sstep; curpoint[1] = curpoint[1] + mipdir[1]*sstep; curpoint[2] = curpoint[2] + mipdir[2]*sstep; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipval; for (int k=2; k<nOutChannel-1; k++) imageDouble[j*size[0]*nOutChannel+i*nOutChannel+k] = 0; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = 1; } void computeMean(double *points, int n, double *means) { memset(means,0,sizeof(double)*3); for (int i=0; i<3; i++) { for (int k=0; k<n; k++) means[i] += points[k*3+i]; means[i]/=n; } } void computeCovariance(double *points, int n, double *cov) { double 
means[3]; computeMean(points,n,means); //memset(cov,0,sizeof(double)*9); for (int i=0; i<3; i++) for (int j=i; j<3; j++) { double localcov = 0; for (int k=0; k<n; k++) { localcov += (points[k*3+i]-means[i])*(points[k*3+j]-means[j]); } localcov/=n; cov[cu_getIndex2(i,j,3,3)] = cov[cu_getIndex2(j,i,3,3)] = localcov; } } int isScaleOf(double *v1, double *v2, int s) { double factor; for (int i=0; i<s; i++) if (v1[i]) { factor = v2[i]/v1[i]; break; } for (int i=0; i<s; i++) if (v1[i]*factor != v2[i]) return 0; return 1; } //for symmetric 3x3 matrix void computeEigenVec(double *matrix, double eigval, double *eigvec) { double matrixtmp[9]; memcpy(matrixtmp,matrix,sizeof(double)*9); for (int i=0; i<3; i++) matrixtmp[cu_getIndex2(i,i,3,3)] = matrixtmp[cu_getIndex2(i,i,3,3)] - eigval; double col1[3], col2[3]; int ind = 0; for (ind = 0; ind<3; ind++) { if (matrixtmp[cu_getIndex2(0,ind,3,3)] || matrixtmp[cu_getIndex2(1,ind,3,3)] || matrixtmp[cu_getIndex2(2,ind,3,3)]) break; } if (ind<3) { for (int i=0; i<3; i++) col1[i] = matrixtmp[cu_getIndex2(i,ind,3,3)]; int ind2; for (ind2 = ind+1; ind2<3; ind2++) { if (matrixtmp[cu_getIndex2(0,ind2,3,3)] || matrixtmp[cu_getIndex2(1,ind2,3,3)] || matrixtmp[cu_getIndex2(2,ind2,3,3)]) break; } if (ind2<3) { for (int i=0; i<3; i++) col2[i] = matrixtmp[cu_getIndex2(i,ind2,3,3)]; if (isScaleOf(col1,col2,3)) { ind2++; if (ind2<3) { if (matrixtmp[cu_getIndex2(0,ind2,3,3)] || matrixtmp[cu_getIndex2(1,ind2,3,3)] || matrixtmp[cu_getIndex2(2,ind2,3,3)]) { for (int i=0; i<3; i++) col2[i] = matrixtmp[cu_getIndex2(i,ind2,3,3)]; if (isScaleOf(col1,col2,3)) { double tmp[3]; memcpy(tmp,col1,sizeof(double)*3); tmp[0]++; double tmp2[3]; cross(col1,tmp,tmp2); cross(tmp2,col1,eigvec); } else { cross(col1,col2,eigvec); } } else { double tmp[3]; memcpy(tmp,col1,sizeof(double)*3); tmp[0]++; double tmp2[3]; cross(col1,tmp,tmp2); cross(tmp2,col1,eigvec); } } else { double tmp[3]; memcpy(tmp,col1,sizeof(double)*3); tmp[0]++; double tmp2[3]; cross(col1,tmp,tmp2); cross(tmp2,col1,eigvec); } } else { cross(col1,col2,eigvec); } } else { double tmp[3]; memcpy(tmp,col1,sizeof(double)*3); tmp[0]++; double tmp2[3]; cross(col1,tmp,tmp2); cross(tmp2,col1,eigvec); } } else { eigvec[0] = eigvec[1] = eigvec[2] = 1; } normalize(eigvec,3); } void drawCircle(unsigned char *img, int s0, int s1, int s2, int drawchan, int c1, int c2, double rad) { double angstep = 0.2; for (double curang = 0; curang<2*M_PI; curang+=angstep) { int i1, i2; i2 = sin(curang)*rad; i1 = cos(curang)*rad; i1 += c1; i2 += c2; img[i2*s1*s0 + i1*s0 + drawchan] = 255; } } void drawCircleWithColor(unsigned char *img, int s0, int s1, int s2, int c1, int c2, double rad, double angstep, unsigned char color0, unsigned char color1, unsigned char color2) { for (double curang = 0; curang<2*M_PI; curang+=angstep) { int i1, i2; i2 = sin(curang)*rad; i1 = cos(curang)*rad; i1 += c1; i2 += c2; img[i2*s1*s0 + i1*s0 + 0] = color0; img[i2*s1*s0 + i1*s0 + 1] = color1; img[i2*s1*s0 + i1*s0 + 2] = color2; } } void drawCross(unsigned char *img, int s0, int s1, int s2, int drawchan, int c1, int c2, double rad) { for (int i=c1-rad; i<c1+rad; i++) img[c2*s1*s0 + i*s0 + drawchan] = 255; for (int i=c2-rad; i<c2+rad; i++) img[i*s1*s0 + c1*s0 + drawchan] = 255; } void drawCrossWithColor(unsigned char *img, int s0, int s1, int s2, int c1, int c2, double rad, unsigned char *color) { for (int k = 0; k<3; k++) { for (int i=c1-rad; i<c1+rad; i++) img[c2*s1*s0 + i*s0 + k] = color[k]; for (int i=c2-rad; i<c2+rad; i++) img[i*s1*s0 + c1*s0 + k] = color[k]; } } //draw the 
first N circles on the grid of RxC void drawNCircle(unsigned char *img, int s0, int s1, int s2, int drawchan, int N, int g1, int g2) { double rad; double w1 = s1/g1; double w2 = s2/g2; rad = w1<w2?w1/3:w2/3; for (int i=0; i<N; i++) { int gi1 = i/g1; int gi2 = i%g2; int pi1 = gi1*w1+w1/2; int pi2 = gi2*w2+w2/2; drawCircle(img,s0,s1,s2,drawchan,pi1,pi2,rad); } } double calDet44(double X[][4]) { double value = ( X[0][3]*X[1][2]*X[2][1]*X[3][0] - X[0][2]*X[1][3]*X[2][1]*X[3][0] - X[0][3]*X[1][1]*X[2][2]*X[3][0] + X[0][1]*X[1][3]*X[2][2]*X[3][0]+ X[0][2]*X[1][1]*X[2][3]*X[3][0] - X[0][1]*X[1][2]*X[2][3]*X[3][0] - X[0][3]*X[1][2]*X[2][0]*X[3][1] + X[0][2]*X[1][3]*X[2][0]*X[3][1]+ X[0][3]*X[1][0]*X[2][2]*X[3][1] - X[0][0]*X[1][3]*X[2][2]*X[3][1] - X[0][2]*X[1][0]*X[2][3]*X[3][1] + X[0][0]*X[1][2]*X[2][3]*X[3][1]+ X[0][3]*X[1][1]*X[2][0]*X[3][2] - X[0][1]*X[1][3]*X[2][0]*X[3][2] - X[0][3]*X[1][0]*X[2][1]*X[3][2] + X[0][0]*X[1][3]*X[2][1]*X[3][2]+ X[0][1]*X[1][0]*X[2][3]*X[3][2] - X[0][0]*X[1][1]*X[2][3]*X[3][2] - X[0][2]*X[1][1]*X[2][0]*X[3][3] + X[0][1]*X[1][2]*X[2][0]*X[3][3]+ X[0][2]*X[1][0]*X[2][1]*X[3][3] - X[0][0]*X[1][2]*X[2][1]*X[3][3] - X[0][1]*X[1][0]*X[2][2]*X[3][3] + X[0][0]*X[1][1]*X[2][2]*X[3][3] ); return value; } void invertMat44(double X[][4], double Y[][4]) { double det = calDet44(X); Y[0][0] = X[1][2]*X[2][3]*X[3][1] - X[1][3]*X[2][2]*X[3][1] + X[1][3]*X[2][1]*X[3][2] - X[1][1]*X[2][3]*X[3][2] - X[1][2]*X[2][1]*X[3][3] + X[1][1]*X[2][2]*X[3][3]; Y[0][1] = X[0][3]*X[2][2]*X[3][1] - X[0][2]*X[2][3]*X[3][1] - X[0][3]*X[2][1]*X[3][2] + X[0][1]*X[2][3]*X[3][2] + X[0][2]*X[2][1]*X[3][3] - X[0][1]*X[2][2]*X[3][3]; Y[0][2] = X[0][2]*X[1][3]*X[3][1] - X[0][3]*X[1][2]*X[3][1] + X[0][3]*X[1][1]*X[3][2] - X[0][1]*X[1][3]*X[3][2] - X[0][2]*X[1][1]*X[3][3] + X[0][1]*X[1][2]*X[3][3]; Y[0][3] = X[0][3]*X[1][2]*X[2][1] - X[0][2]*X[1][3]*X[2][1] - X[0][3]*X[1][1]*X[2][2] + X[0][1]*X[1][3]*X[2][2] + X[0][2]*X[1][1]*X[2][3] - X[0][1]*X[1][2]*X[2][3]; Y[1][0] = X[1][3]*X[2][2]*X[3][0] - X[1][2]*X[2][3]*X[3][0] - X[1][3]*X[2][0]*X[3][2] + X[1][0]*X[2][3]*X[3][2] + X[1][2]*X[2][0]*X[3][3] - X[1][0]*X[2][2]*X[3][3]; Y[1][1] = X[0][2]*X[2][3]*X[3][0] - X[0][3]*X[2][2]*X[3][0] + X[0][3]*X[2][0]*X[3][2] - X[0][0]*X[2][3]*X[3][2] - X[0][2]*X[2][0]*X[3][3] + X[0][0]*X[2][2]*X[3][3]; Y[1][2] = X[0][3]*X[1][2]*X[3][0] - X[0][2]*X[1][3]*X[3][0] - X[0][3]*X[1][0]*X[3][2] + X[0][0]*X[1][3]*X[3][2] + X[0][2]*X[1][0]*X[3][3] - X[0][0]*X[1][2]*X[3][3]; Y[1][3] = X[0][2]*X[1][3]*X[2][0] - X[0][3]*X[1][2]*X[2][0] + X[0][3]*X[1][0]*X[2][2] - X[0][0]*X[1][3]*X[2][2] - X[0][2]*X[1][0]*X[2][3] + X[0][0]*X[1][2]*X[2][3]; Y[2][0] = X[1][1]*X[2][3]*X[3][0] - X[1][3]*X[2][1]*X[3][0] + X[1][3]*X[2][0]*X[3][1] - X[1][0]*X[2][3]*X[3][1] - X[1][1]*X[2][0]*X[3][3] + X[1][0]*X[2][1]*X[3][3]; Y[2][1] = X[0][3]*X[2][1]*X[3][0] - X[0][1]*X[2][3]*X[3][0] - X[0][3]*X[2][0]*X[3][1] + X[0][0]*X[2][3]*X[3][1] + X[0][1]*X[2][0]*X[3][3] - X[0][0]*X[2][1]*X[3][3]; Y[2][2] = X[0][1]*X[1][3]*X[3][0] - X[0][3]*X[1][1]*X[3][0] + X[0][3]*X[1][0]*X[3][1] - X[0][0]*X[1][3]*X[3][1] - X[0][1]*X[1][0]*X[3][3] + X[0][0]*X[1][1]*X[3][3]; Y[2][3] = X[0][3]*X[1][1]*X[2][0] - X[0][1]*X[1][3]*X[2][0] - X[0][3]*X[1][0]*X[2][1] + X[0][0]*X[1][3]*X[2][1] + X[0][1]*X[1][0]*X[2][3] - X[0][0]*X[1][1]*X[2][3]; Y[3][0] = X[1][2]*X[2][1]*X[3][0] - X[1][1]*X[2][2]*X[3][0] - X[1][2]*X[2][0]*X[3][1] + X[1][0]*X[2][2]*X[3][1] + X[1][1]*X[2][0]*X[3][2] - X[1][0]*X[2][1]*X[3][2]; Y[3][1] = X[0][1]*X[2][2]*X[3][0] - X[0][2]*X[2][1]*X[3][0] + X[0][2]*X[2][0]*X[3][1] - 
X[0][0]*X[2][2]*X[3][1] - X[0][1]*X[2][0]*X[3][2] + X[0][0]*X[2][1]*X[3][2]; Y[3][2] = X[0][2]*X[1][1]*X[3][0] - X[0][1]*X[1][2]*X[3][0] - X[0][2]*X[1][0]*X[3][1] + X[0][0]*X[1][2]*X[3][1] + X[0][1]*X[1][0]*X[3][2] - X[0][0]*X[1][1]*X[3][2]; Y[3][3] = X[0][1]*X[1][2]*X[2][0] - X[0][2]*X[1][1]*X[2][0] + X[0][2]*X[1][0]*X[2][1] - X[0][0]*X[1][2]*X[2][1] - X[0][1]*X[1][0]*X[2][2] + X[0][0]*X[1][1]*X[2][2]; for (int i=0; i<4; i++) for (int j=0; j<4; j++) Y[i][j] = Y[i][j]/det; } void subtractVec(double *a, double *b, double *c, int s) { for (int i=0; i<s; i++) c[i] = a[i]-b[i]; } void negateVec(double *a, int s) { for (int i=0; i<s; i++) a[i] = -a[i]; } //s1,s2,s3: fastest to slowest void sliceImageDouble(double *input, int s1, int s2, int s3, double *output, int indS1) { for (int i=0; i<s3; i++) for (int j=0; j<s2; j++) { output[i*s2+j] = input[i*s2*s1+j*s1+indS1]*input[i*s2*s1+j*s1+s1-1]; } } unsigned char quantizeDouble(double val, double minVal, double maxVal) { return CLIP((val-minVal)*255.0/(maxVal-minVal),0,255); } //3D data, fastest to slowest void quantizeImageDouble3D(double *input, unsigned char *output, int s0, int s1, int s2) { double maxVal[4]; maxVal[0] = maxVal[1] = maxVal[2] = maxVal[3] = -(1<<15); double minVal[4]; minVal[0] = minVal[1] = minVal[2] = minVal[3] = ((1<<15) - 1); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { if (input[i*s1*s0+j*s0+k]>maxVal[k]) maxVal[k] = input[i*s1*s0+j*s0+k]; if (input[i*s1*s0+j*s0+k]<minVal[k]) minVal[k] = input[i*s1*s0+j*s0+k]; } for (int i=0; i<4; i++) printf("minmax %d = [%f,%f]\n",i,minVal[i],maxVal[i]); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { output[i*s1*s0+j*s0+k] = quantizeDouble(input[i*s1*s0+j*s0+k],minVal[k],maxVal[k]); } } void quantizeImageDouble3D_Range(double *input, unsigned char *output, int s0, int s1, int s2, double *range) { for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { output[i*s1*s0+j*s0+k] = quantizeDouble(input[i*s1*s0+j*s0+k],range[k*2],range[k*2+1]); } } template<class T> void quantizeImage3D(T *input, unsigned char *output, int s0, int s1, int s2) { double maxVal[4]; maxVal[0] = maxVal[1] = maxVal[2] = maxVal[3] = -(1<<15); double minVal[4]; minVal[0] = minVal[1] = minVal[2] = minVal[3] = ((1<<15) - 1); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { if (input[i*s1*s0+j*s0+k]>maxVal[k]) maxVal[k] = input[i*s1*s0+j*s0+k]; if (input[i*s1*s0+j*s0+k]<minVal[k]) minVal[k] = input[i*s1*s0+j*s0+k]; } for (int i=0; i<4; i++) printf("minmax %d = [%f,%f]\n",i,minVal[i],maxVal[i]); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { output[i*s1*s0+j*s0+k] = quantizeDouble(input[i*s1*s0+j*s0+k],minVal[k],maxVal[k]); } } void applyMask(unsigned char *input, int s0, int s1, int s2, int *mask, unsigned char *output) { for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { output[i*s1*s0+j*s0+k] = input[i*s1*s0+j*s0+k]*mask[i*s1+j]; } } void removeChannel(unsigned char *input, int s0, int s1, int s2, int chan, unsigned char *output) { memcpy(output,input,s0*s1*s2*sizeof(unsigned char)); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) output[i*s1*s0+j*s0+chan] = 0; } //---end of cuda_volume_rendering functions template<class T> void setPlane(T* image, int s1, int s2, int s3, T val, int s1i) { for (int i=0; i<s3; i++) for (int j=0; j<s2; j++) image[i*s2*s1+j*s1+s1i] = val; } void transposeMat33(double X[][3], double Y[][3]) { for (int i=0; i<3; i++) for (int 
j=i; j<3; j++) { Y[i][j]=X[j][i]; Y[j][i]=X[i][j]; } } float linearizeDepth(float depth, float zNear, float zFar) { return (2.0 * zFar * zNear) / (zFar + zNear - depth * (zFar - zNear)); } float linearizeDepthOrtho(float depth, float zNear, float zFar) { //the returned depth is relative to the "at" point return (depth*(zFar-zNear)+zFar+zNear)/2; } template<class T> void saveImage(int width, int height, int nchan, T *data, char *name) { TGAImage *img = new TGAImage(width,height); unsigned char* dataQuantized = new unsigned char[height*width*nchan]; quantizeImage3D<T>(data,dataQuantized,nchan,width,height); Colour c; for(int x=0; x<height; x++) for(int y=0; y<width; y++) { c.a = 255; c.b = c.g = c.r = 0; switch (nchan) { case 4: c.a = dataQuantized[x*width*nchan+y*nchan+3]; case 3: c.b = dataQuantized[x*width*nchan+y*nchan+2]; case 2: c.g = dataQuantized[x*width*nchan+y*nchan+1]; case 1: c.r = dataQuantized[x*width*nchan+y*nchan]; } img->setPixel(c,x,y); } img->WriteImage(name); delete img; delete[] dataQuantized; } template<class T> void saveImageWithoutQuantizing(int width, int height, int nchan, T *data, char *name) { TGAImage *img = new TGAImage(width,height); Colour c; for(int x=0; x<height; x++) for(int y=0; y<width; y++) { c.a = 255; c.b = c.g = c.r = 0; switch (nchan) { case 4: c.a = data[x*width*nchan+y*nchan+3]; case 3: c.b = data[x*width*nchan+y*nchan+2]; case 2: c.g = data[x*width*nchan+y*nchan+1]; case 1: c.r = data[x*width*nchan+y*nchan]; } img->setPixel(c,x,y); } img->WriteImage(name); delete img; } //image1 and image2 should have same spatial size (except number of channels, i.e. fastest axis) template <class T1, class T2> void copyImageChannel(T1 *image1,int s10,int s11,int s12,int c1,T2 *image2,int s20,int c2) { for (int i=0; i<s12; i++) for (int j=0; j<s11; j++) { int ind1 = i*s11*s10 + j*s10 + c1; int ind2 = i*s11*s20 + j*s20 + c2; image2[ind2] = image1[ind1]; } } double computeAngle(double *v1, double *v2) { double dp = dotProduct(v1,v2,3); return acos(dp)*180/M_PI; } void render(Hale::Viewer *viewer){ viewer->draw(); viewer->bufferSwap(); } glm::vec4 convertDepthBuffToWorldPos(int w, int h, double depth, Hale::Viewer *viewer) { double depthv = linearizeDepthOrtho(lerp(-1,1,0,depth,1),viewer->camera.clipNear(),viewer->camera.clipFar()); double wv, hv; wv = w-viewer->widthBuffer()/2; hv = h-viewer->heightBuffer()/2; glm::vec3 diff = viewer->camera.at() - viewer->camera.from(); double dist = glm::length(diff); double fangle = viewer->camera.fov()*AIR_PI/360; double vextent = dist*tan(fangle)*2; double pixelsize = vextent/viewer->heightBuffer(); wv = wv*pixelsize; hv = hv*pixelsize; depthv = -(depthv+dist); printf("Inside convertDepthBuffToWorldPos: viewpos = %f,%f,%f\n",wv,hv,depthv); glm::vec4 vpos; vpos.x = wv; vpos.y = hv; vpos.z = depthv; vpos.w = 1; glm::vec4 result; result = viewer->camera.viewInv()*vpos; return result; } glm::vec4 convertDepthBuffToViewPos(int w, int h, double depth, Hale::Viewer *viewer) { double depthv = linearizeDepthOrtho(lerp(-1,1,0,depth,1),viewer->camera.clipNear(),viewer->camera.clipFar()); double wv, hv; wv = w-viewer->widthBuffer()/2; hv = h-viewer->heightBuffer()/2; glm::vec3 diff = viewer->camera.at() - viewer->camera.from(); double dist = glm::length(diff); double fangle = viewer->camera.fov()*AIR_PI/360; double vextent = dist*tan(fangle)*2; double pixelsize = vextent/viewer->heightBuffer(); wv = wv*pixelsize; hv = hv*pixelsize; depthv = -(depthv+dist); printf("Inside convertDepthBuffToViewPos: winpos = %d,%d; viewpos = 
%f,%f,%f\n",w,h,wv,hv,depthv); glm::vec4 vpos; vpos.x = wv; vpos.y = hv; vpos.z = depthv; vpos.w = 1; return vpos; } glm::vec4 convertWorldToViewPos(double x, double y, double z, Hale::Viewer *viewer) { glm::vec4 wpos; wpos.x = x; wpos.y = y; wpos.z = z; wpos.w = 1; glm::vec4 vpos; vpos = viewer->camera.view()*wpos; return vpos; } void mainInit() { for (int i=0; i<=NTEX; i++) d_volumeArray[i] = 0; } class Queue { public: Queue(int isize) { size = isize; nin = nrrdNew(); filemem0 = filemem1 = 0; } Queue() { size = NTEX; nin = nrrdNew(); filemem0 = filemem1 = 0; } ~Queue() { } /* cudaArray* find(int time) { if (timetoindex.find(time) == timetoindex.end()) return NULL; else return d_volumeArray[timetoindex[time]]; } */ int find(int time) { if (timetoindex.find(time) == timetoindex.end()) return -1; else return timetoindex[time]; } int findFarthestTime(int time) { int dismax = -1; int maxind; for (int i=0; i<elems.size(); i++) if (abs(time-elems[i])>dismax) { dismax = abs(time-elems[i]); maxind = i; } return maxind; } //cudaArray* push(int time, int *arr_nameid, char* pathprefix, airArray *mop) int push(int time, int *arr_nameid, char* pathprefix, airArray *mop) { printf("Inside Queue.push(): time = %d\n",time); for (int i=0; i<elems.size(); i++) printf("%d ", elems[i]); printf("\n"); //cudaArray* findres = find(time); int findres = find(time); if (findres>=0) { printf("findres = %d\n", findres); return findres; } int curvol; if (elems.size()<size) { elems.push_back(time); timetoindex[time] = elems.size()-1; curvol = elems.size()-1; printf("curvol in 'if': %d\n",curvol); } else { curvol = findFarthestTime(time); timetoindex.erase(elems[curvol]); elems[curvol] = time; timetoindex[time] = curvol; printf("curvol in 'else': %d\n",curvol); } char inname[1000]; char *err; int curnameind = arr_nameid[time]; sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); cout<<"inname = "<<inname<<endl; if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); fprintf(stderr, "%s: trouble reading \"%s\":\n%s", "Queue.push()", inname, err); free(err); exit(1); } cout<<"read file "<<inname<<endl; unsigned int pixSize; cudaChannelFormatDesc channelDesc; pixSize = sizeof(float); channelDesc = cudaCreateChannelDesc<float>(); /* if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", "Queue.push()", nin->dim, nin->spaceDim); airMopError(mop); exit(1); } */ if (nin->dim == 3) { dim[0] = 1; dim[1] = nin->axis[0].size; dim[2] = nin->axis[1].size; dim[3] = nin->axis[2].size; } else //4-channel { dim[0] = nin->axis[0].size; dim[1] = nin->axis[1].size; dim[2] = nin->axis[2].size; dim[3] = nin->axis[3].size; } int channel = 1; if (!filemem0) { printf("in Queue.push, dim=[%d,%d,%d,%d], before allocating filemem\n",dim[0],dim[1],dim[2],dim[3]); filemem0 = new float[dim[1]*dim[2]*dim[3]]; filemem1 = new float[dim[1]*dim[2]*dim[3]]; } printf("in Queue.push, before setting filemem\n"); if (nin->dim == 3) { for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i]; } } else { for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } } const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); if (!d_volumeArray[curvol]) { cudaMalloc3DArray(&d_volumeArray[curvol], &channelDesc, volumeSize); cudaMalloc3DArray(&d_volumeArray1[curvol], &channelDesc, volumeSize); } cudaMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_cudaPitchedPtr((void*)filemem0, 
volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[curvol]; copyParams0.extent = volumeSize; copyParams0.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams0); cudaMemcpy3DParms copyParams1 = {0}; copyParams1.srcPtr = make_cudaPitchedPtr((void*)filemem1, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray1[curvol]; copyParams1.extent = volumeSize; copyParams1.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams1); printf("end of Queue.push()\n"); //return d_volumeArray[curvol]; return curvol; } int* getDataDim() { return dim; } private: int size; int cursize; vector<int> elems; unordered_map<int,int> timetoindex; //nin data float *filemem0, *filemem1; Nrrd *nin; int dim[4]; }; void interpolVolAndRender(int &curVolInMem, int mini, double alpha, Queue &queue, int *arr_nameid, double *arr_center, char *pathprefix, airArray *mop, unsigned int pixSize, int *dim, int *size, double *eigenvec, double verextent2, double swidth, double sstep, int nOutChannel, float *d_volmem, float *d_volmem2, int *d_dim, int *d_size, double *d_dir1, double *d_dir2, double *d_center, double *imageDouble, double *d_imageDouble, unsigned char *imageQuantized, Hale::Viewer &viewer, Hale::Viewer &viewer2, Hale::Polydata *hpldview2, Hale::Polydata *hpld_inter, double spherescale, Hale::Polydata *hpld_sq_inter, bool statePKey, int kern, bool stateIKey) { int count; double dir1[3],dir2[3],center[3]; const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); cudaChannelFormatDesc channelDesc; channelDesc = cudaCreateChannelDesc<float>(); if (curVolInMem != mini) { curVolInMem = mini; count = mini; cudaError_t errCu; //cudaArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); cudaArray* d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex0.normalized = false; tex0.filterMode = cudaFilterModeLinear; tex0.addressMode[0] = cudaAddressModeBorder; tex0.addressMode[1] = cudaAddressModeBorder; tex0.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex0, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex3.normalized = false; tex3.filterMode = cudaFilterModeLinear; tex3.addressMode[0] = cudaAddressModeBorder; tex3.addressMode[1] = cudaAddressModeBorder; tex3.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex3, d_curvolarr, channelDesc); count = mini+1; //d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex1.normalized = false; tex1.filterMode = cudaFilterModeLinear; tex1.addressMode[0] = cudaAddressModeBorder; tex1.addressMode[1] = cudaAddressModeBorder; tex1.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex1, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex4.normalized = false; tex4.filterMode = cudaFilterModeLinear; tex4.addressMode[0] = cudaAddressModeBorder; tex4.addressMode[1] = cudaAddressModeBorder; tex4.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex4, d_curvolarr, channelDesc); } int numThread1D; numThread1D = 8; dim3 threadsPerBlock(numThread1D,numThread1D,numThread1D); dim3 numBlocks((dim[1]+numThread1D-1)/numThread1D,(dim[2]+numThread1D-1)/numThread1D,(dim[3]+numThread1D-1)/numThread1D); //kernel_interpol<<<numBlocks,threadsPerBlock>>>(d_volmem,d_dim,alpha); 
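    // kernel_interpol2 blends the two cached time steps per channel with weight
    // alpha: channel 0 from tex0/tex1 into d_volmem, channel 1 from tex3/tex4
    // into d_volmem2. The blended volumes are then copied device-to-device into
    // the spare cudaArray slot NTEX and bound to tex2/tex5, which is what the
    // rendering kernels sample.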
kernel_interpol2<<<numBlocks,threadsPerBlock>>>(d_volmem,d_volmem2,d_dim,alpha); cudaError_t errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error After kernel_nterpol when clicking: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync After kernel_nterpol when clicking: %s\n", cudaGetErrorString(errCu)); //copy from device's global mem to texture mem if (!d_volumeArray[NTEX]) { cudaMalloc3DArray(&d_volumeArray[NTEX], &channelDesc, volumeSize); cudaMalloc3DArray(&d_volumeArray1[NTEX], &channelDesc, volumeSize); } cudaMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_cudaPitchedPtr((void*)d_volmem, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[NTEX]; copyParams0.extent = volumeSize; copyParams0.kind = cudaMemcpyDeviceToDevice; cudaMemcpy3D(&copyParams0); tex2.normalized = false; tex2.filterMode = cudaFilterModeLinear; tex2.addressMode[0] = cudaAddressModeBorder; tex2.addressMode[1] = cudaAddressModeBorder; tex2.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex2, d_volumeArray[NTEX], channelDesc); cudaMemcpy3DParms copyParams1 = {0}; copyParams1.srcPtr = make_cudaPitchedPtr((void*)d_volmem2, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray1[NTEX]; copyParams1.extent = volumeSize; copyParams1.kind = cudaMemcpyDeviceToDevice; cudaMemcpy3D(&copyParams1); tex5.normalized = false; tex5.filterMode = cudaFilterModeLinear; tex5.addressMode[0] = cudaAddressModeBorder; tex5.addressMode[1] = cudaAddressModeBorder; tex5.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex5, d_volumeArray1[NTEX], channelDesc); //after that call the normal kernel to do MIP count = mini; double FT[3]; double FN[3],FB[3]; double dr[3],ddr[3]; if (kern==1) { for (int i=0; i<3; i++) { center[i] = cubicFilter<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); dr[i] = cubicFilter_G<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); ddr[i] = cubicFilter_GG<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } else { for (int i=0; i<3; i++) { center[i] = ctmr(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); dr[i] = ctmr_g(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); ddr[i] = ctmr_gg(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } normalize(dr,3); normalize(ddr,3); memcpy(FT,dr,sizeof(double)*3); memcpy(FN,eigenvec,sizeof(double)*3); normalize(FN,3); cross(FT,FN,FB); cross(FB,FT,FN); memcpy(dir1,FN,sizeof(double)*3); memcpy(dir2,FB,sizeof(double)*3); cudaMemcpy(d_dir1, dir1, 3*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_dir2, dir2, 3*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_center,center,3*sizeof(double), cudaMemcpyHostToDevice); numThread1D = 16; dim3 threadsPerBlock2(numThread1D,numThread1D); dim3 numBlocks2((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D); if (statePKey) { if (stateIKey) kernel_peak_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else 
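            // Without the RFP constraint (stateIKey off), kernel_peak detects
            // Hessian-based peaks in the GFP channel (tex2) alone; the _2chan
            // variant above additionally gates by the RFP channel (tex5).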
kernel_peak<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } else { if (stateIKey) kernel_cpr_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error After kernel_cpr when clicking: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync After kernel_cpr when clicking: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); short width = size[0]; short height = size[1]; //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); if (statePKey) quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); else quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); glm::mat4 fmat2 = glm::mat4(); fmat2[0][0] = spherescale; fmat2[1][1] = spherescale; fmat2[2][2] = spherescale; fmat2[3][0] = center[0]; fmat2[3][1] = center[1]; fmat2[3][2] = center[2]; hpld_inter->model(fmat2); viewer.current(); glm::mat4 tmat_sq_inter = glm::mat4(); tmat_sq_inter[0][0] = FN[0]; tmat_sq_inter[0][1] = FN[1]; tmat_sq_inter[0][2] = FN[2]; tmat_sq_inter[0][3] = 0; tmat_sq_inter[1][0] = FB[0]; tmat_sq_inter[1][1] = FB[1]; tmat_sq_inter[1][2] = FB[2]; tmat_sq_inter[1][3] = 0; tmat_sq_inter[2][0] = FT[0]; tmat_sq_inter[2][1] = FT[1]; tmat_sq_inter[2][2] = FT[2]; tmat_sq_inter[2][3] = 0; tmat_sq_inter[3][0] = center[0]; tmat_sq_inter[3][1] = center[1]; tmat_sq_inter[3][2] = center[2]; tmat_sq_inter[3][3] = 1; glm::mat4 smat_sq_inter = glm::mat4(); smat_sq_inter[0][0] = 2; smat_sq_inter[1][1] = 2; glm::mat4 fmat_sq_inter = tmat_sq_inter*smat_sq_inter; hpld_sq_inter->model(fmat_sq_inter); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); //hpld_sq_inter->setTexture((char*)"myTextureSampler",(unsigned char *)imageQuantized,size[0],size[1],4); } int main(int argc, const char **argv) { const char *me; char *err; hestOpt *hopt=NULL; hestParm *hparm; airArray *mop; //cache queue for GPU memory //int queue[NTEX]; //int queueCurSize = 0; //unordered_map<int,int> timetoindex; Queue queue; char *name; char *texname1, *texname2; double dir1[3],dir2[3]; //tmp fixed track coords, and radius double track[3] = {366.653991263,89.6381792864,104.736646409}; double trackhomo[4]; trackhomo[0] = track[0]; trackhomo[1] = track[1]; trackhomo[2] = track[2]; trackhomo[3] = 1; double trackw[4]; double radius = 10; double center[3]; int size[2]; Nrrd *nin; char *outname; char inname[100]; char *centername; double swidth, sstep; //width and step to take inside the slice short *outdata; char outnameslice[100]; double verextent; //vertical extent to project MIP char *pathprefix; int kern; int curVolInMem; /* boilerplate hest code */ me = argv[0]; mop = airMopNew(); hparm = hestParmNew(); airMopAdd(mop, hparm, (airMopper)hestParmFree, 
airMopAlways); /* setting up the command-line options */ hparm->respFileEnable = AIR_TRUE; hparm->noArgsIsNoProblem = AIR_TRUE; hestOptAdd(&hopt, "isize", "sx sy", airTypeUInt, 2, 2, size, "200 200", "output image sizes"); hestOptAdd(&hopt, "vex", "ve", airTypeDouble, 1, 1, &verextent, "200", "vertical extent in projecting MIP"); hestOptAdd(&hopt, "kern", "kernel", airTypeInt, 1, 1, &kern, "0", "kernel used in convolution"); hestOptAdd(&hopt, "dir1", "x y z", airTypeDouble, 3, 3, dir1, "1 0 0", "first direction of the generated image"); hestOptAdd(&hopt, "dir2", "x y z", airTypeDouble, 3, 3, dir2, "0 -1 0", "second direction of the generated image"); hestOptAdd(&hopt, "swidth", "sw", airTypeDouble, 1, 1, &swidth, "1", "the width of the slice to cut"); hestOptAdd(&hopt, "sstep", "ss", airTypeDouble, 1, 1, &sstep, "1", "the step of Maximum Intensity Projection through slice"); hestOptAdd(&hopt, "i", "name", airTypeString, 1, 1, &centername, "coord_newtrack_pioneer.txt", "name of files centaining centers"); hestOptAdd(&hopt, "pref", "path", airTypeString, 1, 1, &pathprefix, "/media/trihuynh/781B8CE3469A7908/scivisdata", "prefix of the path to the folder containing data files"); hestOptAdd(&hopt, "o", "name", airTypeString, 1, 1, &outname, "cpr.nrrd", "name of output image"); hestParseOrDie(hopt, argc-1, argv+1, hparm, me, "demo program", AIR_TRUE, AIR_TRUE, AIR_TRUE); airMopAdd(mop, hopt, (airMopper)hestOptFree, airMopAlways); airMopAdd(mop, hopt, (airMopper)hestParseFree, airMopAlways); /* Compute threshold (isovalue) */ cout<<"After TEEM processing of input arguments"<<endl; mainInit(); int countline = 0; string line; ifstream infile(centername); int *arr_nameid; double *arr_center; while (std::getline(infile, line)) { ++countline; } infile.clear(); infile.seekg(0, ios::beg); arr_nameid = new int[countline]; arr_center = new double[countline*3]; for (int i=0; i<countline; i++) { infile >> arr_nameid[i]; infile >> arr_center[i*3]; infile >> arr_center[i*3+1]; infile >> arr_center[i*3+2]; } infile.close(); cout<<"Initialized countline = "<<countline<<endl; //double thresdis = 1.0; double thresdis = -1.0; //not checking vector<double> vcenter; vector<int> vnameid; vcenter.push_back(arr_center[0]); vcenter.push_back(arr_center[1]); vcenter.push_back(arr_center[2]); vnameid.push_back(arr_nameid[0]); //double thresang = 150; double thresang = 200; //not checking //correction by thresholding distance for (int i=1; i<countline; i++) { int countv = vcenter.size(); if (diss2P(vcenter[countv-3],vcenter[countv-2],vcenter[countv-1],arr_center[i*3+0],arr_center[i*3+1],arr_center[i*3+2])<thresdis) { continue; } else { vcenter.push_back(arr_center[i*3+0]); vcenter.push_back(arr_center[i*3+1]); vcenter.push_back(arr_center[i*3+2]); vnameid.push_back(arr_nameid[i]); } } countline = vcenter.size()/3; memcpy(arr_center,vcenter.data(),sizeof(double)*countline*3); memcpy(arr_nameid,vnameid.data(),sizeof(int)*countline); //correction by thresholding angle vcenter.clear(); vcenter.push_back(arr_center[0]); vcenter.push_back(arr_center[1]); vcenter.push_back(arr_center[2]); vcenter.push_back(arr_center[3]); vcenter.push_back(arr_center[4]); vcenter.push_back(arr_center[5]); vnameid.clear(); vnameid.push_back(arr_nameid[0]); vnameid.push_back(arr_nameid[1]); double prevec[3]; prevec[0] = arr_center[3]-arr_center[0]; prevec[1] = arr_center[4]-arr_center[1]; prevec[2] = arr_center[5]-arr_center[2]; normalize(prevec,3); for (int i=2; i<countline; i++) { double curvec[3]; curvec[0] = 
arr_center[i*3+0]-arr_center[(i-1)*3+0]; curvec[1] = arr_center[i*3+1]-arr_center[(i-1)*3+1]; curvec[2] = arr_center[i*3+2]-arr_center[(i-1)*3+2]; normalize(curvec,3); double ang = computeAngle(prevec,curvec); if (ang>thresang) continue; memcpy(prevec,curvec,sizeof(double)*3); vcenter.push_back(arr_center[i*3+0]); vcenter.push_back(arr_center[i*3+1]); vcenter.push_back(arr_center[i*3+2]); vnameid.push_back(arr_nameid[i]); } //adding more vertices at the beginning and ending to have enough convolution points /* double firstpoint[3]; firstpoint[0] = vcenter[0]; firstpoint[1] = vcenter[1]; firstpoint[2] = vcenter[2]; int firstnameid = vnameid[0]; double lastpoint[3]; lastpoint[0] = vcenter[vcenter.size()-3]; lastpoint[1] = vcenter[vcenter.size()-2]; lastpoint[2] = vcenter[vcenter.size()-1]; int lastnameid = vnameid[vnameid.size()-1]; vcenter.insert(vcenter.begin(),firstpoint[2]); vcenter.insert(vcenter.begin(),firstpoint[1]); vcenter.insert(vcenter.begin(),firstpoint[0]); vnameid.insert(vnameid.begin(),firstnameid); vcenter.push_back(lastpoint[0]); vcenter.push_back(lastpoint[1]); vcenter.push_back(lastpoint[2]); vcenter.push_back(lastpoint[0]); vcenter.push_back(lastpoint[1]); vcenter.push_back(lastpoint[2]); vnameid.push_back(lastnameid); vnameid.push_back(lastnameid); */ printf("after correcting input\n"); countline = vcenter.size()/3; memcpy(arr_center,vcenter.data(),sizeof(double)*countline*3); memcpy(arr_nameid,vnameid.data(),sizeof(int)*countline); //clustering double disclus = 1.0; vector<vector<int>> vcluster; vector<double> vcenterclus; int i=0; while (i<countline) { vector<int> curclus; //curclus.push_back(arr_center[i*3]); //curclus.push_back(arr_center[i*3+1]); //curclus.push_back(arr_center[i*3+2]); curclus.push_back(i); while (i+1<countline) { if (diss2P(arr_center[i*3],arr_center[i*3+1],arr_center[i*3+2],arr_center[(i+1)*3],arr_center[(i+1)*3+1],arr_center[(i+1)*3+2])<disclus) { i++; //curclus.push_back(arr_center[i*3]); //curclus.push_back(arr_center[i*3+1]); //curclus.push_back(arr_center[i*3+2]); curclus.push_back(i); } else break; } i++; //compute center of this cluster int n = curclus.size(); double centerclus[3]; memset(centerclus,0,sizeof(double)*3); for (int j=0; j<n; j++) { centerclus[0] += arr_center[curclus[j]*3]; centerclus[1] += arr_center[curclus[j]*3+1]; centerclus[2] += arr_center[curclus[j]*3+2]; } vcenterclus.push_back(centerclus[0]/n); vcenterclus.push_back(centerclus[1]/n); vcenterclus.push_back(centerclus[2]/n); vcluster.push_back(curclus); } outdata = new short[size[0]*size[1]*countline]; cout<<"Initialized outdata"<<endl; int curnameind; float* filemem0 = NULL; float* filemem1 = NULL; int initalized = 0; double *imageDouble = NULL; int *d_dim; double *d_dir1; double *d_dir2; double *d_imageDouble; int *d_size; double *d_center; int count = 0; nin = nrrdNew(); Nrrd *ndblpng = nrrdNew(); float camfr[3], camat[3], camup[3], camnc, camfc, camFOV; int camortho; unsigned int camsize[2]; /* camfr[0] = arr_center[countline/2*3+0]; camfr[1] = arr_center[countline/2*3+1]; camfr[2] = arr_center[countline/2*3+2]-5; camat[0] = arr_center[countline/2*3+0]; camat[1] = arr_center[countline/2*3+1]; camat[2] = arr_center[countline/2*3+2]; camup[0] = 1; camup[1] = 0; camup[2] = 0; camnc = -10; camfc = 10; camFOV = 170; camortho = 1; camsize[0] = 500; camsize[1] = 500; */ //test synthetic data camfr[0] = arr_center[countline/2*3+0]; camfr[1] = arr_center[countline/2*3+1]; camfr[2] = arr_center[countline/2*3+2]-50; camat[0] = arr_center[countline/2*3+0]; camat[1] = 
arr_center[countline/2*3+1]; camat[2] = arr_center[countline/2*3+2]; camup[0] = 1; camup[1] = 0; camup[2] = 0; camnc = -100; camfc = 100; camFOV = 170; camortho = 1; camsize[0] = 500; camsize[1] = 500; //debug clicking /* camfr[0] = 0; camfr[1] = 1; camfr[2] = -6; camat[0] = 0; camat[1] = 1; camat[2] = -1; camup[0] = 1; camup[1] = 0; camup[2] = 0; camnc = -10; camfc = 10; camFOV = 90; camortho = 1; camsize[0] = 500; camsize[1] = 500; */ Hale::init(); //Hale::debugging = 1; Hale::Scene scene; /* then create viewer (in order to create the OpenGL context) */ Hale::Viewer viewer(camsize[0], camsize[1], "Viewer1", &scene); viewer.lightDir(glm::vec3(-1.0f, 1.0f, 3.0f)); viewer.camera.init(glm::vec3(camfr[0], camfr[1], camfr[2]), glm::vec3(camat[0], camat[1], camat[2]), glm::vec3(camup[0], camup[1], camup[2]), camFOV, (float)camsize[0]/camsize[1], camnc, camfc, camortho); viewer.current(); viewer.refreshCB((Hale::ViewerRefresher)render); viewer.refreshData(&viewer); Hale::Scene scene2; Hale::Viewer viewer2(camsize[0], camsize[1], "Viewer2", &scene2); viewer2.lightDir(glm::vec3(-1.0f, 1.0f, 3.0f)); viewer2.camera.init(glm::vec3(camfr[0], camfr[1], camfr[2]), glm::vec3(camat[0], camat[1], camat[2]), glm::vec3(camup[0], camup[1], camup[2]), camFOV, (float)camsize[0]/camsize[1], camnc, camfc, camortho); //viewer2.current(); //viewer2.refreshCB((Hale::ViewerRefresher)render); //viewer2.refreshData(&viewer2); //viewer.current(); printf("Initialized viewer\n"); Hale::Program *newprog = new Hale::Program("tex-vert-cpr.glsl","texdemo-frag.glsl"); newprog->compile(); newprog->bindAttribute(Hale::vertAttrIdxXYZW, "positionVA"); newprog->bindAttribute(Hale::vertAttrIdxRGBA, "colorVA"); newprog->bindAttribute(Hale::vertAttrIdxNorm, "normalVA"); newprog->bindAttribute(Hale::vertAttrIdxTex2, "tex2VA"); newprog->link(); double spherescale = 0.2; double spherescale_inter = 0.3; int density = 10; //how many points per one unit length in index-space int countls = 0; for (int i=1; i<countline-3; i++) { double dis = sqrt(diss2P(arr_center[i*3+0], arr_center[i*3+1], arr_center[i*3+2], arr_center[(i+1)*3+0], arr_center[(i+1)*3+1], arr_center[(i+1)*3+2])); countls += (dis*density); } int *ptotime = new int[countls]; double *ptofrac = new double[countls]; int *timetop = new int[countline]; memset(timetop,0,sizeof(int)*countline); limnPolyData *lpld3 = limnPolyDataNew(); limnPolyDataAlloc(lpld3, 0, countls, countls, 1); int cpointind = 0; for (int i=1; i<countline-3; i++) { double dis = sqrt(diss2P(arr_center[i*3+0], arr_center[i*3+1], arr_center[i*3+2], arr_center[(i+1)*3+0], arr_center[(i+1)*3+1], arr_center[(i+1)*3+2])); int countseg = dis*density; double tsep = 1.0/((double)countseg); timetop[i] = cpointind; for (int j=0; j<countseg; j++) { double curpoint[3]; if (kern==1) { for (int k=0; k<3; k++) curpoint[k] = cubicFilter<double>((double)j*tsep, arr_center[(i-1)*3+k], arr_center[(i)*3+k], arr_center[(i+1)*3+k], arr_center[(i+2)*3+k]); } else { for (int k=0; k<3; k++) curpoint[k] = ctmr((double)j*tsep, arr_center[(i-1)*3+k], arr_center[(i)*3+k], arr_center[(i+1)*3+k], arr_center[(i+2)*3+k]); } ELL_4V_SET(lpld3->xyzw + 4*cpointind, curpoint[0],curpoint[1],curpoint[2], 1.0); lpld3->indx[cpointind] = cpointind; ptotime[cpointind] = i; ptofrac[cpointind] = j*tsep; cpointind++; } } lpld3->type[0] = limnPrimitiveLineStrip; lpld3->icnt[0] = countls; printf("countls = %d\n", countls); //adding linestrip for original path limnPolyData *lpldorig = limnPolyDataNew(); limnPolyDataAlloc(lpldorig, 0, countline-3, countline-3, 1); 
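//
// NOTE (editor): the dense resampling loop above walks each segment [i, i+1] of the corrected
// track with roughly 'density' samples per index-space unit and records, for every sample k,
// its source segment ptotime[k] and its in-segment parameter ptofrac[k] in [0,1). Those two
// arrays are what the LEFT/RIGHT key handlers later pass to interpolVolAndRender() to step
// smoothly along the path; lpldorig (allocated just above, filled below) keeps the raw tracked
// centers for visual comparison. A minimal sketch of the spline evaluation used by the loop,
// written as a hypothetical helper (not part of the original source); 'kern' selects the cubic
// filter (1) or the Catmull-Rom reconstruction, exactly as in the loop above:
//
//   static void evalPathPoint(double t, int seg, const double *c, int kern, double out[3])
//   {
//       for (int k = 0; k < 3; k++)
//           out[k] = (kern == 1)
//               ? cubicFilter<double>(t, c[(seg-1)*3+k], c[seg*3+k], c[(seg+1)*3+k], c[(seg+2)*3+k])
//               : ctmr(t, c[(seg-1)*3+k], c[seg*3+k], c[(seg+1)*3+k], c[(seg+2)*3+k]);
//   }
//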
for (int i=1; i<countline-2; i++) { ELL_4V_SET(lpldorig->xyzw + 4*(i-1), arr_center[(i)*3+0],arr_center[(i)*3+1],arr_center[(i)*3+2], 1.0); lpldorig->indx[i-1] = i-1; } lpldorig->type[0] = limnPrimitiveLineStrip; lpldorig->icnt[0] = countline-3; Hale::Polydata *hpldorig = new Hale::Polydata(lpldorig, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "LineStrip"); hpldorig->colorSolid(1.0,1.0,1.0); scene.add(hpldorig); limnPolyData *lpld4 = limnPolyDataNew(); limnPolyDataSpiralTubeWrap(lpld4, lpld3, 0, NULL, 10, 4, 0.1); Hale::Polydata *hpld3 = new Hale::Polydata(lpld3, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "LineStrip"); hpld3->colorSolid(1.0,0.0,0.0); Hale::Polydata *hpld4 = new Hale::Polydata(lpld4, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "SpiralTube"); hpld4->colorSolid(1.0,1.0,0.5); scene.add(hpld4); vector<Hale::Polydata *> vtexture; vector<Hale::Polydata *> vtexture2; vector<Hale::Polydata *> vsphere; vector<Hale::Polydata *> vsphereorig; unsigned char *imageQuantized; imageQuantized = new unsigned char[size[0]*size[1]*4]; double prevFT[3], prevFN[3], prevFB[3]; printf("countline after adding boundary points = %d\n", countline); printf("arr_nameid[1] = %d\n", arr_nameid[1]); printf("arr_nameid[countline-3] = %d\n", arr_nameid[countline-3]); printf("New nameid and centers:\n"); for (int i=0; i<countline; i++) printf("%d %f %f %f\n", arr_nameid[i], arr_center[i*3+0], arr_center[i*3+1], arr_center[i*3+2]); //computing PCA double cov[9]; computeCovariance(arr_center,countline,cov); double eigval[3],eigval2[3]; eigenOfHess(cov,eigval); ell_3m_eigenvalues_d(eigval2, cov, 1); double seigval = eigval[0], seigval2 = eigval2[0]; for (int i=1; i<3; i++) if (seigval>eigval[i]) seigval = eigval[i]; for (int i=1; i<3; i++) if (seigval2>eigval2[i]) seigval2 = eigval2[i]; double eigenvec[3],eigenvec2[3]; computeEigenVec(cov,seigval,eigenvec); computeEigenVec(cov,seigval2,eigenvec2); printf("eigenvalues are: %f, %f, %f\n",eigval[0],eigval[1],eigval[2]); printf("eigenvalues based on Teem's function are: %f, %f, %f\n",eigval2[0],eigval2[1],eigval2[2]); printf("eigenvector is (%f,%f,%f)\n", eigenvec[0],eigenvec[1],eigenvec[2]); printf("eigenvector based on eigenvalue of Teem is (%f,%f,%f)\n", eigenvec2[0],eigenvec2[1],eigenvec2[2]); memcpy(eigenvec,eigenvec2,sizeof(double)*3); int nOutChannel = 4; for (int clusind=1; clusind<vcluster.size()-2; clusind++) { double dr[3],ddr[3]; if (kern==1) { for (int i=0; i<3; i++) { dr[i] = cubicFilter_G<double>(0, vcenterclus[(clusind-1)*3+i], vcenterclus[(clusind)*3+i], vcenterclus[(clusind+1)*3+i], vcenterclus[(clusind+2)*3+i]); ddr[i] = cubicFilter_GG<double>(0, vcenterclus[(clusind-1)*3+i], vcenterclus[(clusind)*3+i], vcenterclus[(clusind+1)*3+i], vcenterclus[(clusind+2)*3+i]); } } else { for (int i=0; i<3; i++) { dr[i] = ctmr_g(0, vcenterclus[(clusind-1)*3+i], vcenterclus[(clusind)*3+i], vcenterclus[(clusind+1)*3+i], vcenterclus[(clusind+2)*3+i]); ddr[i] = ctmr_gg(0, vcenterclus[(clusind-1)*3+i], vcenterclus[(clusind)*3+i], vcenterclus[(clusind+1)*3+i], vcenterclus[(clusind+2)*3+i]); } } //for (count = 1; count<countline-2; count++) for (int cluselem = 0; cluselem<vcluster[clusind].size(); cluselem++) { count = vcluster[clusind][cluselem]; curnameind = arr_nameid[count]; /* for (int i=0; i<3; i++) { center[i] = cubicFilter<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //center[i] = ctmr(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], 
arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); printf("ctmr computation: x=%f, a0=%f, a1=%f, a2=%f, a3=%f -> res=%f\n", 0.0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i],center[i]); } printf("center = %f %f %f\n", center[0],center[1],center[2]); */ double FT[3]; double FN[3],FB[3]; /* for (int i=0; i<3; i++) dr[i] = cubicFilter_G<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //dr[i] = ctmr_g(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); for (int i=0; i<3; i++) ddr[i] = cubicFilter_GG<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //ddr[i] = ctmr_gg(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); */ if (kern==1) { for (int i=0; i<3; i++) { center[i] = cubicFilter<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //dr[i] = cubicFilter_G<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //ddr[i] = cubicFilter_GG<double>(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } else { for (int i=0; i<3; i++) { center[i] = ctmr(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //dr[i] = ctmr_g(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //ddr[i] = ctmr_gg(0, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); normalize(dr,3); normalize(ddr,3); printf("after normalizing\n"); printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); memcpy(FT,dr,sizeof(double)*3); //double crossddrdr[3]; //cross(ddr,dr,crossddrdr); //cross(dr,crossddrdr,FN); memcpy(FN,eigenvec,sizeof(double)*3); normalize(FN,3); cross(FT,FN,FB); cross(FB,FT,FN); normalize(FN,3); normalize(FT,3); normalize(FB,3); memcpy(dir1,FN,sizeof(double)*3); memcpy(dir2,FB,sizeof(double)*3); printf("N = %f %f %f, B = %f %f %f, T = %f %f %f, dotNB = %f, dotNT = %f, dotBT = %f\n",FN[0],FN[1],FN[2],FB[0],FB[1],FB[2],FT[0],FT[1],FT[2], dotProduct(FN,FB,3),dotProduct(FN,FT,3),dotProduct(FB,FT,3)); if (count>1) { printf("count = %d\n", count); printf("angle of FT: %f\n", computeAngle(FT,prevFT)); printf("angle of FN: %f\n", computeAngle(FN,prevFN)); printf("angle of FB: %f\n", computeAngle(FB,prevFB)); } memcpy(prevFB,FB,sizeof(double)*3); memcpy(prevFN,FN,sizeof(double)*3); memcpy(prevFT,FT,sizeof(double)*3); limnPolyData *lpld = limnPolyDataNew(); limnPolyDataSquare(lpld, 1 << limnPolyDataInfoNorm | 1 << limnPolyDataInfoTex2); printf("after initializing lpld\n"); Hale::Polydata *hpld = new Hale::Polydata(lpld, true, NULL, "square"); hpld->program(newprog); glm::mat4 tmat = glm::mat4(); tmat[0][0] = FN[0]; tmat[0][1] = FN[1]; tmat[0][2] = FN[2]; tmat[0][3] = 0; tmat[1][0] = FB[0]; tmat[1][1] = FB[1]; tmat[1][2] = FB[2]; tmat[1][3] = 0; tmat[2][0] = FT[0]; tmat[2][1] = FT[1]; tmat[2][2] = FT[2]; tmat[2][3] = 0; tmat[3][0] = center[0]; tmat[3][1] = center[1]; tmat[3][2] = center[2]; tmat[3][3] = 1; glm::mat4 smat 
= glm::mat4(); smat[0][0] = 2; smat[1][1] = 2; glm::mat4 fmat = tmat*smat; hpld->model(fmat); //add a sphere limnPolyData *lpld2 = limnPolyDataNew(); limnPolyDataIcoSphere(lpld2, 1 << limnPolyDataInfoNorm, 3); Hale::Polydata *hpld2 = new Hale::Polydata(lpld2, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "IcoSphere"); hpld2->colorSolid(lerp(0,1,0,count,countline-1),lerp(1,0,0,count,countline-1),0.5); glm::mat4 fmat2 = glm::mat4(); fmat2[0][0] = spherescale; fmat2[1][1] = spherescale; fmat2[2][2] = spherescale; fmat2[3][0] = center[0]; fmat2[3][1] = center[1]; fmat2[3][2] = center[2]; fmat2[3][3] = 1; hpld2->model(fmat2); scene.add(hpld2); vsphere.push_back(hpld2); //adding sphere for original track path too limnPolyData *lpldorigsp = limnPolyDataNew(); limnPolyDataIcoSphere(lpldorigsp, 1 << limnPolyDataInfoNorm, 3); Hale::Polydata *hpldorigsp = new Hale::Polydata(lpldorigsp, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "IcoSphere"); hpldorigsp->colorSolid(lerp(0,1,0,count,countline-1),lerp(1,0,0,count,countline-1),0.5); fmat2[0][0] = spherescale; fmat2[1][1] = spherescale; fmat2[2][2] = spherescale; fmat2[3][0] = arr_center[(count)*3+0]; fmat2[3][1] = arr_center[(count)*3+1]; fmat2[3][2] = arr_center[(count)*3+2]; fmat2[3][3] = 1; hpldorigsp->model(fmat2); scene.add(hpldorigsp); vsphereorig.push_back(hpldorigsp); printf("after adding hpld to scene\n"); printf("added lpld\n"); cout<<"Before read in file, with curnameind = "<<curnameind<<", center = "<<center[0]<<" "<<center[1]<<" "<<center[2]<<endl; /* sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); cout<<"inname = "<<inname<<endl; if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); fprintf(stderr, "%s: trouble reading \"%s\":\n%s", me, inname, err); free(err); return; } cout<<"read file "<<inname<<endl; unsigned int pixSize; cudaChannelFormatDesc channelDesc; pixSize = sizeof(float); channelDesc = cudaCreateChannelDesc<float>(); if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } double mat_trans[4][4]; mat_trans[3][0] = mat_trans[3][1] = mat_trans[3][2] = 0; mat_trans[3][3] = 1; int dim[4]; if (nin->dim == 3) { dim[0] = 1; dim[1] = nin->axis[0].size; dim[2] = nin->axis[1].size; dim[3] = nin->axis[2].size; for (int i=0; i<3; i++) { for (int j=0; j<3; j++) { mat_trans[j][i] = nin->axis[i].spaceDirection[j]; } mat_trans[i][3] = nin->spaceOrigin[i]; } } else //4-channel { dim[0] = nin->axis[0].size; dim[1] = nin->axis[1].size; dim[2] = nin->axis[2].size; dim[3] = nin->axis[3].size; for (int i=0; i<3; i++) { for (int j=0; j<3; j++) { mat_trans[j][i] = nin->axis[i+1].spaceDirection[j]; } mat_trans[i][3] = nin->spaceOrigin[i]; } } int channel = 1; if (!initalized) { filemem0 = new float[dim[1]*dim[2]*dim[3]]; filemem1 = new float[dim[1]*dim[2]*dim[3]]; } for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } double mat_trans_inv[4][4]; invertMat44(mat_trans,mat_trans_inv); //tex3D stuff const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); printf("Array size: %f MB\n", dim[1]*dim[2]*dim[3]*sizeof(float)/1024.0/1024.0); size_t free_byte ; size_t total_byte ; cudaError_t errCu; errCu = cudaMemGetInfo( &free_byte, &total_byte ) ; if ( cudaSuccess != errCu ){ printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(errCu) ); exit(1); } double free_db = (double)free_byte ; double total_db = 
(double)total_byte ; double used_db = total_db - free_db ; printf("GPU memory usage (before copying memory to Device): used = %f MB, free = %f MB, total = %f MB\n", used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); if (!initalized) { cudaMalloc3DArray(&d_volumeArray[2], &channelDesc, volumeSize); } cudaMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_cudaPitchedPtr((void*)filemem0, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[2]; copyParams0.extent = volumeSize; copyParams0.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams0); */ size_t free_byte; size_t total_byte; double free_db; double total_db; double used_db; cudaError_t errCu; cudaChannelFormatDesc channelDesc; channelDesc = cudaCreateChannelDesc<float>(); //cudaArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); cudaArray* d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex2.normalized = false; // access with normalized texture coordinates tex2.filterMode = cudaFilterModeLinear; // linear interpolation tex2.addressMode[0] = cudaAddressModeBorder; // wrap texture coordinates tex2.addressMode[1] = cudaAddressModeBorder; tex2.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex2, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex5.normalized = false; // access with normalized texture coordinates tex5.filterMode = cudaFilterModeLinear; // linear interpolation tex5.addressMode[0] = cudaAddressModeBorder; // wrap texture coordinates tex5.addressMode[1] = cudaAddressModeBorder; tex5.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex5, d_curvolarr, channelDesc); errCu = cudaMemGetInfo( &free_byte, &total_byte ) ; if ( cudaSuccess != errCu ){ printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(errCu) ); exit(1); } free_db = (double)free_byte ; total_db = (double)total_byte ; used_db = total_db - free_db ; printf("GPU memory usage (after copying memory to Device): used = %f MB, free = %f MB, total = %f MB\n", used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); nOutChannel = 4; //int dim[4]; //memcpy(dim,queue.getDataDim(),sizeof(int)*4); //printf("dim = %d,%d,%d,%d\n",dim[0],dim[1],dim[2],dim[3]); if (!initalized) { imageDouble = new double[size[0]*size[1]*nOutChannel]; errCu = cudaMalloc(&d_dim, 4*sizeof(int)); if ( cudaSuccess != errCu ){ printf("Error in Malloc of d_dim: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMemcpy(d_dim, queue.getDataDim(), 4*sizeof(int), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in memcpy of d_dim: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMalloc(&d_dir1, 3*sizeof(double)); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMalloc(&d_dir2, 3*sizeof(double)); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMalloc(&d_imageDouble,sizeof(double)*size[0]*size[1]*nOutChannel); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMalloc(&d_size,2*sizeof(int)); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMemcpy(d_size,size,2*sizeof(int), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: 
%s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMalloc(&d_center,3*sizeof(double)); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } } errCu = cudaMemcpy(d_dir1, dir1, 3*sizeof(double), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMemcpy(d_dir2, dir2, 3*sizeof(double), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMemcpy(d_center,center,3*sizeof(double), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } int numThread1D = 16; dim3 threadsPerBlock(numThread1D,numThread1D); dim3 numBlocks((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D); kernel_cpr<<<numBlocks,threadsPerBlock>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); //kernel_cpr_2chan<<<numBlocks,threadsPerBlock>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error After first kernel_cpr: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); unsigned short width = size[0]; unsigned short height = size[1]; copyImageChannel<double,short>(imageDouble,4,size[0],size[1],1,outdata+count*size[0]*size[1],1,0); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); //drawNCircle(imageQuantized,4,size[0],size[1],0, count, countline/2,countline/2); hpld->setTexture((char*)"myTextureSampler",(unsigned char *)imageQuantized,size[0],size[1],4); scene.add(hpld); vtexture.push_back(hpld); drawCircle(imageQuantized,4,size[0],size[1],0,size[0]/2,size[1]/2,20); double trackedcenter[3]; trackedcenter[0] = arr_center[count*3]; trackedcenter[1] = arr_center[count*3+1]; trackedcenter[2] = arr_center[count*3+2]; double centerdiff[3]; subtractVec(trackedcenter,center,centerdiff,3); double coorfn, coorfb, coorft; coorfn = dotProduct(centerdiff,FN,3); coorfb = dotProduct(centerdiff,FB,3); coorft = dotProduct(centerdiff,FT,3); unsigned char color[3]; color[0] = color[1] = color[2] = 128; if (coorft<-swidth/2 || coorft>swidth/2) { color[0] = color[1] = color[2] = 0; } else { color[0] = lerp(255,0,-swidth/2,coorft,swidth/2); color[1] = lerp(0,0,-swidth/2,coorft,swidth/2); color[2] = lerp(0,255,-swidth/2,coorft,swidth/2); } drawCrossWithColor(imageQuantized,4,size[0],size[1],size[0]/2+coorfn*size[1]/verextent,size[1]/2+coorfb*size[1]/verextent,20,color); initalized = 1; sprintf(outnameslice,"cpr_seq_%d.png",curnameind); //if (nrrdWrap_va(ndblpng, imageQuantized, nrrdTypeUChar, 3, 4, width, height) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // printf("error in saving cpr_seq_X.png, width=%d, height=%d\n",width,height); // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } } } /* //testing synthetic data----------------------------- printf("before testing synthetic 
data\n"); { size_t free_byte; size_t total_byte; double free_db; double total_db; double used_db; int count = 5; dir1[0] = 1; dir1[1] = 0; dir1[2] = 0; dir2[0] = 0; dir2[1] = 1; dir2[2] = 0; center[0] = queue.getDataDim()[1]/2; center[1] = queue.getDataDim()[2]/2; center[2] = queue.getDataDim()[3]/2; cudaError_t errCu; cudaChannelFormatDesc channelDesc; channelDesc = cudaCreateChannelDesc<float>(); //cudaArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); cudaArray* d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex2.normalized = false; // access with normalized texture coordinates tex2.filterMode = cudaFilterModeLinear; // linear interpolation tex2.addressMode[0] = cudaAddressModeBorder; // wrap texture coordinates tex2.addressMode[1] = cudaAddressModeBorder; tex2.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex2, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex5.normalized = false; // access with normalized texture coordinates tex5.filterMode = cudaFilterModeLinear; // linear interpolation tex5.addressMode[0] = cudaAddressModeBorder; // wrap texture coordinates tex5.addressMode[1] = cudaAddressModeBorder; tex5.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex5, d_curvolarr, channelDesc); errCu = cudaMemGetInfo( &free_byte, &total_byte ) ; if ( cudaSuccess != errCu ){ printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(errCu) ); exit(1); } free_db = (double)free_byte ; total_db = (double)total_byte ; used_db = total_db - free_db ; printf("GPU memory usage (after copying memory to Device): used = %f MB, free = %f MB, total = %f MB\n", used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); nOutChannel = 4; //int dim[4]; //memcpy(dim,queue.getDataDim(),sizeof(int)*4); //printf("dim = %d,%d,%d,%d\n",dim[0],dim[1],dim[2],dim[3]); if (!initalized) { imageDouble = new double[size[0]*size[1]*nOutChannel]; errCu = cudaMalloc(&d_dim, 4*sizeof(int)); if ( cudaSuccess != errCu ){ printf("Error in Malloc of d_dim: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMemcpy(d_dim, queue.getDataDim(), 4*sizeof(int), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in memcpy of d_dim: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMalloc(&d_dir1, 3*sizeof(double)); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMalloc(&d_dir2, 3*sizeof(double)); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMalloc(&d_imageDouble,sizeof(double)*size[0]*size[1]*nOutChannel); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMalloc(&d_size,2*sizeof(int)); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMemcpy(d_size,size,2*sizeof(int), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMalloc(&d_center,3*sizeof(double)); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } } errCu = cudaMemcpy(d_dir1, dir1, 3*sizeof(double), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = 
cudaMemcpy(d_dir2, dir2, 3*sizeof(double), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMemcpy(d_center,center,3*sizeof(double), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in Malloc or memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } int numThread1D = 16; dim3 threadsPerBlock(numThread1D,numThread1D); dim3 numBlocks((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D); //swidth = queue.getDataDim()[3]; //verextent-=100; kernel_peak<<<numBlocks,threadsPerBlock>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, queue.getDataDim()[3], 0.1, nOutChannel, d_imageDouble); //kernel_cpr<<<numBlocks,threadsPerBlock>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, queue.getDataDim()[3], 0.05, nOutChannel, d_imageDouble); //kernel_cpr_2chan<<<numBlocks,threadsPerBlock>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, queue.getDataDim()[3], 0.1, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error After first kernel_cpr: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); short width = size[0]; short height = size[1]; copyImageChannel<double,short>(imageDouble,4,size[0],size[1],1,outdata+count*size[0]*size[1],1,0); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); initalized = 1; sprintf(outnameslice,"synthetic.png"); if (nrrdWrap_va(ndblpng, imageQuantized, nrrdTypeUChar, 3, 4, width, height) || nrrdSave(outnameslice, ndblpng, NULL) ) { char *err = biffGetDone(NRRD); printf("%s: couldn't save output:\n%s", argv[0], err); free(err); nrrdNix(ndblpng); exit(1); } } printf("finished writing test result\n"); return; //end of testing for synthetic data------------------ */ cout<<"Before allocating output nrrd"<<endl; Nrrd *ndbl = nrrdNew(); //cout<<"Before saving output nrrd"<<endl; //if (nrrdWrap_va(ndbl, outdata, nrrdTypeShort, 3, size[0], size[1], countline) // || nrrdSave(outname,ndbl,NULL) // ) //{ // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndbl); // exit(1); //} viewer2.current(); printf("after setting viewer2.current()\n"); limnPolyData *lpldview2 = limnPolyDataNew(); limnPolyDataSquare(lpldview2, 1 << limnPolyDataInfoNorm | 1 << limnPolyDataInfoTex2); Hale::Polydata *hpldview2 = new Hale::Polydata(lpldview2, true, NULL,//Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "square"); Hale::Program *newprog2 = new Hale::Program("texdemo-vert.glsl","texdemo-frag.glsl"); //Hale::Program *newprog2 = new Hale::Program("tex-vert-cpr.glsl","texdemo-frag.glsl"); newprog2->compile(); newprog2->bindAttribute(Hale::vertAttrIdxXYZW, "positionVA"); newprog2->bindAttribute(Hale::vertAttrIdxRGBA, "colorVA"); newprog2->bindAttribute(Hale::vertAttrIdxNorm, "normalVA"); newprog2->bindAttribute(Hale::vertAttrIdxTex2, "tex2VA"); newprog2->link(); hpldview2->program(newprog2); //find lerping between 2 volumes count = 1; /* curnameind = arr_nameid[count]; sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); 
fprintf(stderr, "%s: trouble reading \"%s\":\n%s", me, inname, err); free(err); return; } cout<<"read file "<<inname<<endl; unsigned int pixSize; cudaChannelFormatDesc channelDesc; pixSize = sizeof(float); channelDesc = cudaCreateChannelDesc<float>(); if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } int dim[4]; if (nin->dim == 3) { dim[0] = 1; dim[1] = nin->axis[0].size; dim[2] = nin->axis[1].size; dim[3] = nin->axis[2].size; } else //4-channel { dim[0] = nin->axis[0].size; dim[1] = nin->axis[1].size; dim[2] = nin->axis[2].size; dim[3] = nin->axis[3].size; } int channel = 1; if (!filemem0) { filemem0 = new float[dim[1]*dim[2]*dim[3]]; filemem1 = new float[dim[1]*dim[2]*dim[3]]; } for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } //debug for (int k=0; k<=2; k++) for (int j=0; j<=2; j++) for (int i=0; i<=2; i++) printf("volume 1: at (%d,%d,%d) = %f\n", i,j,k,filemem0[k*dim[1]*dim[2]+j*dim[1]+i]); const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); if (!d_volumeArray[0]) cudaMalloc3DArray(&d_volumeArray[0], &channelDesc, volumeSize); cudaMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_cudaPitchedPtr((void*)filemem0, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[0]; copyParams0.extent = volumeSize; copyParams0.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams0); */ cudaChannelFormatDesc channelDesc; channelDesc = cudaCreateChannelDesc<float>(); //cudaArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); cudaArray* d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex0.normalized = false; tex0.filterMode = cudaFilterModeLinear; tex0.addressMode[0] = cudaAddressModeBorder; tex0.addressMode[1] = cudaAddressModeBorder; tex0.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex0, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex3.normalized = false; tex3.filterMode = cudaFilterModeLinear; tex3.addressMode[0] = cudaAddressModeBorder; tex3.addressMode[1] = cudaAddressModeBorder; tex3.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex3, d_curvolarr, channelDesc); //read second file count = 2; /* curnameind = arr_nameid[count]; sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); fprintf(stderr, "%s: trouble reading \"%s\":\n%s", me, inname, err); free(err); return; } cout<<"read file "<<inname<<endl; if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } if (!d_volumeArray[1]) cudaMalloc3DArray(&d_volumeArray[1], &channelDesc, volumeSize); cudaMemcpy3DParms copyParams1 = {0}; copyParams1.srcPtr = make_cudaPitchedPtr((void*)filemem0, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray[1]; copyParams1.extent = volumeSize; copyParams1.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams1); */ //d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex1.normalized 
= false; tex1.filterMode = cudaFilterModeLinear; tex1.addressMode[0] = cudaAddressModeBorder; tex1.addressMode[1] = cudaAddressModeBorder; tex1.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex1, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex4.normalized = false; tex4.filterMode = cudaFilterModeLinear; tex4.addressMode[0] = cudaAddressModeBorder; tex4.addressMode[1] = cudaAddressModeBorder; tex4.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex4, d_curvolarr, channelDesc); int curinterp = (timetop[1]+timetop[2])/2; double alpha = ptofrac[curinterp]; cudaError_t errCu; float *d_volmem, *d_volmem2; int *dim = queue.getDataDim(); const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); int pixSize = sizeof(float); cudaMalloc(&d_volmem,sizeof(float)*dim[1]*dim[2]*dim[3]); cudaMalloc(&d_volmem2,sizeof(float)*dim[1]*dim[2]*dim[3]); int numThread1D = 8; dim3 threadsPerBlock(numThread1D,numThread1D,numThread1D); dim3 numBlocks((dim[1]+numThread1D-1)/numThread1D,(dim[2]+numThread1D-1)/numThread1D,(dim[3]+numThread1D-1)/numThread1D); //kernel_interpol<<<numBlocks,threadsPerBlock>>>(d_volmem,d_dim,alpha); kernel_interpol2<<<numBlocks,threadsPerBlock>>>(d_volmem,d_volmem2,d_dim,alpha); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); if (!d_volumeArray[NTEX]) { cudaMalloc3DArray(&d_volumeArray[NTEX], &channelDesc, volumeSize); cudaMalloc3DArray(&d_volumeArray1[NTEX], &channelDesc, volumeSize); printf("d_volumeArray[NTEX] allocated\n"); } errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error after allocating d_volumeArray[NTEX]: %s\n", cudaGetErrorString(errCu)); cudaMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_cudaPitchedPtr((void*)d_volmem, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[NTEX]; copyParams0.extent = volumeSize; copyParams0.kind = cudaMemcpyDeviceToDevice; cudaMemcpy3D(&copyParams0); cudaMemcpy3DParms copyParams1 = {0}; copyParams1.srcPtr = make_cudaPitchedPtr((void*)d_volmem2, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray1[NTEX]; copyParams1.extent = volumeSize; copyParams1.kind = cudaMemcpyDeviceToDevice; cudaMemcpy3D(&copyParams1); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error after copying mem from d_volmem to d_volumeArray[NTEX]: %s\n", cudaGetErrorString(errCu)); //cudaArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); tex2.normalized = false; tex2.filterMode = cudaFilterModeLinear; tex2.addressMode[0] = cudaAddressModeBorder; tex2.addressMode[1] = cudaAddressModeBorder; tex2.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex2, d_volumeArray[NTEX], channelDesc); tex5.normalized = false; tex5.filterMode = cudaFilterModeLinear; tex5.addressMode[0] = cudaAddressModeBorder; tex5.addressMode[1] = cudaAddressModeBorder; tex5.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex5, d_volumeArray1[NTEX], channelDesc); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error after copying mem from d_volmem to tex2: %s\n", cudaGetErrorString(errCu)); count = 1; /* for (int i=0; i<3; i++) center[i] = cubicFilter<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], 
arr_center[(count+2)*3+i]); //center[i] = ctmr(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); printf("center = %f %f %f\n", center[0],center[1],center[2]); */ double FT[3]; double FN[3],FB[3]; double dr[3],ddr[3]; /* for (int i=0; i<3; i++) dr[i] = cubicFilter_G<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //dr[i] = ctmr_g(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); for (int i=0; i<3; i++) ddr[i] = cubicFilter_GG<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //ddr[i] = ctmr_gg(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); */ if (kern==1) { for (int i=0; i<3; i++) { center[i] = cubicFilter<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); dr[i] = cubicFilter_G<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); ddr[i] = cubicFilter_GG<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } else { for (int i=0; i<3; i++) { center[i] = ctmr(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); dr[i] = ctmr_g(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); ddr[i] = ctmr_gg(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); normalize(dr,3); normalize(ddr,3); printf("after normalizing\n"); printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); memcpy(FT,dr,sizeof(double)*3); memcpy(FN,eigenvec,sizeof(double)*3); normalize(FN,3); cross(FT,FN,FB); cross(FB,FT,FN); memcpy(dir1,FN,sizeof(double)*3); memcpy(dir2,FB,sizeof(double)*3); printf("Interpolation: N = %f %f %f, B = %f %f %f, T = %f %f %f, dotNB = %f, dotNT = %f, dotBT = %f\n",FN[0],FN[1],FN[2],FB[0],FB[1],FB[2],FT[0],FT[1],FT[2], dotProduct(FN,FB,3),dotProduct(FN,FT,3),dotProduct(FB,FT,3)); //cudaMemcpy(d_dir1, dir1, 3*sizeof(double), cudaMemcpyHostToDevice); //cudaMemcpy(d_dir2, dir2, 3*sizeof(double), cudaMemcpyHostToDevice); //cudaMemcpy(d_center,center,3*sizeof(double), cudaMemcpyHostToDevice); errCu = cudaMemcpy(d_dir1, dir1, 3*sizeof(double), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMemcpy(d_dir2, dir2, 3*sizeof(double), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaMemcpy(d_center, center, 3*sizeof(double), cudaMemcpyHostToDevice); if ( cudaSuccess != errCu ){ printf("Error in memcpy: %s \n", cudaGetErrorString(errCu) ); exit(1); } errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error before kernel_cpr of the first interpolated point: %s\n", cudaGetErrorString(errCu)); numThread1D = 16; dim3 threadsPerBlock2(numThread1D,numThread1D); dim3 numBlocks2((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D); errCu = 
cudaGetLastError(); if (errCu != cudaSuccess) printf("Error before kernel_cpr of the first interpolated point, after allocating blocksize: %s\n", cudaGetErrorString(errCu)); kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); //kernel_cpr_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error in kernel_cpr of the first interpolated point: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); //save nrrd file to test sprintf(outnameslice,"test.nrrd"); //if (nrrdWrap_va(ndblpng, imageDouble, nrrdTypeDouble, 3, 4, size[0], size[1]) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } short width = size[0]; short height = size[1]; //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); hpldview2->setTexture((char*)"myTextureSampler",(unsigned char *)imageQuantized,size[0],size[1],4); //add the MIP in the interpolated position viewer.current(); limnPolyData *lpld_sq_inter = limnPolyDataNew(); limnPolyDataSquare(lpld_sq_inter, 1 << limnPolyDataInfoNorm | 1 << limnPolyDataInfoTex2); Hale::Polydata *hpld_sq_inter = new Hale::Polydata(lpld_sq_inter, true, NULL, "square"); Hale::Program *newprog3 = new Hale::Program("tex-vert-cpr.glsl","texdemo-frag.glsl"); //Hale::Program *newprog3 = new Hale::Program("texdemo-vert.glsl","texdemo-frag.glsl"); newprog3->compile(); newprog3->bindAttribute(Hale::vertAttrIdxXYZW, "positionVA"); newprog3->bindAttribute(Hale::vertAttrIdxRGBA, "colorVA"); newprog3->bindAttribute(Hale::vertAttrIdxNorm, "normalVA"); newprog3->bindAttribute(Hale::vertAttrIdxTex2, "tex2VA"); newprog3->link(); hpld_sq_inter->program(newprog3); glm::mat4 tmat_sq_inter = glm::mat4(); tmat_sq_inter[0][0] = FN[0]; tmat_sq_inter[0][1] = FN[1]; tmat_sq_inter[0][2] = FN[2]; tmat_sq_inter[0][3] = 0; tmat_sq_inter[1][0] = FB[0]; tmat_sq_inter[1][1] = FB[1]; tmat_sq_inter[1][2] = FB[2]; tmat_sq_inter[1][3] = 0; tmat_sq_inter[2][0] = FT[0]; tmat_sq_inter[2][1] = FT[1]; tmat_sq_inter[2][2] = FT[2]; tmat_sq_inter[2][3] = 0; tmat_sq_inter[3][0] = center[0]; tmat_sq_inter[3][1] = center[1]; tmat_sq_inter[3][2] = center[2]; tmat_sq_inter[3][3] = 1; glm::mat4 smat_sq_inter = glm::mat4(); smat_sq_inter[0][0] = 2; smat_sq_inter[1][1] = 2; glm::mat4 fmat_sq_inter = tmat_sq_inter*smat_sq_inter; hpld_sq_inter->model(fmat_sq_inter); hpld_sq_inter->setTexture((char*)"myTextureSampler",(unsigned char *)imageQuantized,size[0],size[1],4); scene.add(hpld_sq_inter); //add a sphere for the interpolated position limnPolyData *lpld2 = limnPolyDataNew(); limnPolyDataIcoSphere(lpld2, 1 << limnPolyDataInfoNorm, 3); Hale::Polydata *hpld_inter = new Hale::Polydata(lpld2, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "IcoSphere"); hpld_inter->colorSolid(0,0,1.0); glm::mat4 fmat2 = glm::mat4(); 
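//
// NOTE (editor): fmat2 below is the model matrix of the blue marker sphere at the interpolated
// path position: a uniform scale of spherescale_inter on the diagonal and the translation written
// into column 3 (glm stores matrices column-major, so fmat2[3][i] is the i-th translation
// component). This assumes glm::mat4() initializes to the identity, which is true in the GLM
// versions this pattern targets. An equivalent construction, sketch only, assuming
// <glm/gtc/matrix_transform.hpp> is included:
//
//   glm::mat4 m = glm::translate(glm::mat4(1.0f),
//                                glm::vec3(center[0], center[1], center[2]))
//               * glm::scale(glm::mat4(1.0f), glm::vec3((float)spherescale_inter));
//   hpld_inter->model(m);
//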
fmat2[0][0] = spherescale_inter; fmat2[1][1] = spherescale_inter; fmat2[2][2] = spherescale_inter; fmat2[3][0] = center[0]; fmat2[3][1] = center[1]; fmat2[3][2] = center[2]; fmat2[3][3] = 1; printf("center of the first interpolated point: %f,%f,%f\n", center[0],center[1],center[2]); hpld_inter->model(fmat2); scene.add(hpld_inter); viewer2.current(); curVolInMem = 1; scene2.add(hpldview2); //scene2.add(hpld3); scene2.drawInit(); printf("after adding to scene2 and drawInit()\n"); viewer2.verbose(3); render(&viewer2); printf("after rendering viewer2\n"); viewer.current(); viewer.verbose(3); //add a testing sphere /* viewer.current(); limnPolyData *lpldtestsphere = limnPolyDataNew(); limnPolyDataIcoSphere(lpldtestsphere, 1 << limnPolyDataInfoNorm, 3); Hale::Polydata *hpldtestsphere = new Hale::Polydata(lpldtestsphere, true, Hale::ProgramLib(Hale::preprogramAmbDiffSolid), "IcoSphere"); hpldtestsphere->colorSolid(1.0,0,0); glm::mat4 fmat3 = glm::mat4(); fmat3[0][0] = 1; fmat3[1][1] = 1; fmat3[2][2] = 1; fmat3[3][0] = 4; fmat3[3][1] = 5; fmat3[3][2] = 5; fmat3[3][3] = 1; hpldtestsphere->model(fmat3); scene.add(hpldtestsphere); */ ///////////////////////////////// cout<<"After saving output nrrd"<<endl; scene.drawInit(); printf("after scene.drawInit()\n"); render(&viewer); printf("after render(&viewer)\n"); //getting Z-buffer printf("viewer: width = %d, height = %d, widthBuffer = %d, heightBuffer = %d\n", viewer.width(),viewer.height(),viewer.widthBuffer(),viewer.heightBuffer()); GLfloat* zbuffer = new GLfloat[viewer.widthBuffer()*viewer.heightBuffer()]; glReadPixels(0,0,viewer.widthBuffer(),viewer.heightBuffer(),GL_DEPTH_COMPONENT,GL_FLOAT,zbuffer); printf("Z-buffer\n"); int wposw = 56; int hposw = 62; double dposw = zbuffer[hposw*viewer.widthBuffer()+wposw]; printf("Before converting: wpos = %d, hpos = %d, dpos = %f\n", wposw,hposw,dposw); glm::vec4 wposworld = convertDepthBuffToWorldPos(wposw,hposw,dposw,&viewer); printf("After converting: x,y,z = %f,%f,%f\n", wposworld.x,wposworld.y,wposworld.z); glm::vec4 vpostest,wpostest; wpostest.x = arr_center[3]; wpostest.y = arr_center[4]; wpostest.z = arr_center[5]; wpostest.w = 1; vpostest = viewer.camera.view()*wpostest; printf("World Pos Test = %f,%f,%f; View Pos Test = %f,%f,%f\n", wpostest.x,wpostest.y,wpostest.z,vpostest.x,vpostest.y,vpostest.z); float minz=1000,maxz=-1000; for (int i=0; i<viewer.widthBuffer()*viewer.heightBuffer(); i++) { zbuffer[i] = linearizeDepthOrtho(lerp(-1,1,0,zbuffer[i],1),viewer.camera.clipNear(),viewer.camera.clipFar()); if (zbuffer[i]<minz) minz = zbuffer[i]; if (zbuffer[i]>maxz) maxz = zbuffer[i]; } printf("minmaxz = (%f,%f)\n",minz,maxz ); saveImage<GLfloat>(viewer.widthBuffer(),viewer.heightBuffer(),1,zbuffer,"depth.tga"); bool stateBKey = false; bool stateMKey = false; bool stateNKey = false; bool stateZoom = false; bool stateXKey = false; bool statePKey = false; bool stateDKey = false; bool stateFKey = false; bool stateIKey = false; double lastX, lastY; double verextent2 = verextent; bool isHoldOn = false; bool checkPath = false; int stateBKeyInt = 0; GLfloat* zbufferC = new GLfloat[viewer.widthBuffer()*viewer.heightBuffer()]; unsigned char *zbufferM = new unsigned char[viewer.widthBuffer()*viewer.heightBuffer()]; Nrrd *zbufferNin = nrrdNew(); Nrrd *zbufferDis = nrrdNew(); //saving approximately equidistant images for constructing space-time visualization /* for (int curptmp = 0; curptmp<countls; curptmp++) { interpolVolAndRender(curVolInMem, ptotime[curptmp], ptofrac[curptmp], queue, arr_nameid, arr_center, 
pathprefix, mop, pixSize, dim, size, eigenvec, verextent2, swidth, sstep, nOutChannel, d_volmem, d_dim, d_size, d_dir1, d_dir2, d_center, imageDouble, d_imageDouble, imageQuantized, viewer, viewer2, hpldview2, hpld_inter, spherescale_inter, hpld_sq_inter); sprintf(outnameslice,"spacetime_hp/im_%d.png",curptmp); if (nrrdWrap_va(ndblpng, imageQuantized, nrrdTypeUChar, 3, 4, width, height) || nrrdSave(outnameslice, ndblpng, NULL) ) { char *err = biffGetDone(NRRD); printf("%s: couldn't save output:\n%s", argv[0], err); free(err); nrrdNix(ndblpng); exit(1); } } */ int tmpcount = 0; //clock_t begin = clock(); time_t start,end,starti,endi; time (&start); time (&starti); //used for rotating the axis in the second window glm::vec3 preFrom = viewer2.camera.from(); glm::vec3 preAt = viewer2.camera.at(); glm::vec3 preUp = viewer2.camera.up(); //considers (from,at) as y, up as z, and x is side vector (right-handed coordinates) glm::vec3 preZ = glm::normalize(preUp); glm::vec3 preY = glm::normalize(preAt-preFrom); glm::vec3 preX = glm::cross(preY,preZ); //inverse (transpose) of the coordinates (glm matrix is initialized by column-major) glm::mat3 preCoI = glm::mat3(preX[0],preY[0],preZ[0], preX[1],preY[1],preZ[1], preX[2],preY[2],preZ[2]); while(!Hale::finishing){ glfwWaitEvents(); int keyPressed = viewer.getKeyPressed(); int keyPressed2 = viewer2.getKeyPressed(); if (keyPressed == GLFW_KEY_LEFT) { if (curinterp>0) { curinterp--; interpolVolAndRender(curVolInMem, ptotime[curinterp], ptofrac[curinterp], queue, arr_nameid, arr_center, pathprefix, mop, pixSize, dim, size, eigenvec, verextent2, swidth, sstep, nOutChannel, d_volmem, d_volmem2, d_dim, d_size, d_dir1, d_dir2, d_center, imageDouble, d_imageDouble, imageQuantized, viewer, viewer2, hpldview2, hpld_inter, spherescale_inter, hpld_sq_inter, statePKey, kern, stateIKey); } } else if (keyPressed == GLFW_KEY_RIGHT) { if (curinterp<countls-1) { curinterp++; interpolVolAndRender(curVolInMem, ptotime[curinterp], ptofrac[curinterp], queue, arr_nameid, arr_center, pathprefix, mop, pixSize, dim, size, eigenvec, verextent2, swidth, sstep, nOutChannel, d_volmem, d_volmem2, d_dim, d_size, d_dir1, d_dir2, d_center, imageDouble, d_imageDouble, imageQuantized, viewer, viewer2, hpldview2, hpld_inter, spherescale_inter, hpld_sq_inter, statePKey, kern, stateIKey); } } if (stateBKey!=viewer.getStateBKey()) { stateBKey = viewer.getStateBKey(); stateBKeyInt = (stateBKeyInt+1)%3; if (stateBKeyInt == 1) { scene.remove(hpld4); scene.add(hpld3); } else if (stateBKeyInt == 2) { scene.remove(hpld3); } else { //scene.remove(hpld3); scene.add(hpld4); } } if (keyPressed == 'M') { stateMKey = !stateMKey; if (stateMKey) { for (int i=0; i<vtexture.size(); i++) scene.remove(vtexture[i]); } else { for (int i=0; i<vtexture.size(); i++) scene.add(vtexture[i]); } } if (keyPressed == 'D') { stateDKey = !stateDKey; if (stateDKey) { scene.remove(hpld_inter); } else { scene.add(hpld_inter); } } if (keyPressed == 'F') { stateFKey = !stateFKey; if (stateFKey) { scene.remove(hpld_sq_inter); } else { scene.add(hpld_sq_inter); } } if (keyPressed == 'N') { stateNKey = !stateNKey; if (stateNKey) { for (int i=0; i<vsphereorig.size(); i++) scene.remove(vsphereorig[i]); scene.remove(hpldorig); } else { for (int i=0; i<vsphereorig.size(); i++) scene.add(vsphereorig[i]); scene.add(hpldorig); } } if (keyPressed == 'X') { stateXKey = !stateXKey; if (stateXKey) { for (int i=0; i<vsphere.size(); i++) scene.remove(vsphere[i]); } else { for (int i=0; i<vsphere.size(); i++) scene.add(vsphere[i]); } } if 
(keyPressed2 == 'I') { time (&endi); double dif = difftime (endi,starti); if (dif>0.3) { time (&starti); stateIKey = !stateIKey; if (statePKey) { printf("statePKey = True, doing kernel_peak, tmpcount = %d\n",tmpcount); if (stateIKey) kernel_peak_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_peak<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); //debug //sprintf(outnameslice,"test_peak.nrrd"); //if (nrrdWrap_va(ndblpng, imageDouble, nrrdTypeDouble, 3, 4, size[0], size[1]) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } else { printf("statePKey = false, doing kernel_cpr, tmpcount = %d\n",tmpcount); if (stateIKey) kernel_cpr_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } /* if (!statePKey) { if (stateIKey) kernel_cpr_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } else { if (stateIKey) kernel_peak_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_peak<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, 
swidth, sstep, nOutChannel, d_imageDouble); } errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); //} */ } } //switching between maxima and MIP in window2 if (keyPressed2 == 'P') { tmpcount++; //clock_t end = clock(); //double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; time (&end); double dif = difftime (end,start); printf("dif time = %f\n", dif); if (dif>0.3) { time (&start); statePKey = !statePKey; printf("statePKey = %d\n",statePKey); if (statePKey) { printf("statePKey = True, doing kernel_peak, tmpcount = %d\n",tmpcount); if (stateIKey) kernel_peak_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_peak<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); //debug //sprintf(outnameslice,"test_peak.nrrd"); //if (nrrdWrap_va(ndblpng, imageDouble, nrrdTypeDouble, 3, 4, size[0], size[1]) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } else { printf("statePKey = false, doing kernel_cpr, tmpcount = %d\n",tmpcount); if (stateIKey) kernel_cpr_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); 
//quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } } } //increasing & decreasing width of the slab if (keyPressed2 == GLFW_KEY_MINUS || keyPressed2 == GLFW_KEY_EQUAL) { tmpcount++; //clock_t end = clock(); //double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; time (&end); double dif = difftime (end,start); printf("dif time = %f\n", dif); if (dif>0.3) { time (&start); if (keyPressed2 == GLFW_KEY_MINUS) swidth-=2; else swidth+=2; if (statePKey) { if (stateIKey) kernel_peak_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_peak<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); //debug sprintf(outnameslice,"test_peak.nrrd"); //if (nrrdWrap_va(ndblpng, imageDouble, nrrdTypeDouble, 3, 4, size[0], size[1]) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } else { if (stateIKey) kernel_cpr_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char 
*)imageQuantized,size[0],size[1],4); } } } //processing zooming in the second window (MIP image) if (stateZoom) { if (!viewer2.getButton(0)) { /* double curY = viewer2.getLastY(); int heightBuff = viewer2.heightBuffer(); double pcent = (curY-lastY)/heightBuff; printf("percent zoom = %f (curY = %f, lastY = %f, heightBuff = %d)\n", pcent, curY,lastY,heightBuff); stateZoom = false; verextent2 = verextent2*(1+pcent); kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); */ stateZoom = false; } else if (std::isnan(lastY)) lastY = viewer2.getLastY(); else { double curY = viewer2.getLastY(); int heightBuff = viewer2.heightBuffer(); double pcent = (curY-lastY)/heightBuff; printf("percent zoom = %f (curY = %f, lastY = %f, heightBuff = %d)\n", pcent, curY,lastY,heightBuff); //stateZoom = false; verextent2 = verextent2*(1+pcent); if (statePKey) { // kernel_peak<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); if (stateIKey) kernel_peak_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_peak<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } else { if (stateIKey) kernel_cpr_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); if (statePKey) quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); else quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); lastY = curY; } } else if (viewer2.getButton(0) && viewer2.getMode()==Hale::viewerModeZoom) { stateZoom = true; lastY = viewer2.getLastY(); printf("Begin zooming: lastY = %f\n",lastY); } if (viewer.getButton(0)) { if (!isHoldOn) { glReadPixels(0,0,viewer.widthBuffer(),viewer.heightBuffer(),GL_DEPTH_COMPONENT,GL_FLOAT,zbufferC); //testing // sprintf(outnameslice,"depth.nrrd"); // if (nrrdWrap_va(ndblpng, zbufferC, 
nrrdTypeFloat, 2, viewer.widthBuffer(), viewer.heightBuffer()) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); // } //end of testing int wposwC = viewer.getClickedX(); int hposwC = viewer.heightBuffer()-viewer.getClickedY(); double dposwC = zbufferC[hposwC*viewer.widthBuffer()+wposwC]; printf("First Clicked (w,h,depth) = %d,%d,%f\n", wposwC,hposwC,dposwC); glm::vec4 wposviewC = convertDepthBuffToViewPos(wposwC,hposwC,dposwC,&viewer); printf("First Clicked View Pos = %f,%f,%f\n", wposviewC.x,wposviewC.y,wposviewC.z); //debug //glm::vec4 testpostview = convertWorldToViewPos(4,5,4,&viewer); //printf("View Pos of World Pos (4,5,4) is = %f,%f,%f\n", testpostview.x,testpostview.y,testpostview.z); //// isHoldOn = true; checkPath = (dposwC<1.0); viewer.setPaused(checkPath); if (checkPath) { printf("Inside checkPath of first click\n"); for (int ii=0; ii<viewer.widthBuffer()*viewer.heightBuffer(); ii++) { if (zbufferC[ii]!=1.0) zbufferM[ii] = 255; else zbufferM[ii] = 0; } printf("after assigning zbufferM\n"); if (nrrdWrap_va(zbufferNin, zbufferM, nrrdTypeUChar, 2, viewer.widthBuffer(), viewer.heightBuffer()) ) { char *err = biffGetDone(NRRD); printf("%s: Error wrapping Nrrd:\n%s", argv[0], err); free(err); nrrdNix(zbufferNin); exit(1); } if (nrrdDistanceL2Signed(zbufferDis, zbufferNin, nrrdTypeFloat, NULL, 128, AIR_FALSE)) { char *err = biffGetDone(NRRD); printf("%s: Error doing distance transform:\n%s", argv[0], err); free(err); nrrdNix(zbufferDis); exit(1); } sprintf(outnameslice,"dis_trans.png"); //if (nrrdSave(outnameslice, zbufferDis, NULL)) { // char *err = biffGetDone(NRRD); // printf("%s: Error saving distance transform:\n%s", argv[0], err); // free(err); nrrdNix(zbufferDis); // exit(1); //} } } if (checkPath) { //GLfloat* zbuffer = new GLfloat[viewer.widthBuffer()*viewer.heightBuffer()]; //glReadPixels(0,0,viewer.widthBuffer(),viewer.heightBuffer(),GL_DEPTH_COMPONENT,GL_FLOAT,zbuffer); int wposw = viewer.getLastX(); int hposw = viewer.heightBuffer()-viewer.getLastY(); double disgrad[2]; disgrad[0] = disgrad[1] = 0; if (wposw<viewer.widthBuffer()-1 && wposw>0 && hposw<viewer.heightBuffer()-1 && hposw>0) { disgrad[0] = -(((float*)zbufferDis->data)[hposw*viewer.widthBuffer()+wposw+1]-((float*)zbufferDis->data)[hposw*viewer.widthBuffer()+wposw-1]); disgrad[1] = -(((float*)zbufferDis->data)[(hposw+1)*viewer.widthBuffer()+wposw]-((float*)zbufferDis->data)[(hposw-1)*viewer.widthBuffer()+wposw]); if (disgrad[0] || disgrad[1]) { printf("after assigning, disgrad = %f,%f\n",disgrad[0],disgrad[1]); normalize(disgrad,2); printf("after normalizing, disgrad = %f,%f\n",disgrad[0],disgrad[1]); double disval = ((float*)zbufferDis->data)[hposw*viewer.widthBuffer()+wposw]; printf("old wposw,hposw = %d,%d; disval = %f\n", wposw,hposw,disval); wposw += (disval*disgrad[0]); hposw += (disval*disgrad[1]); printf("jumped wposw,hposw = %d,%d\n", wposw,hposw); } } double dposw = zbufferC[hposw*viewer.widthBuffer()+wposw]; printf("Drag Clicked (w,h,depth) = %d,%d,%f\n", wposw,hposw,dposw); glm::vec4 wposview = convertDepthBuffToViewPos(wposw,hposw,dposw,&viewer); printf("Drag Clicked View Pos = %f,%f,%f\n", wposview.x,wposview.y,wposview.z); if (dposw<1.0 || disgrad[0] || disgrad[1]) { double dismin = INT_MAX; int mini = -1; for (int i=1; i<countline-3; i++) { glm::vec4 curposview = convertWorldToViewPos(arr_center[i*3+0],arr_center[i*3+1],arr_center[i*3+2],&viewer); //test using on 
x and y in view coordinate to find the closest point (not using z) //double dis1 = diss2P(wposview.x,wposview.y,wposview.z,curposview.x,curposview.y,curposview.z); glm::vec4 curposview2 = convertWorldToViewPos(arr_center[(i+1)*3+0],arr_center[(i+1)*3+1],arr_center[(i+1)*3+2],&viewer); double vec1[3],vec2[3]; vec1[0] = curposview.x-wposview.x; vec1[1] = curposview.y-wposview.y; vec1[2] = 0; vec2[0] = curposview2.x-wposview.x; vec2[1] = curposview2.y-wposview.y; vec2[2] = 0; normalize(vec1,3); normalize(vec2,3); double angle = computeAngle(vec1,vec2); printf("Point %d (%d), View pos 1 = %f,%f,%f; View pos 2 = %f,%f,%f; angle = %f\n",i,arr_nameid[i],curposview.x,curposview.y,curposview.z,curposview2.x,curposview2.y,curposview2.z, angle); if (angle > 100) { double dis1 = diss2P(wposview.x,wposview.y,0,curposview.x,curposview.y,0); //double dis2 = diss2P(wposview.x,wposview.y,wposview.z,curposview.x,curposview.y,curposview.z); double dis2 = diss2P(wposview.x,wposview.y,0,curposview2.x,curposview2.y,0); printf("Point %d (%d) View Pos = %f,%f,%f, dis = %f\n",i,arr_nameid[i], curposview.x,curposview.y,curposview.z,dis1); if (dis1+dis2<dismin) { dismin = dis1+dis2; mini = i; } } } if (mini>0) { //int numsample = 20; dismin = INT_MAX; double mint = -1; /* for (int i=0; i<=numsample; i++) { double t = (double)i/(double)numsample; double center[3]; for (int j=0; j<3; j++) center[j] = cubicFilter<double>(t, arr_center[(mini-1)*3+j], arr_center[(mini)*3+j], arr_center[(mini+1)*3+j], arr_center[(mini+2)*3+j]); glm::vec4 curposview = convertWorldToViewPos(center[0],center[1],center[2],&viewer); //double dis = diss2P(wposview.x,wposview.y,wposview.z,curposview.x,curposview.y,curposview.z); double dis = diss2P(wposview.x,wposview.y,1,curposview.x,curposview.y,1); if (dis<dismin) { dismin = dis; mint = t; } } */ int curp = timetop[mini]; while (curp<countls && ptotime[curp]==mini) { glm::vec4 curposview = convertWorldToViewPos(lpld3->xyzw[curp*4+0],lpld3->xyzw[curp*4+1],lpld3->xyzw[curp*4+2],&viewer); double dis = diss2P(wposview.x,wposview.y,1,curposview.x,curposview.y,1); if (dis<dismin) { dismin = dis; mint = ptofrac[curp]; curinterp = curp; } curp++; } if (curVolInMem != mini) { curVolInMem = mini; //find lerping between 2 volumes count = mini; /* curnameind = arr_nameid[count]; sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); fprintf(stderr, "%s: trouble reading \"%s\":\n%s", me, inname, err); free(err); return; } cout<<"read file "<<inname<<endl; unsigned int pixSize; cudaChannelFormatDesc channelDesc; pixSize = sizeof(float); channelDesc = cudaCreateChannelDesc<float>(); if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } int dim[4]; if (nin->dim == 3) { dim[0] = 1; dim[1] = nin->axis[0].size; dim[2] = nin->axis[1].size; dim[3] = nin->axis[2].size; } else //4-channel { dim[0] = nin->axis[0].size; dim[1] = nin->axis[1].size; dim[2] = nin->axis[2].size; dim[3] = nin->axis[3].size; } int channel = 1; for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } //debug for (int k=0; k<=2; k++) for (int j=0; j<=2; j++) for (int i=0; i<=2; i++) printf("volume 1: at (%d,%d,%d) = %f\n", i,j,k,filemem0[k*dim[1]*dim[2]+j*dim[1]+i]); const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); if (!d_volumeArray[0]) cudaMalloc3DArray(&d_volumeArray[0], 
&channelDesc, volumeSize); cudaMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_cudaPitchedPtr((void*)filemem0, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[0]; copyParams0.extent = volumeSize; copyParams0.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams0); */ cudaError_t errCu; cudaChannelFormatDesc channelDesc; channelDesc = cudaCreateChannelDesc<float>(); //cudaArray* d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); cudaArray* d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex0.normalized = false; tex0.filterMode = cudaFilterModeLinear; tex0.addressMode[0] = cudaAddressModeBorder; tex0.addressMode[1] = cudaAddressModeBorder; tex0.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex0, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex3.normalized = false; tex3.filterMode = cudaFilterModeLinear; tex3.addressMode[0] = cudaAddressModeBorder; tex3.addressMode[1] = cudaAddressModeBorder; tex3.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex3, d_curvolarr, channelDesc); //read second file count = mini+1; /* curnameind = arr_nameid[count]; sprintf(inname,"%s/%d.nrrd",pathprefix,curnameind); if (nrrdLoad(nin, inname, NULL)) { err = biffGetDone(NRRD); fprintf(stderr, "%s: trouble reading \"%s\":\n%s", me, inname, err); free(err); return; } cout<<"read file "<<inname<<endl; if (3 != nin->dim && 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 3D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } for (int i=0; i<dim[1]*dim[2]*dim[3]; i++) { filemem0[i] = ((short*)nin->data)[i*2]; filemem1[i] = ((short*)nin->data)[i*2+1]; } if (!d_volumeArray[1]) cudaMalloc3DArray(&d_volumeArray[1], &channelDesc, volumeSize); cudaMemcpy3DParms copyParams1 = {0}; copyParams1.srcPtr = make_cudaPitchedPtr((void*)filemem0, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray[1]; copyParams1.extent = volumeSize; copyParams1.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams1); */ //d_curvolarr = queue.push(count,arr_nameid,pathprefix,mop); d_curvolarr = d_volumeArray[queue.push(count,arr_nameid,pathprefix,mop)]; tex1.normalized = false; tex1.filterMode = cudaFilterModeLinear; tex1.addressMode[0] = cudaAddressModeBorder; tex1.addressMode[1] = cudaAddressModeBorder; tex1.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex1, d_curvolarr, channelDesc); d_curvolarr = d_volumeArray1[queue.push(count,arr_nameid,pathprefix,mop)]; tex4.normalized = false; tex4.filterMode = cudaFilterModeLinear; tex4.addressMode[0] = cudaAddressModeBorder; tex4.addressMode[1] = cudaAddressModeBorder; tex4.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex4, d_curvolarr, channelDesc); } int numThread1D; alpha = mint; numThread1D = 8; dim3 threadsPerBlock(numThread1D,numThread1D,numThread1D); dim3 numBlocks((dim[1]+numThread1D-1)/numThread1D,(dim[2]+numThread1D-1)/numThread1D,(dim[3]+numThread1D-1)/numThread1D); double alpha = mint; //kernel_interpol<<<numBlocks,threadsPerBlock>>>(d_volmem,d_dim,alpha); kernel_interpol2<<<numBlocks,threadsPerBlock>>>(d_volmem,d_volmem2,d_dim,alpha); cudaError_t errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error After kernel_nterpol when clicking: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync After 
kernel_nterpol when clicking: %s\n", cudaGetErrorString(errCu)); //copy from device's global mem to texture mem copyParams0.srcPtr = make_cudaPitchedPtr((void*)d_volmem, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray[NTEX]; copyParams0.extent = volumeSize; copyParams0.kind = cudaMemcpyDeviceToDevice; cudaMemcpy3D(&copyParams0); tex2.normalized = false; tex2.filterMode = cudaFilterModeLinear; tex2.addressMode[0] = cudaAddressModeBorder; tex2.addressMode[1] = cudaAddressModeBorder; tex2.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex2, d_volumeArray[NTEX], channelDesc); copyParams1.srcPtr = make_cudaPitchedPtr((void*)d_volmem2, volumeSize.width*pixSize, volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray1[NTEX]; copyParams1.extent = volumeSize; copyParams1.kind = cudaMemcpyDeviceToDevice; cudaMemcpy3D(&copyParams1); tex5.normalized = false; tex5.filterMode = cudaFilterModeLinear; tex5.addressMode[0] = cudaAddressModeBorder; tex5.addressMode[1] = cudaAddressModeBorder; tex5.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex5, d_volumeArray1[NTEX], channelDesc); //after that call the normal kernel to do MIP count = mini; /* for (int i=0; i<3; i++) center[i] = cubicFilter<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //center[i] = ctmr(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); printf("center = %f %f %f\n", center[0],center[1],center[2]); */ double FT[3]; double FN[3],FB[3]; double dr[3],ddr[3]; /* for (int i=0; i<3; i++) dr[i] = cubicFilter_G<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //dr[i] = ctmr_g(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); for (int i=0; i<3; i++) ddr[i] = cubicFilter_GG<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); //ddr[i] = ctmr_gg(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); */ if (kern==1) { for (int i=0; i<3; i++) { center[i] = cubicFilter<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); dr[i] = cubicFilter_G<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); ddr[i] = cubicFilter_GG<double>(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } else { for (int i=0; i<3; i++) { center[i] = ctmr(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); dr[i] = ctmr_g(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); ddr[i] = ctmr_gg(alpha, arr_center[(count-1)*3+i], arr_center[(count)*3+i], arr_center[(count+1)*3+i], arr_center[(count+2)*3+i]); } } printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); normalize(dr,3); normalize(ddr,3); printf("after normalizing\n"); printf("dr = (%f,%f,%f)\n",dr[0],dr[1],dr[2]); printf("ddr = (%f,%f,%f)\n",ddr[0],ddr[1],ddr[2]); memcpy(FT,dr,sizeof(double)*3); memcpy(FN,eigenvec,sizeof(double)*3); normalize(FN,3); 
cross(FT,FN,FB); cross(FB,FT,FN); memcpy(dir1,FN,sizeof(double)*3); memcpy(dir2,FB,sizeof(double)*3); printf("Interpolation: N = %f %f %f, B = %f %f %f, T = %f %f %f, dotNB = %f, dotNT = %f, dotBT = %f\n",FN[0],FN[1],FN[2],FB[0],FB[1],FB[2],FT[0],FT[1],FT[2], dotProduct(FN,FB,3),dotProduct(FN,FT,3),dotProduct(FB,FT,3)); cudaMemcpy(d_dir1, dir1, 3*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_dir2, dir2, 3*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_center,center,3*sizeof(double), cudaMemcpyHostToDevice); numThread1D = 16; dim3 threadsPerBlock2(numThread1D,numThread1D); dim3 numBlocks2((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D); //if (statePKey) // kernel_peak<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); //else //kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); // kernel_cpr_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); if (statePKey) { if (stateIKey) kernel_peak_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_peak<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } else { if (stateIKey) kernel_cpr_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); } errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error After kernel_cpr when clicking: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync After kernel_cpr when clicking: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); short width = size[0]; short height = size[1]; //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); if (statePKey) quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); else quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); viewer.current(); printf("alpha = %f",alpha); printf("center of the previous interpolated point: %f,%f,%f\n", fmat2[3][0],fmat2[3][1],fmat2[3][2]); printf("center of the last interpolated point: %f,%f,%f\n", center[0],center[1],center[2]); fmat2[3][0] = center[0]; fmat2[3][1] = center[1]; fmat2[3][2] = center[2]; hpld_inter->model(fmat2); //update the local texture frame in first viewer viewer.current(); glm::mat4 tmat_sq_inter = glm::mat4(); tmat_sq_inter[0][0] = FN[0]; tmat_sq_inter[0][1] = FN[1]; tmat_sq_inter[0][2] = FN[2]; tmat_sq_inter[0][3] = 0; tmat_sq_inter[1][0] = FB[0]; tmat_sq_inter[1][1] = FB[1]; tmat_sq_inter[1][2] = FB[2]; tmat_sq_inter[1][3] = 0; tmat_sq_inter[2][0] = FT[0]; tmat_sq_inter[2][1] = FT[1]; tmat_sq_inter[2][2] = FT[2]; tmat_sq_inter[2][3] = 0; 
tmat_sq_inter[3][0] = center[0]; tmat_sq_inter[3][1] = center[1]; tmat_sq_inter[3][2] = center[2]; tmat_sq_inter[3][3] = 1; glm::mat4 smat_sq_inter = glm::mat4(); smat_sq_inter[0][0] = 2; smat_sq_inter[1][1] = 2; glm::mat4 fmat_sq_inter = tmat_sq_inter*smat_sq_inter; hpld_sq_inter->model(fmat_sq_inter); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } } } } else { isHoldOn = false; checkPath = false; viewer.setPaused(false); } //rotation in the second window printf("testing view params from window 1\n"); printf("curFrom = %f,%f,%f\n",viewer.camera.from()[0],viewer.camera.from()[1],viewer.camera.from()[2]); printf("curAt = %f,%f,%f\n",viewer.camera.at()[0],viewer.camera.at()[1],viewer.camera.at()[2]); printf("curUp = %f,%f,%f\n",viewer.camera.up()[0],viewer.camera.up()[1],viewer.camera.up()[2]); printf("before processing the rotation in second window++++++++++++++++++++++++\n"); printf("preFrom = %f,%f,%f\n",preFrom[0],preFrom[1],preFrom[2]); printf("curFrom = %f,%f,%f\n",viewer2.camera.from()[0],viewer2.camera.from()[1],viewer2.camera.from()[2]); printf("preAt = %f,%f,%f\n",preAt[0],preAt[1],preAt[2]); printf("curAt = %f,%f,%f\n",viewer2.camera.at()[0],viewer2.camera.at()[1],viewer2.camera.at()[2]); printf("preUp = %f,%f,%f\n",preUp[0],preUp[1],preUp[2]); printf("curUp = %f,%f,%f\n",viewer2.camera.up()[0],viewer2.camera.up()[1],viewer2.camera.up()[2]); if ((viewer2.camera.from() != preFrom || viewer2.camera.at()!=preAt || viewer2.camera.up()!=preUp)) //&& (viewer2.isMouseReleased())) { printf("changed viewing angles in view2------------------------------------\n"); glm::vec3 curZ = glm::normalize(viewer2.camera.up()); glm::vec3 curY = glm::normalize(viewer2.camera.at()-viewer2.camera.from()); glm::vec3 curX = glm::cross(curY,curZ); glm::mat3 curCo = glm::mat3(curX[0],curX[1],curX[2], curY[0],curY[1],curY[2], curZ[0],curZ[1],curZ[2]); glm::mat3 curTrans = curCo*preCoI; preCoI = glm::transpose(curCo); preFrom = viewer2.camera.from(); preAt = viewer2.camera.at(); preUp = viewer2.camera.up(); double FT[3]; cross(dir1,dir2,FT); glm::mat3 preCo2(FT[0],FT[1],FT[2], dir1[0],dir1[1],dir1[2], dir2[0],dir2[1],dir2[2]); glm::mat3 curCo2 = curTrans*preCo2; dir1[0] = curCo2[1][0]; dir1[1] = curCo2[1][1]; dir1[2] = curCo2[1][2]; dir2[0] = curCo2[2][0]; dir2[1] = curCo2[2][1]; dir2[2] = curCo2[2][2]; FT[0] = curCo2[0][0]; FT[1] = curCo2[0][1]; FT[2] = curCo2[0][2]; cudaMemcpy(d_dir1, dir1, 3*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_dir2, dir2, 3*sizeof(double), cudaMemcpyHostToDevice); if (statePKey) { if (stateIKey) kernel_peak_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_peak<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); //debug //sprintf(outnameslice,"test_peak.nrrd"); //if (nrrdWrap_va(ndblpng, imageDouble, nrrdTypeDouble, 3, 4, size[0], size[1]) // || nrrdSave(outnameslice, ndblpng, NULL) // ) { // char *err = biffGetDone(NRRD); // printf("%s: couldn't save output:\n%s", argv[0], err); // free(err); nrrdNix(ndblpng); // exit(1); 
// } //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range_p); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); //viewer.current(); //hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } else { if (stateIKey) kernel_cpr_2chan<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); else kernel_cpr<<<numBlocks2,threadsPerBlock2>>>(d_dim, d_size, verextent2, d_center, d_dir1, d_dir2, swidth, sstep, nOutChannel, d_imageDouble); errCu = cudaGetLastError(); if (errCu != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(errCu)); errCu = cudaDeviceSynchronize(); if (errCu != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(errCu)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); //quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); quantizeImageDouble3D_Range(imageDouble,imageQuantized,4,size[0],size[1],range); setPlane<unsigned char>(imageQuantized, 4, size[0], size[1], 255, 3); drawCircleWithColor(imageQuantized, 4, size[0], size[1], size[0]/2, size[1]/2, 10, 0.1, 255, 0, 0); viewer2.current(); hpldview2->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); //viewer.current(); //hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } viewer.current(); glm::mat4 tmat_sq_inter = glm::mat4(); tmat_sq_inter[0][0] = dir1[0]; tmat_sq_inter[0][1] = dir1[1]; tmat_sq_inter[0][2] = dir1[2]; tmat_sq_inter[0][3] = 0; tmat_sq_inter[1][0] = dir2[0]; tmat_sq_inter[1][1] = dir2[1]; tmat_sq_inter[1][2] = dir2[2]; tmat_sq_inter[1][3] = 0; tmat_sq_inter[2][0] = FT[0]; tmat_sq_inter[2][1] = FT[1]; tmat_sq_inter[2][2] = FT[2]; tmat_sq_inter[2][3] = 0; tmat_sq_inter[3][0] = center[0]; tmat_sq_inter[3][1] = center[1]; tmat_sq_inter[3][2] = center[2]; tmat_sq_inter[3][3] = 1; glm::mat4 smat_sq_inter = glm::mat4(); smat_sq_inter[0][0] = 2; smat_sq_inter[1][1] = 2; glm::mat4 fmat_sq_inter = tmat_sq_inter*smat_sq_inter; hpld_sq_inter->model(fmat_sq_inter); viewer.current(); hpld_sq_inter->replaceLastTexture((unsigned char *)imageQuantized,size[0],size[1],4); } viewer.current(); render(&viewer); viewer2.current(); render(&viewer2); viewer.current(); printf("end of an event loop\n"); printf("viewer: buffer = %d %d, window = %d %d\n", viewer.widthBuffer(), viewer.heightBuffer(), viewer.width(), viewer.height()); printf("viewer2: buffer = %d %d, window = %d %d\n", viewer2.widthBuffer(), viewer2.heightBuffer(), viewer2.width(), viewer2.height()); } /* clean exit; all okay */ Hale::done(); airMopOkay(mop); return 0; }
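Note on the event loop above: after every kernel_cpr / kernel_peak launch it repeats the same sequence by hand — check the launch error, synchronize, check the execution error, copy d_imageDouble back to the host, then quantize and push the texture. The helper below is only a sketch of how that shared error-check-and-copy-back step could be factored out; the wrapper name runAndFetch and the tag argument are invented for illustration, while the buffer and size names match the ones the program already uses. After it returns true, the existing quantizeImageDouble3D_Range / setPlane / replaceLastTexture calls would follow unchanged.

#include <cstdio>
#include <cuda_runtime.h>

// Sketch of the post-launch sequence repeated throughout the viewer loop above.
// Call immediately after launching kernel_cpr / kernel_peak; "tag" only labels
// the printed messages.
static bool runAndFetch(const char *tag, double *imageDouble, double *d_imageDouble,
                        const int *size, int nOutChannel)
{
  cudaError_t errCu = cudaGetLastError();              // launch-configuration errors
  if (errCu != cudaSuccess) {
    printf("Error (%s): %s\n", tag, cudaGetErrorString(errCu));
    return false;
  }
  errCu = cudaDeviceSynchronize();                     // errors raised while the kernel ran
  if (errCu != cudaSuccess) {
    printf("Error Sync (%s): %s\n", tag, cudaGetErrorString(errCu));
    return false;
  }
  cudaMemcpy(imageDouble, d_imageDouble,               // bring the rendered slab back to the host
             sizeof(double) * size[0] * size[1] * nOutChannel,
             cudaMemcpyDeviceToHost);
  return true;
}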
b8d0d9f0edcef00b317a5e47c627dcbd7ac4e950.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_ideal_gas_kernel; int xdim0_ideal_gas_kernel_h = -1; __constant__ int ydim0_ideal_gas_kernel; int ydim0_ideal_gas_kernel_h = -1; __constant__ int xdim1_ideal_gas_kernel; int xdim1_ideal_gas_kernel_h = -1; __constant__ int ydim1_ideal_gas_kernel; int ydim1_ideal_gas_kernel_h = -1; __constant__ int xdim2_ideal_gas_kernel; int xdim2_ideal_gas_kernel_h = -1; __constant__ int ydim2_ideal_gas_kernel; int ydim2_ideal_gas_kernel_h = -1; __constant__ int xdim3_ideal_gas_kernel; int xdim3_ideal_gas_kernel_h = -1; __constant__ int ydim3_ideal_gas_kernel; int ydim3_ideal_gas_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_ideal_gas_kernel*(y)+xdim0_ideal_gas_kernel*ydim0_ideal_gas_kernel*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_ideal_gas_kernel*(y)+xdim1_ideal_gas_kernel*ydim1_ideal_gas_kernel*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_ideal_gas_kernel*(y)+xdim2_ideal_gas_kernel*ydim2_ideal_gas_kernel*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_ideal_gas_kernel*(y)+xdim3_ideal_gas_kernel*ydim3_ideal_gas_kernel*(z)) //user function __device__ void ideal_gas_kernel_gpu( const double *density, const double *energy, double *pressure, double *soundspeed) { double sound_speed_squared, v, pressurebyenergy, pressurebyvolume; v = 1.0 / density[OPS_ACC0(0,0,0)]; pressure[OPS_ACC2(0,0,0)] = (1.4 - 1.0) * density[OPS_ACC0(0,0,0)] * energy[OPS_ACC1(0,0,0)]; pressurebyenergy = (1.4 - 1.0) * density[OPS_ACC0(0,0,0)]; pressurebyvolume = -1.0*density[OPS_ACC0(0,0,0)] * pressure[OPS_ACC2(0,0,0)]; sound_speed_squared = v*v*(pressure[OPS_ACC2(0,0,0)] * pressurebyenergy-pressurebyvolume); soundspeed[OPS_ACC3(0,0,0)] = sqrt(sound_speed_squared); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_ideal_gas_kernel( const double* __restrict arg0, const double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_ideal_gas_kernel + idx_z * 1*1 * xdim0_ideal_gas_kernel * ydim0_ideal_gas_kernel; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_ideal_gas_kernel + idx_z * 1*1 * xdim1_ideal_gas_kernel * ydim1_ideal_gas_kernel; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_ideal_gas_kernel + idx_z * 1*1 * xdim2_ideal_gas_kernel * ydim2_ideal_gas_kernel; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_ideal_gas_kernel + idx_z * 1*1 * xdim3_ideal_gas_kernel * ydim3_ideal_gas_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ideal_gas_kernel_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_ideal_gas_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,11)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(11,"ideal_gas_kernel"); OPS_kernels[11].count++; 
ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_ideal_gas_kernel_h || ydim0 != ydim0_ideal_gas_kernel_h || xdim1 != xdim1_ideal_gas_kernel_h || ydim1 != ydim1_ideal_gas_kernel_h || xdim2 != xdim2_ideal_gas_kernel_h || ydim2 != ydim2_ideal_gas_kernel_h || xdim3 != xdim3_ideal_gas_kernel_h || ydim3 != ydim3_ideal_gas_kernel_h) { hipMemcpyToSymbol( xdim0_ideal_gas_kernel, &xdim0, sizeof(int) ); xdim0_ideal_gas_kernel_h = xdim0; hipMemcpyToSymbol( ydim0_ideal_gas_kernel, &ydim0, sizeof(int) ); ydim0_ideal_gas_kernel_h = ydim0; hipMemcpyToSymbol( xdim1_ideal_gas_kernel, &xdim1, sizeof(int) ); xdim1_ideal_gas_kernel_h = xdim1; hipMemcpyToSymbol( ydim1_ideal_gas_kernel, &ydim1, sizeof(int) ); ydim1_ideal_gas_kernel_h = ydim1; hipMemcpyToSymbol( xdim2_ideal_gas_kernel, &xdim2, sizeof(int) ); xdim2_ideal_gas_kernel_h = xdim2; hipMemcpyToSymbol( ydim2_ideal_gas_kernel, &ydim2, sizeof(int) ); ydim2_ideal_gas_kernel_h = ydim2; hipMemcpyToSymbol( xdim3_ideal_gas_kernel, &xdim3, sizeof(int) ); xdim3_ideal_gas_kernel_h = xdim3; hipMemcpyToSymbol( ydim3_ideal_gas_kernel, &ydim3, sizeof(int) ); ydim3_ideal_gas_kernel_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[11].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_ideal_gas_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[11].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[11].mpi_time += t2-t1; OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 11; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 11; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = 
ops_par_loop_ideal_gas_kernel_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(11,"ideal_gas_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
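This hipified file differs from its CUDA twin below mainly in the added hip/hip_runtime.h include, mechanically renamed runtime calls (hipMemcpyToSymbol for cudaMemcpyToSymbol, hipGetLastError for cudaGetLastError, hipDeviceSynchronize for cudaDeviceSynchronize), and the kernel-launch spelling. The fragment below is a minimal stand-alone sketch of that launch translation only; scale_kernel and its arguments are invented for the example, not taken from the generated OPS code.

#include <hip/hip_runtime.h>

__global__ void scale_kernel(double *p, double f, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] *= f;
}

int main() {
  const int n = 256;
  double *d_p = nullptr;
  hipMalloc(&d_p, n * sizeof(double));
  hipMemset(d_p, 0, n * sizeof(double));
  dim3 tblock(64), grid((n + 63) / 64);
  // CUDA spelling (as in the .cu twin):  scale_kernel<<<grid, tblock>>>(d_p, 2.0, n);
  // HIP spelling (as in this file): shared-memory bytes and stream become explicit 0, 0
  hipLaunchKernelGGL(scale_kernel, grid, tblock, 0, 0, d_p, 2.0, n);
  hipDeviceSynchronize();
  hipFree(d_p);
  return 0;
}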
b8d0d9f0edcef00b317a5e47c627dcbd7ac4e950.cu
// // auto-generated by ops.py // __constant__ int xdim0_ideal_gas_kernel; int xdim0_ideal_gas_kernel_h = -1; __constant__ int ydim0_ideal_gas_kernel; int ydim0_ideal_gas_kernel_h = -1; __constant__ int xdim1_ideal_gas_kernel; int xdim1_ideal_gas_kernel_h = -1; __constant__ int ydim1_ideal_gas_kernel; int ydim1_ideal_gas_kernel_h = -1; __constant__ int xdim2_ideal_gas_kernel; int xdim2_ideal_gas_kernel_h = -1; __constant__ int ydim2_ideal_gas_kernel; int ydim2_ideal_gas_kernel_h = -1; __constant__ int xdim3_ideal_gas_kernel; int xdim3_ideal_gas_kernel_h = -1; __constant__ int ydim3_ideal_gas_kernel; int ydim3_ideal_gas_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_ideal_gas_kernel*(y)+xdim0_ideal_gas_kernel*ydim0_ideal_gas_kernel*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_ideal_gas_kernel*(y)+xdim1_ideal_gas_kernel*ydim1_ideal_gas_kernel*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_ideal_gas_kernel*(y)+xdim2_ideal_gas_kernel*ydim2_ideal_gas_kernel*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_ideal_gas_kernel*(y)+xdim3_ideal_gas_kernel*ydim3_ideal_gas_kernel*(z)) //user function __device__ void ideal_gas_kernel_gpu( const double *density, const double *energy, double *pressure, double *soundspeed) { double sound_speed_squared, v, pressurebyenergy, pressurebyvolume; v = 1.0 / density[OPS_ACC0(0,0,0)]; pressure[OPS_ACC2(0,0,0)] = (1.4 - 1.0) * density[OPS_ACC0(0,0,0)] * energy[OPS_ACC1(0,0,0)]; pressurebyenergy = (1.4 - 1.0) * density[OPS_ACC0(0,0,0)]; pressurebyvolume = -1.0*density[OPS_ACC0(0,0,0)] * pressure[OPS_ACC2(0,0,0)]; sound_speed_squared = v*v*(pressure[OPS_ACC2(0,0,0)] * pressurebyenergy-pressurebyvolume); soundspeed[OPS_ACC3(0,0,0)] = sqrt(sound_speed_squared); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_ideal_gas_kernel( const double* __restrict arg0, const double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_ideal_gas_kernel + idx_z * 1*1 * xdim0_ideal_gas_kernel * ydim0_ideal_gas_kernel; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_ideal_gas_kernel + idx_z * 1*1 * xdim1_ideal_gas_kernel * ydim1_ideal_gas_kernel; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_ideal_gas_kernel + idx_z * 1*1 * xdim2_ideal_gas_kernel * ydim2_ideal_gas_kernel; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_ideal_gas_kernel + idx_z * 1*1 * xdim3_ideal_gas_kernel * ydim3_ideal_gas_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ideal_gas_kernel_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_ideal_gas_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,11)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(11,"ideal_gas_kernel"); OPS_kernels[11].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int 
end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_ideal_gas_kernel_h || ydim0 != ydim0_ideal_gas_kernel_h || xdim1 != xdim1_ideal_gas_kernel_h || ydim1 != ydim1_ideal_gas_kernel_h || xdim2 != xdim2_ideal_gas_kernel_h || ydim2 != ydim2_ideal_gas_kernel_h || xdim3 != xdim3_ideal_gas_kernel_h || ydim3 != ydim3_ideal_gas_kernel_h) { cudaMemcpyToSymbol( xdim0_ideal_gas_kernel, &xdim0, sizeof(int) ); xdim0_ideal_gas_kernel_h = xdim0; cudaMemcpyToSymbol( ydim0_ideal_gas_kernel, &ydim0, sizeof(int) ); ydim0_ideal_gas_kernel_h = ydim0; cudaMemcpyToSymbol( xdim1_ideal_gas_kernel, &xdim1, sizeof(int) ); xdim1_ideal_gas_kernel_h = xdim1; cudaMemcpyToSymbol( ydim1_ideal_gas_kernel, &ydim1, sizeof(int) ); ydim1_ideal_gas_kernel_h = ydim1; cudaMemcpyToSymbol( xdim2_ideal_gas_kernel, &xdim2, sizeof(int) ); xdim2_ideal_gas_kernel_h = xdim2; cudaMemcpyToSymbol( ydim2_ideal_gas_kernel, &ydim2, sizeof(int) ); ydim2_ideal_gas_kernel_h = ydim2; cudaMemcpyToSymbol( xdim3_ideal_gas_kernel, &xdim3, sizeof(int) ); xdim3_ideal_gas_kernel_h = xdim3; cudaMemcpyToSymbol( ydim3_ideal_gas_kernel, &ydim3, sizeof(int) ); ydim3_ideal_gas_kernel_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[11].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_ideal_gas_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[11].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[11].mpi_time += t2-t1; OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 11; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 11; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_ideal_gas_kernel_execute; 
  if (OPS_diags > 1) {
    ops_timing_realloc(11,"ideal_gas_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
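Both variants of ideal_gas_kernel index their 3D blocks through the OPS_ACC* macros defined at the top of each file, which flatten an (x, y, z) offset as x + xdim*(y) + xdim*ydim*(z) relative to a pointer already advanced to the thread's element. The snippet below is a small host-side illustration of that flattening; the function name acc and the sample dimensions are invented for the example and are not part of OPS.

#include <cstdio>

// Mirrors the OPS_ACC0(x,y,z) expansion: x + xdim*(y) + xdim*ydim*(z).
static int acc(int x, int y, int z, int xdim, int ydim) {
  return x + xdim * y + xdim * ydim * z;
}

int main() {
  const int xdim = 8, ydim = 4;
  // A step of +1 in z moves exactly one xdim*ydim plane through the flat array.
  printf("stride in z = %d (expected %d)\n",
         acc(0, 0, 1, xdim, ydim) - acc(0, 0, 0, xdim, ydim), xdim * ydim);
  return 0;
}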
6eab980925d862be5f2ed60af1ffea351a1cd92e.hip
// !!! This is a file automatically generated by hipify!!! #include "common.h" #include "mmio_highlevel.h" #include "utils.h" #include "tranpose.h" #include "sptrans_cuda.h" #include "sptrans_kernal.h" int main(int argc, char ** argv) { // report precision of floating-point printf("---------------------------------------------------------------------------------------------\n"); char *precision; if (sizeof(VALUE_TYPE) == 4) { precision = (char *)"32-bit Single Precision"; } else if (sizeof(VALUE_TYPE) == 8) { precision = (char *)"64-bit Double Precision"; } else { printf("Wrong precision. Program exit!\n"); return 0; } printf("PRECISION = %s\n", precision); printf("Benchmark REPEAT = %i\n", BENCH_REPEAT); printf("---------------------------------------------------------------------------------------------\n"); int m, n, nnzA, isSymmetricA; int *csrRowPtrA; int *csrColIdxA; double *csrValA; int *cscRowIdxA; int *cscColPtrA; double *cscValA; int device_id = 0; int dataformatted = DATAFORMAT_CSR; // "Usage: ``./sptrans -n (#gpu) -csr -mtx A.mtx'' int argi = 1; // load number of GPU char *devstr; if(argc > argi) { devstr = argv[argi]; argi++; } if (strcmp(devstr, "-n") != 0) return 0; int ngpu; if(argc > argi) { ngpu = atoi(argv[argi]); argi++; } int deviceCount; hipGetDeviceCount(&deviceCount); if (deviceCount < ngpu) { printf("Error: Not enough number of GPUs. Only %i available\n", deviceCount); return -1; } if (ngpu <= 0) { printf("Error: Number of GPU(s) needs to be greater than 0.\n"); return -1; } printf("Using %i GPU(s).\n", ngpu); // load format, csr or csc char *dataFormat; if(argc > argi) { dataFormat = argv[argi]; argi++; } if (strcmp(dataFormat, "-csr") == 0) dataformatted = DATAFORMAT_CSR; else if (strcmp(dataFormat, "-csc") == 0) dataformatted = DATAFORMAT_CSC; printf("input data format = %s\n", dataFormat); printf("dataformatted = %i\n", dataformatted); // load matrix file type, mtx, cscl, or cscu char *matstr; if(argc > argi) { matstr = argv[argi]; argi++; } printf("matstr = %s\n", matstr); // load matrix data from file char *filename; if(argc > argi) { filename = argv[argi]; argi++; } printf("-------------- %s --------------\n", filename); srand(time(NULL)); if (strcmp(matstr, "-mtx") == 0) { // load mtx data to the csr format mmio_info(&m, &n, &nnzA, &isSymmetricA, filename); csrRowPtrA = (int *)malloc((m+1) * sizeof(int)); csrColIdxA = (int *)malloc(nnzA * sizeof(int)); csrValA = (double *)malloc(nnzA * sizeof(double)); mmio_data(csrRowPtrA, csrColIdxA, csrValA, filename); printf("input matrix A: ( %i, %i ) nnz = %i\n", m, n, nnzA); /* printf("\n csrColIdx = ["); for(int j = 0; j < nnzA; j++) printf(" %d ", csrColIdxA[j]); printf("]\n"); printf("csrRowPtr =["); for(int j = 0; j < m+1; j++) printf(" %d ", csrRowPtrA[j]); printf("]\n"); */ int nnz_pointer = 0; for (int i = 0; i < m; i++) { for (int j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++) { csrValA[nnz_pointer] = rand() % 10 + 1; //csrValA[j]; nnz_pointer++; } } cscRowIdxA = (int *)malloc(nnzA * sizeof(int)); cscColPtrA = (int *)malloc((n+1) * sizeof(int)); memset(cscColPtrA, 0, (n+1) * sizeof(int)); cscValA = (double *)malloc(nnzA * sizeof(double)); struct timeval t1, t2; double time_cpu_trans= 0; gettimeofday(&t1, NULL); // transpose from csr to csc matrix_transposition(m, n, nnzA, csrRowPtrA, csrColIdxA, csrValA, cscRowIdxA, cscColPtrA, cscValA); gettimeofday(&t2, NULL); time_cpu_trans = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; printf("matrix trans used %4.8f ms in cpu 
node,\n",time_cpu_trans); } else{ printf("donot support other format now, waiting for update!"); } // test cpu result int *csrRowPtrB; int *csrColIdxB; double *csrValB; csrRowPtrB = (int *)malloc((m+1) * sizeof(int)); csrColIdxB = (int *)malloc(nnzA * sizeof(int)); memset(csrRowPtrB, 0, (m+1) * sizeof(int)); csrValB = (double *)malloc(nnzA * sizeof(double)); // // transpose from csc to csrB matrix_transposition_back(n, m, nnzA, cscColPtrA, cscRowIdxA, cscValA, csrColIdxB, csrRowPtrB, csrValB); double accuracy = 1e-4; double ref = 0.0; double res = 0.0; for (int i = 0; i < nnzA; i++) { ref += abs(csrValA[i]); res += abs(csrValB[i] - csrValA[i]); // if (csrValA[i] != csrValB[i]) printf ("[%i, %d] csrValA = %f, csrValB = %f\n", i, csrColIdxA[i], csrValA[i], csrValB[i]); } res = ref == 0 ? res : res / ref; if (res < accuracy) printf("matrix transposition in cpu: passed! |x-xref|/|xref| = %8.2e\n", res); else printf("matrix transposition in cpu: _NOT_ passed! |x-xref|/|xref| = %8.2e\n", res); free(csrColIdxB); free(csrValB); free(csrRowPtrB); // set device //hipSetDevice(device_id); // hipDeviceProp_t deviceProp; // hipGetDeviceProperties(&deviceProp, device_id); //printf("---------------------------------------------------------------------------------------------\n"); //printf("Device [ %i ] %s @ %4.2f MHz\n", device_id, deviceProp.name, deviceProp.clockRate * 1e-3f); // run cuda trans int *cscRowIdx; int *cscColPtr; double *cscVal; cscRowIdx = (int *)malloc(nnzA * sizeof(int)); cscColPtr = (int *)malloc((n+1) * sizeof(int)); memset(cscColPtr, 0, (n+1) * sizeof(int)); cscVal = (double *)malloc(nnzA * sizeof(double)); memset(cscVal, 0, nnzA * sizeof(double)); printf("---------------------------------------------------------------------------------------------\n"); cuda_sptrans(m, n, nnzA, csrRowPtrA, csrColIdxA, csrValA, cscRowIdx, cscColPtr, cscVal, cscRowIdxA, cscColPtrA, cscValA); printf("---------------------------------------------------------------------------------------------\n"); int *cscRowIdx2; int *cscColPtr2; double *cscVal2; cscRowIdx2 = (int *)malloc(nnzA * sizeof(int)); cscColPtr2 = (int *)malloc((n+1) * sizeof(int)); memset(cscColPtr2, 0, (n+1) * sizeof(int)); cscVal2 = (double *)malloc(nnzA * sizeof(double)); printf("---------------------------------------------------------------------------------------------\n"); kernal_sptrans(m, n, nnzA, ngpu, csrRowPtrA, csrColIdxA, csrValA, cscRowIdx2, cscColPtr2, cscVal2, cscRowIdxA, cscColPtrA, cscValA); printf("---------------------------------------------------------------------------------------------\n"); // done! free(cscRowIdx); free(cscColPtr); free(cscVal); free(cscRowIdx2); free(cscColPtr2); free(cscVal2); free(cscRowIdxA); free(cscColPtrA); free(cscValA); free(csrColIdxA); free(csrValA); free(csrRowPtrA); return 0; }
6eab980925d862be5f2ed60af1ffea351a1cd92e.cu
#include "common.h" #include "mmio_highlevel.h" #include "utils.h" #include "tranpose.h" #include "sptrans_cuda.h" #include "sptrans_kernal.h" int main(int argc, char ** argv) { // report precision of floating-point printf("---------------------------------------------------------------------------------------------\n"); char *precision; if (sizeof(VALUE_TYPE) == 4) { precision = (char *)"32-bit Single Precision"; } else if (sizeof(VALUE_TYPE) == 8) { precision = (char *)"64-bit Double Precision"; } else { printf("Wrong precision. Program exit!\n"); return 0; } printf("PRECISION = %s\n", precision); printf("Benchmark REPEAT = %i\n", BENCH_REPEAT); printf("---------------------------------------------------------------------------------------------\n"); int m, n, nnzA, isSymmetricA; int *csrRowPtrA; int *csrColIdxA; double *csrValA; int *cscRowIdxA; int *cscColPtrA; double *cscValA; int device_id = 0; int dataformatted = DATAFORMAT_CSR; // "Usage: ``./sptrans -n (#gpu) -csr -mtx A.mtx'' int argi = 1; // load number of GPU char *devstr; if(argc > argi) { devstr = argv[argi]; argi++; } if (strcmp(devstr, "-n") != 0) return 0; int ngpu; if(argc > argi) { ngpu = atoi(argv[argi]); argi++; } int deviceCount; cudaGetDeviceCount(&deviceCount); if (deviceCount < ngpu) { printf("Error: Not enough number of GPUs. Only %i available\n", deviceCount); return -1; } if (ngpu <= 0) { printf("Error: Number of GPU(s) needs to be greater than 0.\n"); return -1; } printf("Using %i GPU(s).\n", ngpu); // load format, csr or csc char *dataFormat; if(argc > argi) { dataFormat = argv[argi]; argi++; } if (strcmp(dataFormat, "-csr") == 0) dataformatted = DATAFORMAT_CSR; else if (strcmp(dataFormat, "-csc") == 0) dataformatted = DATAFORMAT_CSC; printf("input data format = %s\n", dataFormat); printf("dataformatted = %i\n", dataformatted); // load matrix file type, mtx, cscl, or cscu char *matstr; if(argc > argi) { matstr = argv[argi]; argi++; } printf("matstr = %s\n", matstr); // load matrix data from file char *filename; if(argc > argi) { filename = argv[argi]; argi++; } printf("-------------- %s --------------\n", filename); srand(time(NULL)); if (strcmp(matstr, "-mtx") == 0) { // load mtx data to the csr format mmio_info(&m, &n, &nnzA, &isSymmetricA, filename); csrRowPtrA = (int *)malloc((m+1) * sizeof(int)); csrColIdxA = (int *)malloc(nnzA * sizeof(int)); csrValA = (double *)malloc(nnzA * sizeof(double)); mmio_data(csrRowPtrA, csrColIdxA, csrValA, filename); printf("input matrix A: ( %i, %i ) nnz = %i\n", m, n, nnzA); /* printf("\n csrColIdx = ["); for(int j = 0; j < nnzA; j++) printf(" %d ", csrColIdxA[j]); printf("]\n"); printf("csrRowPtr =["); for(int j = 0; j < m+1; j++) printf(" %d ", csrRowPtrA[j]); printf("]\n"); */ int nnz_pointer = 0; for (int i = 0; i < m; i++) { for (int j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++) { csrValA[nnz_pointer] = rand() % 10 + 1; //csrValA[j]; nnz_pointer++; } } cscRowIdxA = (int *)malloc(nnzA * sizeof(int)); cscColPtrA = (int *)malloc((n+1) * sizeof(int)); memset(cscColPtrA, 0, (n+1) * sizeof(int)); cscValA = (double *)malloc(nnzA * sizeof(double)); struct timeval t1, t2; double time_cpu_trans= 0; gettimeofday(&t1, NULL); // transpose from csr to csc matrix_transposition(m, n, nnzA, csrRowPtrA, csrColIdxA, csrValA, cscRowIdxA, cscColPtrA, cscValA); gettimeofday(&t2, NULL); time_cpu_trans = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; printf("matrix trans used %4.8f ms in cpu node,\n",time_cpu_trans); } else{ printf("donot support other format now, 
waiting for update!"); } // test cpu result int *csrRowPtrB; int *csrColIdxB; double *csrValB; csrRowPtrB = (int *)malloc((m+1) * sizeof(int)); csrColIdxB = (int *)malloc(nnzA * sizeof(int)); memset(csrRowPtrB, 0, (m+1) * sizeof(int)); csrValB = (double *)malloc(nnzA * sizeof(double)); // // transpose from csc to csrB matrix_transposition_back(n, m, nnzA, cscColPtrA, cscRowIdxA, cscValA, csrColIdxB, csrRowPtrB, csrValB); double accuracy = 1e-4; double ref = 0.0; double res = 0.0; for (int i = 0; i < nnzA; i++) { ref += abs(csrValA[i]); res += abs(csrValB[i] - csrValA[i]); // if (csrValA[i] != csrValB[i]) printf ("[%i, %d] csrValA = %f, csrValB = %f\n", i, csrColIdxA[i], csrValA[i], csrValB[i]); } res = ref == 0 ? res : res / ref; if (res < accuracy) printf("matrix transposition in cpu: passed! |x-xref|/|xref| = %8.2e\n", res); else printf("matrix transposition in cpu: _NOT_ passed! |x-xref|/|xref| = %8.2e\n", res); free(csrColIdxB); free(csrValB); free(csrRowPtrB); // set device //cudaSetDevice(device_id); // cudaDeviceProp deviceProp; // cudaGetDeviceProperties(&deviceProp, device_id); //printf("---------------------------------------------------------------------------------------------\n"); //printf("Device [ %i ] %s @ %4.2f MHz\n", device_id, deviceProp.name, deviceProp.clockRate * 1e-3f); // run cuda trans int *cscRowIdx; int *cscColPtr; double *cscVal; cscRowIdx = (int *)malloc(nnzA * sizeof(int)); cscColPtr = (int *)malloc((n+1) * sizeof(int)); memset(cscColPtr, 0, (n+1) * sizeof(int)); cscVal = (double *)malloc(nnzA * sizeof(double)); memset(cscVal, 0, nnzA * sizeof(double)); printf("---------------------------------------------------------------------------------------------\n"); cuda_sptrans(m, n, nnzA, csrRowPtrA, csrColIdxA, csrValA, cscRowIdx, cscColPtr, cscVal, cscRowIdxA, cscColPtrA, cscValA); printf("---------------------------------------------------------------------------------------------\n"); int *cscRowIdx2; int *cscColPtr2; double *cscVal2; cscRowIdx2 = (int *)malloc(nnzA * sizeof(int)); cscColPtr2 = (int *)malloc((n+1) * sizeof(int)); memset(cscColPtr2, 0, (n+1) * sizeof(int)); cscVal2 = (double *)malloc(nnzA * sizeof(double)); printf("---------------------------------------------------------------------------------------------\n"); kernal_sptrans(m, n, nnzA, ngpu, csrRowPtrA, csrColIdxA, csrValA, cscRowIdx2, cscColPtr2, cscVal2, cscRowIdxA, cscColPtrA, cscValA); printf("---------------------------------------------------------------------------------------------\n"); // done! free(cscRowIdx); free(cscColPtr); free(cscVal); free(cscRowIdx2); free(cscColPtr2); free(cscVal2); free(cscRowIdxA); free(cscColPtrA); free(cscValA); free(csrColIdxA); free(csrValA); free(csrRowPtrA); return 0; }
ea09c5ee6cfa23c084c1bfe814804749d3a06a25.hip
// !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//--blockDim=512 --gridDim=1 --no-inline
//Write by thread .+kernel\.cu:8:21:

#include <hip/hip_runtime.h>

__global__ void curand_test(hiprandStateMtgp32_t *state, float *A)
{
  A[threadIdx.x] = hiprand(state);
}
ea09c5ee6cfa23c084c1bfe814804749d3a06a25.cu
//xfail:BOOGIE_ERROR
//--blockDim=512 --gridDim=1 --no-inline
//Write by thread .+kernel\.cu:8:21:

#include <cuda.h>

__global__ void curand_test(curandStateMtgp32_t *state, float *A)
{
  A[threadIdx.x] = curand(state);
}
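This pair is a GPUVerify regression test (the xfail:BOOGIE_ERROR header marks the expected race/write report) and exercises the curand -> hiprand renaming for the MTGP32 generator type. For context only, a typical self-contained cuRAND pattern with the default XORWOW per-thread state, which is not the MTGP32 setup the test above assumes, looks like the sketch below.

#include <cuda_runtime.h>
#include <curand_kernel.h>

// Illustration only: one generator state per thread.
__global__ void init_states(curandState *states, unsigned long long seed)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, id, 0, &states[id]);   // distinct subsequence per thread
}

__global__ void fill_uniform(curandState *states, float *A)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    curandState local = states[id];          // work on a register copy
    A[id] = curand_uniform(&local);          // uniform sample in (0, 1]
    states[id] = local;                      // write the advanced state back
}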
dd24049d0e63cd78ac659e07cda8ac3659ae39eb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * @file adagrad.cu
 * @author Sedrick Keh
 * @version 1.0
 * @date 2019-07-26
 *
 * @copyright Copyright (c) 2019
 */
#include "math/optimizer_math/adagrad.h"

#define BLK_SIZE 1024

namespace magmadnn {
namespace math {

template <typename T>
__global__ void kernel_adagrad_device(T learning_rate, T *scaling_tensors, T *grad, T *out, unsigned int size) {
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int stride = blockDim.x * gridDim.x;

    for (unsigned int i = idx; i < size; i += stride) {
        scaling_tensors[i] += (grad[i] * grad[i]);
        out[i] = out[i] - (learning_rate / sqrt(1e-8 + scaling_tensors[i])) * grad[i];
    }
}

template <typename T>
void adagrad_device(T learning_rate, Tensor<T> *scaling_tensors, Tensor<T> *grad, Tensor<T> *out) {
    unsigned int size = out->get_size();
    hipLaunchKernelGGL(( kernel_adagrad_device), dim3((size + BLK_SIZE - 1) / BLK_SIZE), dim3(BLK_SIZE), 0, 0,
        learning_rate, scaling_tensors->get_ptr(), grad->get_ptr(), out->get_ptr(), size);
}

template void adagrad_device(int learning_rate, Tensor<int> *scaling_tensors, Tensor<int> *grad, Tensor<int> *out);
template void adagrad_device(float learning_rate, Tensor<float> *scaling_tensors, Tensor<float> *grad, Tensor<float> *out);
template void adagrad_device(double learning_rate, Tensor<double> *scaling_tensors, Tensor<double> *grad, Tensor<double> *out);

}  // namespace math
}  // namespace magmadnn

#undef BLK_SIZE
dd24049d0e63cd78ac659e07cda8ac3659ae39eb.cu
/**
 * @file adagrad.cu
 * @author Sedrick Keh
 * @version 1.0
 * @date 2019-07-26
 *
 * @copyright Copyright (c) 2019
 */
#include "math/optimizer_math/adagrad.h"

#define BLK_SIZE 1024

namespace magmadnn {
namespace math {

template <typename T>
__global__ void kernel_adagrad_device(T learning_rate, T *scaling_tensors, T *grad, T *out, unsigned int size) {
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int stride = blockDim.x * gridDim.x;

    for (unsigned int i = idx; i < size; i += stride) {
        scaling_tensors[i] += (grad[i] * grad[i]);
        out[i] = out[i] - (learning_rate / sqrt(1e-8 + scaling_tensors[i])) * grad[i];
    }
}

template <typename T>
void adagrad_device(T learning_rate, Tensor<T> *scaling_tensors, Tensor<T> *grad, Tensor<T> *out) {
    unsigned int size = out->get_size();
    kernel_adagrad_device<<<(size + BLK_SIZE - 1) / BLK_SIZE, BLK_SIZE>>>(
        learning_rate, scaling_tensors->get_ptr(), grad->get_ptr(), out->get_ptr(), size);
}

template void adagrad_device(int learning_rate, Tensor<int> *scaling_tensors, Tensor<int> *grad, Tensor<int> *out);
template void adagrad_device(float learning_rate, Tensor<float> *scaling_tensors, Tensor<float> *grad, Tensor<float> *out);
template void adagrad_device(double learning_rate, Tensor<double> *scaling_tensors, Tensor<double> *grad, Tensor<double> *out);

}  // namespace math
}  // namespace magmadnn

#undef BLK_SIZE
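The kernel in this pair applies the element-wise AdaGrad update in a grid-stride loop. A minimal CPU reference, assuming plain float arrays rather than magmadnn Tensor objects, is handy for validating the kernel output.

#include <cmath>

// CPU reference for the same update: accumulate squared gradients, then scale
// the step by 1/sqrt(accumulator + epsilon).
void adagrad_host(float learning_rate, float *scaling, const float *grad,
                  float *out, unsigned int size)
{
    for (unsigned int i = 0; i < size; i++) {
        scaling[i] += grad[i] * grad[i];
        out[i] -= (learning_rate / std::sqrt(1e-8f + scaling[i])) * grad[i];
    }
}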
91dc608b7213554a324716427aac90163099f92d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel_computeDesctriptorHIPRot.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; bool *_d_isdescriptor = NULL; hipMalloc(&_d_isdescriptor, XSIZE*YSIZE); char *_d_vdescriptor = NULL; hipMalloc(&_d_vdescriptor, XSIZE*YSIZE); int *_d_keypointsIndexX = NULL; hipMalloc(&_d_keypointsIndexX, XSIZE*YSIZE); int *_d_keypointsIndexY = NULL; hipMalloc(&_d_keypointsIndexY, XSIZE*YSIZE); int *_d_keypointsRotation = NULL; hipMalloc(&_d_keypointsRotation, XSIZE*YSIZE); int _amountofkeypoints = 1; unsigned int *_d_integralImage = NULL; hipMalloc(&_d_integralImage, XSIZE*YSIZE); int _d_width = XSIZE; int _d_height = YSIZE; float _scale = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel_computeDesctriptorCUDARot), dim3(gridBlock),dim3(threadBlock), 0, 0, _d_isdescriptor,_d_vdescriptor,_d_keypointsIndexX,_d_keypointsIndexY,_d_keypointsRotation,_amountofkeypoints,_d_integralImage,_d_width,_d_height,_scale); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_computeDesctriptorCUDARot), dim3(gridBlock),dim3(threadBlock), 0, 0, _d_isdescriptor,_d_vdescriptor,_d_keypointsIndexX,_d_keypointsIndexY,_d_keypointsRotation,_amountofkeypoints,_d_integralImage,_d_width,_d_height,_scale); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel_computeDesctriptorCUDARot), dim3(gridBlock),dim3(threadBlock), 0, 0, _d_isdescriptor,_d_vdescriptor,_d_keypointsIndexX,_d_keypointsIndexY,_d_keypointsRotation,_amountofkeypoints,_d_integralImage,_d_width,_d_height,_scale); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
91dc608b7213554a324716427aac90163099f92d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel_computeDesctriptorCUDARot.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; bool *_d_isdescriptor = NULL; cudaMalloc(&_d_isdescriptor, XSIZE*YSIZE); char *_d_vdescriptor = NULL; cudaMalloc(&_d_vdescriptor, XSIZE*YSIZE); int *_d_keypointsIndexX = NULL; cudaMalloc(&_d_keypointsIndexX, XSIZE*YSIZE); int *_d_keypointsIndexY = NULL; cudaMalloc(&_d_keypointsIndexY, XSIZE*YSIZE); int *_d_keypointsRotation = NULL; cudaMalloc(&_d_keypointsRotation, XSIZE*YSIZE); int _amountofkeypoints = 1; unsigned int *_d_integralImage = NULL; cudaMalloc(&_d_integralImage, XSIZE*YSIZE); int _d_width = XSIZE; int _d_height = YSIZE; float _scale = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel_computeDesctriptorCUDARot<<<gridBlock,threadBlock>>>(_d_isdescriptor,_d_vdescriptor,_d_keypointsIndexX,_d_keypointsIndexY,_d_keypointsRotation,_amountofkeypoints,_d_integralImage,_d_width,_d_height,_scale); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel_computeDesctriptorCUDARot<<<gridBlock,threadBlock>>>(_d_isdescriptor,_d_vdescriptor,_d_keypointsIndexX,_d_keypointsIndexY,_d_keypointsRotation,_amountofkeypoints,_d_integralImage,_d_width,_d_height,_scale); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel_computeDesctriptorCUDARot<<<gridBlock,threadBlock>>>(_d_isdescriptor,_d_vdescriptor,_d_keypointsIndexX,_d_keypointsIndexY,_d_keypointsRotation,_amountofkeypoints,_d_integralImage,_d_width,_d_height,_scale); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
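The harness above warms up with 10 launches and then times 1000 launches with std::chrono around an explicitly synchronized region. An alternative sketch using CUDA events, which time only the GPU work enqueued in the default stream, is shown below; the launch() callback is a hypothetical stand-in for one kernel enqueue and not part of the original file.

#include <cuda_runtime.h>

// Time `iters` enqueues of whatever launch() submits to the default stream.
float time_launches_ms(void (*launch)(), int iters)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < iters; i++) launch();   // each call enqueues one kernel
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                 // wait for the last launch to finish

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}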
c3cce62f383340dc6cf50bae570ab423cf28ed0f.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <time.h>

#define N 32*1024*1024
#define BLOCK_SIZE 256

template <unsigned int blockSize>
__device__ void warpReduce(volatile float* cache, int tid){
    if(blockSize >= 64)cache[tid]+=cache[tid+32];
    if(blockSize >= 32)cache[tid]+=cache[tid+16];
    if(blockSize >= 16)cache[tid]+=cache[tid+8];
    if(blockSize >= 8)cache[tid]+=cache[tid+4];
    if(blockSize >= 4)cache[tid]+=cache[tid+2];
    if(blockSize >= 2)cache[tid]+=cache[tid+1];
}

template <unsigned int blockSize>
__global__ void reduce_v5(float *g_idata, float *g_odata){
    __shared__ float sdata[BLOCK_SIZE];

    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
    __syncthreads();

    // do reduction in shared mem
    if(blockSize>=512){
        if(tid<256){ sdata[tid]+=sdata[tid+256]; }
        __syncthreads();
    }
    if(blockSize>=256){
        if(tid<128){ sdata[tid]+=sdata[tid+128]; }
        __syncthreads();
    }
    if(blockSize>=128){
        if(tid<64){ sdata[tid]+=sdata[tid+64]; }
        __syncthreads();
    }

    // write result for this block to global mem
    if(tid<32)warpReduce<blockSize>(sdata,tid);
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

int main() {
    float *input_host = (float*)malloc(N*sizeof(float));
    float *input_device;
    hipMalloc((void **)&input_device, N*sizeof(float));
    for (int i = 0; i < N; i++) input_host[i] = 2.0;
    hipMemcpy(input_device, input_host, N*sizeof(float), hipMemcpyHostToDevice);

    int32_t block_num = (N + BLOCK_SIZE - 1) / BLOCK_SIZE / 2;
    float *output_host = (float*)malloc((block_num) * sizeof(float));
    float *output_device;
    hipMalloc((void **)&output_device, (block_num) * sizeof(float));

    dim3 grid(block_num, 1);
    dim3 block(BLOCK_SIZE, 1);
    hipLaunchKernelGGL(( reduce_v5<BLOCK_SIZE>), dim3(grid), dim3(block), 0, 0, input_device, output_device);
    hipMemcpy(output_host, output_device, block_num * sizeof(float), hipMemcpyDeviceToHost);
    return 0;
}
c3cce62f383340dc6cf50bae570ab423cf28ed0f.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <time.h>

#define N 32*1024*1024
#define BLOCK_SIZE 256

template <unsigned int blockSize>
__device__ void warpReduce(volatile float* cache, int tid){
    if(blockSize >= 64)cache[tid]+=cache[tid+32];
    if(blockSize >= 32)cache[tid]+=cache[tid+16];
    if(blockSize >= 16)cache[tid]+=cache[tid+8];
    if(blockSize >= 8)cache[tid]+=cache[tid+4];
    if(blockSize >= 4)cache[tid]+=cache[tid+2];
    if(blockSize >= 2)cache[tid]+=cache[tid+1];
}

template <unsigned int blockSize>
__global__ void reduce_v5(float *g_idata, float *g_odata){
    __shared__ float sdata[BLOCK_SIZE];

    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
    __syncthreads();

    // do reduction in shared mem
    if(blockSize>=512){
        if(tid<256){ sdata[tid]+=sdata[tid+256]; }
        __syncthreads();
    }
    if(blockSize>=256){
        if(tid<128){ sdata[tid]+=sdata[tid+128]; }
        __syncthreads();
    }
    if(blockSize>=128){
        if(tid<64){ sdata[tid]+=sdata[tid+64]; }
        __syncthreads();
    }

    // write result for this block to global mem
    if(tid<32)warpReduce<blockSize>(sdata,tid);
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

int main() {
    float *input_host = (float*)malloc(N*sizeof(float));
    float *input_device;
    cudaMalloc((void **)&input_device, N*sizeof(float));
    for (int i = 0; i < N; i++) input_host[i] = 2.0;
    cudaMemcpy(input_device, input_host, N*sizeof(float), cudaMemcpyHostToDevice);

    int32_t block_num = (N + BLOCK_SIZE - 1) / BLOCK_SIZE / 2;
    float *output_host = (float*)malloc((block_num) * sizeof(float));
    float *output_device;
    cudaMalloc((void **)&output_device, (block_num) * sizeof(float));

    dim3 grid(block_num, 1);
    dim3 block(BLOCK_SIZE, 1);
    reduce_v5<BLOCK_SIZE><<<grid, block>>>(input_device, output_device);
    cudaMemcpy(output_host, output_device, block_num * sizeof(float), cudaMemcpyDeviceToHost);
    return 0;
}
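reduce_v5 makes each block consume 2*BLOCK_SIZE inputs (hence block_num is halved) and emits one partial sum per block; the program copies those partials back but never combines them. A minimal host-side finish, assuming the same 2.0f-filled input as above, could look like this.

#include <cmath>
#include <cstdio>

// Sum the per-block partials on the host and compare against 2.0 * n.
bool check_partial_sums(const float *output_host, int block_num, long long n)
{
    double total = 0.0;
    for (int i = 0; i < block_num; i++) total += output_host[i];
    double expected = 2.0 * (double)n;
    bool ok = std::abs(total - expected) < 1e-3 * expected;
    printf("reduce result %.1f (expected %.1f) -> %s\n",
           total, expected, ok ? "PASS" : "FAIL");
    return ok;
}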
1c86842f587fdcfae5765f2a364a198350e96163.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// first use of cuda, 17/11/17

#include<stdio.h>
#include<assert.h>

#define SIZE 12 //12000000
#define NUM_BLOCKS 8192
#define NUM_THREADS 512 // threads per block; must not exceed the 1024-per-block hardware limit

__global__ void add( int * d_a, int * d_b, int * d_c )
{
    // we have multiple blocks with multiple threads,
    // so that's how we access a thread:
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

    if ( idx < SIZE )
    {
        d_c[idx] = d_a[idx] + d_b[idx];
    }
}

int main()
{
    int i;
    int * h_a, * h_b, * h_c; // h means host pointer
    int * d_a, * d_b, * d_c; // d means device pointer
    size_t size_in_bytes = SIZE * sizeof(int);

    // allocate the pointers
    h_a = (int *) malloc( size_in_bytes);
    h_b = (int *) malloc( size_in_bytes);
    h_c = (int *) malloc( size_in_bytes);

    hipMalloc( (void**) &d_a, size_in_bytes );
    hipMalloc( (void**) &d_b, size_in_bytes );
    hipMalloc( (void**) &d_c, size_in_bytes );

    // initialize the arrays before copying
    for (i = 0; i < SIZE; ++i)
    {
        h_a[i] = 1;
        h_b[i] = 2;
    }

    // copying from cpu to gpu
    hipMemcpy( d_a, h_a, size_in_bytes, hipMemcpyHostToDevice );
    hipMemcpy( d_b, h_b, size_in_bytes, hipMemcpyHostToDevice );

    // round the block count up so that we create enough threads to cover SIZE elements
    hipLaunchKernelGGL(( add), dim3((SIZE + NUM_THREADS - 1) / NUM_THREADS), dim3(NUM_THREADS) , 0, 0, d_a, d_b, d_c );

    // copying from gpu to cpu
    hipMemcpy( h_c, d_c, size_in_bytes, hipMemcpyDeviceToHost );

    if ( SIZE < 100 )
    {
        for (i = 0; i < SIZE; ++i)
        {
            fprintf( stdout, "%d", h_c[i] );
        }
    }

    free( h_a );
    free( h_b );
    free( h_c );

    hipFree( d_a );
    hipFree( d_b );
    hipFree( d_c );

    return 0;
}
1c86842f587fdcfae5765f2a364a198350e96163.cu
// first use of cuda, 17/11/17

#include<stdio.h>
#include<assert.h>

#define SIZE 12 //12000000
#define NUM_BLOCKS 8192
#define NUM_THREADS 512 // threads per block; must not exceed the 1024-per-block hardware limit

__global__ void add( int * d_a, int * d_b, int * d_c )
{
    // we have multiple blocks with multiple threads,
    // so that's how we access a thread:
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

    if ( idx < SIZE )
    {
        d_c[idx] = d_a[idx] + d_b[idx];
    }
}

int main()
{
    int i;
    int * h_a, * h_b, * h_c; // h means host pointer
    int * d_a, * d_b, * d_c; // d means device pointer
    size_t size_in_bytes = SIZE * sizeof(int);

    // allocate the pointers
    h_a = (int *) malloc( size_in_bytes);
    h_b = (int *) malloc( size_in_bytes);
    h_c = (int *) malloc( size_in_bytes);

    cudaMalloc( (void**) &d_a, size_in_bytes );
    cudaMalloc( (void**) &d_b, size_in_bytes );
    cudaMalloc( (void**) &d_c, size_in_bytes );

    // initialize the arrays before copying
    for (i = 0; i < SIZE; ++i)
    {
        h_a[i] = 1;
        h_b[i] = 2;
    }

    // copying from cpu to gpu
    cudaMemcpy( d_a, h_a, size_in_bytes, cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, h_b, size_in_bytes, cudaMemcpyHostToDevice );

    // round the block count up so that we create enough threads to cover SIZE elements
    add<<< (SIZE + NUM_THREADS - 1) / NUM_THREADS, NUM_THREADS >>>( d_a, d_b, d_c );

    // copying from gpu to cpu
    cudaMemcpy( h_c, d_c, size_in_bytes, cudaMemcpyDeviceToHost );

    if ( SIZE < 100 )
    {
        for (i = 0; i < SIZE; ++i)
        {
            fprintf( stdout, "%d", h_c[i] );
        }
    }

    free( h_a );
    free( h_b );
    free( h_c );

    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_c );

    return 0;
}
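None of the runtime calls in this pair are checked. A small wrapper macro, shown here as a common sketch rather than part of the original file, makes allocation and launch-configuration errors visible immediately.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with the API error string if a runtime call fails.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
                    cudaGetErrorString(err_), __FILE__, __LINE__);            \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// usage sketch:
//   CUDA_CHECK(cudaMemcpy(d_a, h_a, size_in_bytes, cudaMemcpyHostToDevice));
//   add<<<blocks, threads>>>(d_a, d_b, d_c);
//   CUDA_CHECK(cudaGetLastError());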
bd06b580148296417746cc0be017165ee4e7c631.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <quda_internal.h> #include <tune_quda.h> #include <gauge_field_order.h> namespace quda { using namespace gauge; template <typename Order, int nDim, int dim> struct ExtractGhostExArg { Order order; int X[nDim]; int R[nDim]; int surfaceCB[nDim]; int A0[nDim]; int A1[nDim]; int B0[nDim]; int B1[nDim]; int C0[nDim]; int C1[nDim]; int fBody[nDim][nDim]; int fBuf[nDim][nDim]; int localParity[nDim]; int threads; ExtractGhostExArg(const Order &order, const int *X_, const int *R_, const int *surfaceCB_, const int *A0_, const int *A1_, const int *B0_, const int *B1_, const int *C0_, const int *C1_, const int fBody_[nDim][nDim], const int fBuf_[nDim][nDim], const int *localParity_) : order(order), threads(0) { threads = R_[dim]*(A1_[dim]-A0_[dim])*(B1_[dim]-B0_[dim])*(C1_[dim]-C0_[dim])*order.geometry; for (int d=0; d<nDim; d++) { X[d] = X_[d]; R[d] = R_[d]; surfaceCB[d] = surfaceCB_[d]; A0[d] = A0_[d]; A1[d] = A1_[d]; B0[d] = B0_[d]; B1[d] = B1_[d]; C0[d] = C0_[d]; C1[d] = C1_[d]; for (int e=0; e<nDim; e++) { fBody[d][e] = fBody_[d][e]; fBuf[d][e] = fBuf_[d][e]; } localParity[d] = localParity_[d]; } } }; template <typename Float, int length, int dim, typename Arg> __device__ __host__ void extractor(Arg &arg, int dir, int a, int b, int c, int d, int g, int parity) { typename mapper<Float>::type u[length]; int srcIdx = (a*arg.fBody[dim][0] + b*arg.fBody[dim][1] + c*arg.fBody[dim][2] + d*arg.fBody[dim][3]) >> 1; int dstIdx = (a*arg.fBuf[dim][0] + b*arg.fBuf[dim][1] + c*arg.fBuf[dim][2] + (d-(dir?arg.X[dim]:arg.R[dim]))*arg.fBuf[dim][3]) >> 1; // load the ghost element from the bulk arg.order.load(u, srcIdx, g, parity); // need dir dependence in write // srcIdx is used here to determine boundary condition arg.order.saveGhostEx(u, dstIdx, srcIdx, dir, dim, g, (parity+arg.localParity[dim])&1, arg.R); } template <typename Float, int length, int dim, typename Arg> __device__ __host__ void injector(Arg &arg, int dir, int a, int b, int c, int d, int g, int parity) { typename mapper<Float>::type u[length]; int srcIdx = (a*arg.fBuf[dim][0] + b*arg.fBuf[dim][1] + c*arg.fBuf[dim][2] + (d-dir*(arg.X[dim]+arg.R[dim]))*arg.fBuf[dim][3]) >> 1; int dstIdx = (a*arg.fBody[dim][0] + b*arg.fBody[dim][1] + c*arg.fBody[dim][2] + d*arg.fBody[dim][3]) >> 1; int oddness = (parity+arg.localParity[dim])&1; // need dir dependence in read // dstIdx is used here to determine boundary condition arg.order.loadGhostEx(u, srcIdx, dstIdx, dir, dim, g, oddness, arg.R); arg.order.save(u, dstIdx, g, parity); // save the ghost element into the bulk } /** Generic CPU gauge ghost extraction and packing NB This routines is specialized to four dimensions */ template <typename Float, int length, int nDim, int dim, typename Order, bool extract> void extractGhostEx(ExtractGhostExArg<Order,nDim,dim> arg) { typedef typename mapper<Float>::type RegType; for (int parity=0; parity<2; parity++) { // the following 4-way loop means this is specialized for 4 dimensions // dir = 0 backwards, dir = 1 forwards for (int dir = 0; dir<2; dir++) { int D0 = extract ? 
dir*arg.X[dim] + (1-dir)*arg.R[dim] : dir*(arg.X[dim] + arg.R[dim]); for (int d=D0; d<D0+arg.R[dim]; d++) { for (int a=arg.A0[dim]; a<arg.A1[dim]; a++) { // loop over the interior surface for (int b=arg.B0[dim]; b<arg.B1[dim]; b++) { // loop over the interior surface for (int c=arg.C0[dim]; c<arg.C1[dim]; c++) { // loop over the interior surface for (int g=0; g<arg.order.geometry; g++) { // we only do the extraction for parity we are currently working on int oddness = (a+b+c+d) & 1; if (oddness == parity) { if (extract) extractor<Float,length,dim>(arg, dir, a, b, c, d, g, parity); else injector<Float,length,dim>(arg, dir, a, b, c, d, g, parity); } // oddness == parity } // g } // c } // b } // a } // d } // dir } // parity } /** Generic GPU gauge ghost extraction and packing NB This routines is specialized to four dimensions FIXME this implementation will have two-way warp divergence */ /** Generic CPU gauge ghost extraction and packing NB This routines is specialized to four dimensions */ template <typename Float, int length, int nDim, int dim, typename Order, bool extract> __global__ void extractGhostExKernel(ExtractGhostExArg<Order,nDim,dim> arg) { typedef typename mapper<Float>::type RegType; // parallelize over parity and dir using block or grid /*for (int parity=0; parity<2; parity++) {*/ { int parity = blockIdx.z; // the following 4-way loop means this is specialized for 4 dimensions // dir = 0 backwards, dir = 1 forwards //for (int dir = 0; dir<2; dir++) { { int dir = blockIdx.y; // this will have two-warp divergence since we only do work on // one parity but parity alternates between threads // linear index used for writing into ghost buffer int X = blockIdx.x * blockDim.x + threadIdx.x; if (X >= arg.threads) return; int dA = arg.A1[dim]-arg.A0[dim]; int dB = arg.B1[dim]-arg.B0[dim]; int dC = arg.C1[dim]-arg.C0[dim]; int D0 = extract ? dir*arg.X[dim] + (1-dir)*arg.R[dim] : dir*(arg.X[dim] + arg.R[dim]); // thread order is optimized to maximize coalescing // X = (((g*R + d) * dA + a)*dB + b)*dC + c int gdab = X / dC; int c = arg.C0[dim] + X - gdab*dC; int gda = gdab / dB; int b = arg.B0[dim] + gdab - gda *dB; int gd = gda / dA; int a = arg.A0[dim] + gda - gd *dA; int g = gd / arg.R[dim]; int d = D0 + gd - g *arg.R[dim]; // we only do the extraction for parity we are currently working on int oddness = (a+b+c+d) & 1; if (oddness == parity) { if (extract) extractor<Float,length,dim>(arg, dir, a, b, c, d, g, parity); else injector<Float,length,dim>(arg, dir, a, b, c, d, g, parity); } // oddness == parity } // dir } // parity } template <typename Float, int length, int nDim, int dim, typename Order> class ExtractGhostEx : Tunable { ExtractGhostExArg<Order,nDim,dim> arg; int size; bool extract; const GaugeField &meta; QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return size; } public: ExtractGhostEx(ExtractGhostExArg<Order,nDim,dim> &arg, bool extract, const GaugeField &meta, QudaFieldLocation location) : arg(arg), extract(extract), meta(meta), location(location) { int dA = arg.A1[dim]-arg.A0[dim]; int dB = arg.B1[dim]-arg.B0[dim]; int dC = arg.C1[dim]-arg.C0[dim]; size = arg.R[dim]*dA*dB*dC*arg.order.geometry; writeAuxString("prec=%lu,stride=%d,extract=%d,dimension=%d,geometry=%d", sizeof(Float),arg.order.stride, extract, dim, arg.order.geometry); } virtual ~ExtractGhostEx() { ; } void apply(const hipStream_t &stream) { if (extract) { if (location==QUDA_CPU_FIELD_LOCATION) { extractGhostEx<Float,length,nDim,dim,Order,true>(arg); } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); tp.grid.y = 2; tp.grid.z = 2; hipLaunchKernelGGL(( extractGhostExKernel<Float,length,nDim,dim,Order,true>) , dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg); } } else { // we are injecting if (location==QUDA_CPU_FIELD_LOCATION) { extractGhostEx<Float,length,nDim,dim,Order,false>(arg); } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); tp.grid.y = 2; tp.grid.z = 2; hipLaunchKernelGGL(( extractGhostExKernel<Float,length,nDim,dim,Order,false>) , dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg); } } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 0; } long long bytes() const { return 2 * 2 * 2 * size * arg.order.Bytes(); } // 2 for i/o }; /** Generic CPU gauge ghost extraction and packing NB This routines is specialized to four dimensions @param E the extended gauge dimensions @param R array holding the radius of the extended region @param extract Whether we are extracting or injecting the ghost zone */ template <typename Float, int length, typename Order> void extractGhostEx(Order order, const int dim, const int *surfaceCB, const int *E, const int *R, bool extract, const GaugeField &u, QudaFieldLocation location) { const int nDim = 4; //loop variables: a, b, c with a the most signifcant and c the least significant //A0, B0, C0 the minimum value //A0, B0, C0 the maximum value int X[nDim]; // compute interior dimensions for (int d=0; d<nDim; d++) X[d] = E[d] - 2*R[d]; //..........x..........y............z.............t int A0[nDim] = {R[3], R[3], R[3], 0}; int A1[nDim] = {X[3]+R[3], X[3]+R[3], X[3]+R[3], X[2]+2*R[2]}; int B0[nDim] = {R[2], R[2], 0, 0}; int B1[nDim] = {X[2]+R[2], X[2]+R[2], X[1]+2*R[1], X[1]+2*R[1]}; int C0[nDim] = {R[1], 0, 0, 0}; int C1[nDim] = {X[1]+R[1], X[0]+2*R[0], X[0]+2*R[0], X[0]+2*R[0]}; int fSrc[nDim][nDim] = { {E[2]*E[1]*E[0], E[1]*E[0], E[0], 1}, {E[2]*E[1]*E[0], E[1]*E[0], 1, E[0]}, {E[2]*E[1]*E[0], E[0], 1, E[1]*E[0]}, {E[1]*E[0], E[0], 1, E[2]*E[1]*E[0]} }; int fBuf[nDim][nDim]={ {E[2]*E[1], E[1], 1, E[3]*E[2]*E[1]}, {E[2]*E[0], E[0], 1, E[3]*E[2]*E[0]}, {E[1]*E[0], E[0], 1, E[3]*E[1]*E[0]}, {E[1]*E[0], E[0], 1, E[2]*E[1]*E[0]} }; //set the local processor parity //switching odd and even ghost gauge when that dimension size is odd //only switch if X[dir] is odd and the gridsize in that dimension is greater than 1 // FIXME - I don't understand this, shouldn't it be commDim(dim) == 0 ? int localParity[nDim]; for (int d=0; d<nDim; d++) localParity[dim] = ((X[dim] % 2 ==1) && (commDim(dim) > 1)) ? 1 : 0; // localParity[dim] = (X[dim]%2==0 || commDim(dim)) ? 
0 : 1; if (dim==0) { ExtractGhostExArg<Order,nDim,0> arg(order, X, R, surfaceCB, A0, A1, B0, B1, C0, C1, fSrc, fBuf, localParity); ExtractGhostEx<Float,length,nDim,0,Order> extractor(arg, extract, u, location); extractor.apply(0); } else if (dim==1) { ExtractGhostExArg<Order,nDim,1> arg(order, X, R, surfaceCB, A0, A1, B0, B1, C0, C1, fSrc, fBuf, localParity); ExtractGhostEx<Float,length,nDim,1,Order> extractor(arg, extract, u, location); extractor.apply(0); } else if (dim==2) { ExtractGhostExArg<Order,nDim,2> arg(order, X, R, surfaceCB, A0, A1, B0, B1, C0, C1, fSrc, fBuf, localParity); ExtractGhostEx<Float,length,nDim,2,Order> extractor(arg, extract, u, location); extractor.apply(0); } else if (dim==3) { ExtractGhostExArg<Order,nDim,3> arg(order, X, R, surfaceCB, A0, A1, B0, B1, C0, C1, fSrc, fBuf, localParity); ExtractGhostEx<Float,length,nDim,3,Order> extractor(arg, extract, u, location); extractor.apply(0); } else { errorQuda("Invalid dim=%d", dim); } checkCudaError(); } /** This is the template driver for extractGhost */ template <typename Float> void extractGhostEx(const GaugeField &u, int dim, const int *R, Float **Ghost, bool extract) { const int length = 18; QudaFieldLocation location = (typeid(u)==typeid(cudaGaugeField)) ? QUDA_CUDA_FIELD_LOCATION : QUDA_CPU_FIELD_LOCATION; if (u.isNative()) { if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) { if (typeid(Float)==typeid(short) && u.LinkType() == QUDA_ASQTAD_FAT_LINKS) { extractGhostEx<short,length>(FloatNOrder<short,length,2,19>(u, 0, (short**)Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } else { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; extractGhostEx<Float,length>(G(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } } else if (u.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; extractGhostEx<Float,length>(G(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } else if (u.Reconstruct() == QUDA_RECONSTRUCT_8) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type G; extractGhostEx<Float,length>(G(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } else if (u.Reconstruct() == QUDA_RECONSTRUCT_13) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_13>::type G; extractGhostEx<Float,length>(G(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } else if (u.Reconstruct() == QUDA_RECONSTRUCT_9) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_13>::type G; extractGhostEx<Float,length>(G(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } } else if (u.Order() == QUDA_QDP_GAUGE_ORDER) { #ifdef BUILD_QDP_INTERFACE extractGhostEx<Float,length>(QDPOrder<Float,length>(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("QDP interface has not been built\n"); #endif } else if (u.Order() == QUDA_QDPJIT_GAUGE_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE extractGhostEx<Float,length>(QDPJITOrder<Float,length>(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (u.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) { #ifdef BUILD_CPS_INTERFACE extractGhostEx<Float,length>(CPSOrder<Float,length>(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("CPS interface has not been built\n"); #endif } else if (u.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE extractGhostEx<Float,length>(MILCOrder<Float,length>(u, 0, 
Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("MILC interface has not been built\n"); #endif } else if (u.Order() == QUDA_BQCD_GAUGE_ORDER) { #ifdef BUILD_BQCD_INTERFACE extractGhostEx<Float,length>(BQCDOrder<Float,length>(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("BQCD interface has not been built\n"); #endif } else if (u.Order() == QUDA_TIFR_GAUGE_ORDER) { #ifdef BUILD_TIFR_INTERFACE extractGhostEx<Float,length>(TIFROrder<Float,length>(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("TIFR interface has not been built\n"); #endif } else { errorQuda("Gauge field %d order not supported", u.Order()); } } void extractExtendedGaugeGhost(const GaugeField &u, int dim, const int *R, void **ghost, bool extract) { if (u.Precision() == QUDA_DOUBLE_PRECISION) { extractGhostEx(u, dim, R, (double**)ghost, extract); } else if (u.Precision() == QUDA_SINGLE_PRECISION) { extractGhostEx(u, dim, R, (float**)ghost, extract); } else if (u.Precision() == QUDA_HALF_PRECISION) { extractGhostEx(u, dim, R, (short**)ghost, extract); } else { errorQuda("Unknown precision type %d", u.Precision()); } } } // namespace quda
bd06b580148296417746cc0be017165ee4e7c631.cu
#include <quda_internal.h> #include <tune_quda.h> #include <gauge_field_order.h> namespace quda { using namespace gauge; template <typename Order, int nDim, int dim> struct ExtractGhostExArg { Order order; int X[nDim]; int R[nDim]; int surfaceCB[nDim]; int A0[nDim]; int A1[nDim]; int B0[nDim]; int B1[nDim]; int C0[nDim]; int C1[nDim]; int fBody[nDim][nDim]; int fBuf[nDim][nDim]; int localParity[nDim]; int threads; ExtractGhostExArg(const Order &order, const int *X_, const int *R_, const int *surfaceCB_, const int *A0_, const int *A1_, const int *B0_, const int *B1_, const int *C0_, const int *C1_, const int fBody_[nDim][nDim], const int fBuf_[nDim][nDim], const int *localParity_) : order(order), threads(0) { threads = R_[dim]*(A1_[dim]-A0_[dim])*(B1_[dim]-B0_[dim])*(C1_[dim]-C0_[dim])*order.geometry; for (int d=0; d<nDim; d++) { X[d] = X_[d]; R[d] = R_[d]; surfaceCB[d] = surfaceCB_[d]; A0[d] = A0_[d]; A1[d] = A1_[d]; B0[d] = B0_[d]; B1[d] = B1_[d]; C0[d] = C0_[d]; C1[d] = C1_[d]; for (int e=0; e<nDim; e++) { fBody[d][e] = fBody_[d][e]; fBuf[d][e] = fBuf_[d][e]; } localParity[d] = localParity_[d]; } } }; template <typename Float, int length, int dim, typename Arg> __device__ __host__ void extractor(Arg &arg, int dir, int a, int b, int c, int d, int g, int parity) { typename mapper<Float>::type u[length]; int srcIdx = (a*arg.fBody[dim][0] + b*arg.fBody[dim][1] + c*arg.fBody[dim][2] + d*arg.fBody[dim][3]) >> 1; int dstIdx = (a*arg.fBuf[dim][0] + b*arg.fBuf[dim][1] + c*arg.fBuf[dim][2] + (d-(dir?arg.X[dim]:arg.R[dim]))*arg.fBuf[dim][3]) >> 1; // load the ghost element from the bulk arg.order.load(u, srcIdx, g, parity); // need dir dependence in write // srcIdx is used here to determine boundary condition arg.order.saveGhostEx(u, dstIdx, srcIdx, dir, dim, g, (parity+arg.localParity[dim])&1, arg.R); } template <typename Float, int length, int dim, typename Arg> __device__ __host__ void injector(Arg &arg, int dir, int a, int b, int c, int d, int g, int parity) { typename mapper<Float>::type u[length]; int srcIdx = (a*arg.fBuf[dim][0] + b*arg.fBuf[dim][1] + c*arg.fBuf[dim][2] + (d-dir*(arg.X[dim]+arg.R[dim]))*arg.fBuf[dim][3]) >> 1; int dstIdx = (a*arg.fBody[dim][0] + b*arg.fBody[dim][1] + c*arg.fBody[dim][2] + d*arg.fBody[dim][3]) >> 1; int oddness = (parity+arg.localParity[dim])&1; // need dir dependence in read // dstIdx is used here to determine boundary condition arg.order.loadGhostEx(u, srcIdx, dstIdx, dir, dim, g, oddness, arg.R); arg.order.save(u, dstIdx, g, parity); // save the ghost element into the bulk } /** Generic CPU gauge ghost extraction and packing NB This routines is specialized to four dimensions */ template <typename Float, int length, int nDim, int dim, typename Order, bool extract> void extractGhostEx(ExtractGhostExArg<Order,nDim,dim> arg) { typedef typename mapper<Float>::type RegType; for (int parity=0; parity<2; parity++) { // the following 4-way loop means this is specialized for 4 dimensions // dir = 0 backwards, dir = 1 forwards for (int dir = 0; dir<2; dir++) { int D0 = extract ? 
dir*arg.X[dim] + (1-dir)*arg.R[dim] : dir*(arg.X[dim] + arg.R[dim]); for (int d=D0; d<D0+arg.R[dim]; d++) { for (int a=arg.A0[dim]; a<arg.A1[dim]; a++) { // loop over the interior surface for (int b=arg.B0[dim]; b<arg.B1[dim]; b++) { // loop over the interior surface for (int c=arg.C0[dim]; c<arg.C1[dim]; c++) { // loop over the interior surface for (int g=0; g<arg.order.geometry; g++) { // we only do the extraction for parity we are currently working on int oddness = (a+b+c+d) & 1; if (oddness == parity) { if (extract) extractor<Float,length,dim>(arg, dir, a, b, c, d, g, parity); else injector<Float,length,dim>(arg, dir, a, b, c, d, g, parity); } // oddness == parity } // g } // c } // b } // a } // d } // dir } // parity } /** Generic GPU gauge ghost extraction and packing NB This routines is specialized to four dimensions FIXME this implementation will have two-way warp divergence */ /** Generic CPU gauge ghost extraction and packing NB This routines is specialized to four dimensions */ template <typename Float, int length, int nDim, int dim, typename Order, bool extract> __global__ void extractGhostExKernel(ExtractGhostExArg<Order,nDim,dim> arg) { typedef typename mapper<Float>::type RegType; // parallelize over parity and dir using block or grid /*for (int parity=0; parity<2; parity++) {*/ { int parity = blockIdx.z; // the following 4-way loop means this is specialized for 4 dimensions // dir = 0 backwards, dir = 1 forwards //for (int dir = 0; dir<2; dir++) { { int dir = blockIdx.y; // this will have two-warp divergence since we only do work on // one parity but parity alternates between threads // linear index used for writing into ghost buffer int X = blockIdx.x * blockDim.x + threadIdx.x; if (X >= arg.threads) return; int dA = arg.A1[dim]-arg.A0[dim]; int dB = arg.B1[dim]-arg.B0[dim]; int dC = arg.C1[dim]-arg.C0[dim]; int D0 = extract ? dir*arg.X[dim] + (1-dir)*arg.R[dim] : dir*(arg.X[dim] + arg.R[dim]); // thread order is optimized to maximize coalescing // X = (((g*R + d) * dA + a)*dB + b)*dC + c int gdab = X / dC; int c = arg.C0[dim] + X - gdab*dC; int gda = gdab / dB; int b = arg.B0[dim] + gdab - gda *dB; int gd = gda / dA; int a = arg.A0[dim] + gda - gd *dA; int g = gd / arg.R[dim]; int d = D0 + gd - g *arg.R[dim]; // we only do the extraction for parity we are currently working on int oddness = (a+b+c+d) & 1; if (oddness == parity) { if (extract) extractor<Float,length,dim>(arg, dir, a, b, c, d, g, parity); else injector<Float,length,dim>(arg, dir, a, b, c, d, g, parity); } // oddness == parity } // dir } // parity } template <typename Float, int length, int nDim, int dim, typename Order> class ExtractGhostEx : Tunable { ExtractGhostExArg<Order,nDim,dim> arg; int size; bool extract; const GaugeField &meta; QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return size; } public: ExtractGhostEx(ExtractGhostExArg<Order,nDim,dim> &arg, bool extract, const GaugeField &meta, QudaFieldLocation location) : arg(arg), extract(extract), meta(meta), location(location) { int dA = arg.A1[dim]-arg.A0[dim]; int dB = arg.B1[dim]-arg.B0[dim]; int dC = arg.C1[dim]-arg.C0[dim]; size = arg.R[dim]*dA*dB*dC*arg.order.geometry; writeAuxString("prec=%lu,stride=%d,extract=%d,dimension=%d,geometry=%d", sizeof(Float),arg.order.stride, extract, dim, arg.order.geometry); } virtual ~ExtractGhostEx() { ; } void apply(const cudaStream_t &stream) { if (extract) { if (location==QUDA_CPU_FIELD_LOCATION) { extractGhostEx<Float,length,nDim,dim,Order,true>(arg); } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); tp.grid.y = 2; tp.grid.z = 2; extractGhostExKernel<Float,length,nDim,dim,Order,true> <<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg); } } else { // we are injecting if (location==QUDA_CPU_FIELD_LOCATION) { extractGhostEx<Float,length,nDim,dim,Order,false>(arg); } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); tp.grid.y = 2; tp.grid.z = 2; extractGhostExKernel<Float,length,nDim,dim,Order,false> <<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg); } } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 0; } long long bytes() const { return 2 * 2 * 2 * size * arg.order.Bytes(); } // 2 for i/o }; /** Generic CPU gauge ghost extraction and packing NB This routines is specialized to four dimensions @param E the extended gauge dimensions @param R array holding the radius of the extended region @param extract Whether we are extracting or injecting the ghost zone */ template <typename Float, int length, typename Order> void extractGhostEx(Order order, const int dim, const int *surfaceCB, const int *E, const int *R, bool extract, const GaugeField &u, QudaFieldLocation location) { const int nDim = 4; //loop variables: a, b, c with a the most signifcant and c the least significant //A0, B0, C0 the minimum value //A0, B0, C0 the maximum value int X[nDim]; // compute interior dimensions for (int d=0; d<nDim; d++) X[d] = E[d] - 2*R[d]; //..........x..........y............z.............t int A0[nDim] = {R[3], R[3], R[3], 0}; int A1[nDim] = {X[3]+R[3], X[3]+R[3], X[3]+R[3], X[2]+2*R[2]}; int B0[nDim] = {R[2], R[2], 0, 0}; int B1[nDim] = {X[2]+R[2], X[2]+R[2], X[1]+2*R[1], X[1]+2*R[1]}; int C0[nDim] = {R[1], 0, 0, 0}; int C1[nDim] = {X[1]+R[1], X[0]+2*R[0], X[0]+2*R[0], X[0]+2*R[0]}; int fSrc[nDim][nDim] = { {E[2]*E[1]*E[0], E[1]*E[0], E[0], 1}, {E[2]*E[1]*E[0], E[1]*E[0], 1, E[0]}, {E[2]*E[1]*E[0], E[0], 1, E[1]*E[0]}, {E[1]*E[0], E[0], 1, E[2]*E[1]*E[0]} }; int fBuf[nDim][nDim]={ {E[2]*E[1], E[1], 1, E[3]*E[2]*E[1]}, {E[2]*E[0], E[0], 1, E[3]*E[2]*E[0]}, {E[1]*E[0], E[0], 1, E[3]*E[1]*E[0]}, {E[1]*E[0], E[0], 1, E[2]*E[1]*E[0]} }; //set the local processor parity //switching odd and even ghost gauge when that dimension size is odd //only switch if X[dir] is odd and the gridsize in that dimension is greater than 1 // FIXME - I don't understand this, shouldn't it be commDim(dim) == 0 ? int localParity[nDim]; for (int d=0; d<nDim; d++) localParity[dim] = ((X[dim] % 2 ==1) && (commDim(dim) > 1)) ? 1 : 0; // localParity[dim] = (X[dim]%2==0 || commDim(dim)) ? 
0 : 1; if (dim==0) { ExtractGhostExArg<Order,nDim,0> arg(order, X, R, surfaceCB, A0, A1, B0, B1, C0, C1, fSrc, fBuf, localParity); ExtractGhostEx<Float,length,nDim,0,Order> extractor(arg, extract, u, location); extractor.apply(0); } else if (dim==1) { ExtractGhostExArg<Order,nDim,1> arg(order, X, R, surfaceCB, A0, A1, B0, B1, C0, C1, fSrc, fBuf, localParity); ExtractGhostEx<Float,length,nDim,1,Order> extractor(arg, extract, u, location); extractor.apply(0); } else if (dim==2) { ExtractGhostExArg<Order,nDim,2> arg(order, X, R, surfaceCB, A0, A1, B0, B1, C0, C1, fSrc, fBuf, localParity); ExtractGhostEx<Float,length,nDim,2,Order> extractor(arg, extract, u, location); extractor.apply(0); } else if (dim==3) { ExtractGhostExArg<Order,nDim,3> arg(order, X, R, surfaceCB, A0, A1, B0, B1, C0, C1, fSrc, fBuf, localParity); ExtractGhostEx<Float,length,nDim,3,Order> extractor(arg, extract, u, location); extractor.apply(0); } else { errorQuda("Invalid dim=%d", dim); } checkCudaError(); } /** This is the template driver for extractGhost */ template <typename Float> void extractGhostEx(const GaugeField &u, int dim, const int *R, Float **Ghost, bool extract) { const int length = 18; QudaFieldLocation location = (typeid(u)==typeid(cudaGaugeField)) ? QUDA_CUDA_FIELD_LOCATION : QUDA_CPU_FIELD_LOCATION; if (u.isNative()) { if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) { if (typeid(Float)==typeid(short) && u.LinkType() == QUDA_ASQTAD_FAT_LINKS) { extractGhostEx<short,length>(FloatNOrder<short,length,2,19>(u, 0, (short**)Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } else { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; extractGhostEx<Float,length>(G(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } } else if (u.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; extractGhostEx<Float,length>(G(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } else if (u.Reconstruct() == QUDA_RECONSTRUCT_8) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type G; extractGhostEx<Float,length>(G(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } else if (u.Reconstruct() == QUDA_RECONSTRUCT_13) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_13>::type G; extractGhostEx<Float,length>(G(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } else if (u.Reconstruct() == QUDA_RECONSTRUCT_9) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_13>::type G; extractGhostEx<Float,length>(G(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); } } else if (u.Order() == QUDA_QDP_GAUGE_ORDER) { #ifdef BUILD_QDP_INTERFACE extractGhostEx<Float,length>(QDPOrder<Float,length>(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("QDP interface has not been built\n"); #endif } else if (u.Order() == QUDA_QDPJIT_GAUGE_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE extractGhostEx<Float,length>(QDPJITOrder<Float,length>(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (u.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) { #ifdef BUILD_CPS_INTERFACE extractGhostEx<Float,length>(CPSOrder<Float,length>(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("CPS interface has not been built\n"); #endif } else if (u.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE extractGhostEx<Float,length>(MILCOrder<Float,length>(u, 0, 
Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("MILC interface has not been built\n"); #endif } else if (u.Order() == QUDA_BQCD_GAUGE_ORDER) { #ifdef BUILD_BQCD_INTERFACE extractGhostEx<Float,length>(BQCDOrder<Float,length>(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("BQCD interface has not been built\n"); #endif } else if (u.Order() == QUDA_TIFR_GAUGE_ORDER) { #ifdef BUILD_TIFR_INTERFACE extractGhostEx<Float,length>(TIFROrder<Float,length>(u, 0, Ghost), dim, u.SurfaceCB(), u.X(), R, extract, u, location); #else errorQuda("TIFR interface has not been built\n"); #endif } else { errorQuda("Gauge field %d order not supported", u.Order()); } } void extractExtendedGaugeGhost(const GaugeField &u, int dim, const int *R, void **ghost, bool extract) { if (u.Precision() == QUDA_DOUBLE_PRECISION) { extractGhostEx(u, dim, R, (double**)ghost, extract); } else if (u.Precision() == QUDA_SINGLE_PRECISION) { extractGhostEx(u, dim, R, (float**)ghost, extract); } else if (u.Precision() == QUDA_HALF_PRECISION) { extractGhostEx(u, dim, R, (short**)ghost, extract); } else { errorQuda("Unknown precision type %d", u.Precision()); } } } // namespace quda
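The GPU path in this pair flattens the CPU path's (geometry, d, a, b, c) loop nest into a single thread index and recovers the coordinates with a chain of div/mod operations. A standalone sketch of that decode follows; the struct and function names are illustrative, and the A0/B0/C0/D0 offsets the real kernel adds afterwards are omitted for brevity.

// X = (((g*R + d)*dA + a)*dB + b)*dC + c, inverted by repeated div/mod.
struct GhostCoord { int g, d, a, b, c; };

GhostCoord decode_ghost_index(int X, int R, int dA, int dB, int dC)
{
    GhostCoord s;
    int gdab = X / dC;    s.c = X    - gdab * dC;
    int gda  = gdab / dB; s.b = gdab - gda  * dB;
    int gd   = gda  / dA; s.a = gda  - gd   * dA;
    s.g = gd / R;         s.d = gd   - s.g  * R;
    return s;
}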
7822e15514784d6c546170b097ffd3eb50b79f3a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "flexflow/ops/groupby.h" #include "flexflow/utils/cuda_helper.h" #include <math.h> #include <stdio.h> #define MAX_K 4 #define MAX_BATCH_SIZE 64 #define MAX_N 12 namespace FlexFlow { __global__ void gb_forward_kernel(float const *input, int const *exp_assign, float **outputs, int n, // num experts int k, // chosen experts float alpha, // factor additional memory assigned int batch_size, int data_dim) { __shared__ float *chosen_exp_preds[MAX_K * MAX_BATCH_SIZE]; // Get pred pointers, single thread per block if (threadIdx.x == 0) { int exp_tensor_rows = ceil(alpha * k / n * batch_size); int expert_idx[MAX_N] = {0}; for (int i = 0; i < k * batch_size; i++) { // Get pointer to chosen expert predictions int expert = exp_assign[i]; if (expert_idx[expert] >= exp_tensor_rows) { // dropped sample chosen_exp_preds[i] = 0; continue; } chosen_exp_preds[i] = outputs[expert] + expert_idx[expert] * data_dim; expert_idx[expert]++; } } __syncthreads(); // compute output CUDA_KERNEL_LOOP(i, k * batch_size * data_dim) { if (chosen_exp_preds[i / data_dim] != 0) { float a = input[(i / (k * data_dim)) * data_dim + i % data_dim]; chosen_exp_preds[i / data_dim][i % data_dim] = a; } } } __global__ void gb_backward_kernel(float *input_grad, int const *exp_assign, float **output_grads, int n, // num experts int k, // chosen experts float alpha, // factor additional memory assigned int batch_size, int data_dim) { __shared__ float *chosen_exp_grads[MAX_K * MAX_BATCH_SIZE]; assert(k <= MAX_K); assert(batch_size <= MAX_BATCH_SIZE); assert(n <= MAX_N); // Get pred pointers, single thread if (threadIdx.x == 0) { int exp_tensor_rows = ceil(alpha * k / n * batch_size); int expert_idx[MAX_N] = {0}; for (int i = 0; i < k * batch_size; i++) { // Get pointer to chosen expert predictions int expert = exp_assign[i]; if (expert_idx[expert] >= exp_tensor_rows) { // dropped sample chosen_exp_grads[i] = nullptr; continue; } chosen_exp_grads[i] = output_grads[expert] + expert_idx[expert] * data_dim; expert_idx[expert]++; } } __syncthreads(); // compute output CUDA_KERNEL_LOOP(i, k * batch_size * data_dim) { if (chosen_exp_grads[i / data_dim] != nullptr) { input_grad[(i / (k * data_dim)) * data_dim + i % data_dim] = chosen_exp_grads[i / data_dim][i % data_dim]; } } } /*static*/ void Group_by::forward_kernel_wrapper(GroupByMeta const *m, float const *input, int const *exp_assign, float **outputs, int n, // num experts int k, // chosen experts int batch_size, int data_dim) { // TODO: why cublas/cudnn stream is needed here? 
float alpha = m->alpha; hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } // call forward kernel hipMemcpyAsync(m->dev_region_ptrs, outputs, n * sizeof(float *), hipMemcpyHostToDevice, stream); hipLaunchKernelGGL(( gb_forward_kernel), dim3(GET_BLOCKS(batch_size * k * data_dim)), dim3(min(CUDA_NUM_THREADS, (int)(batch_size * k * data_dim))), 0, stream, input, exp_assign, m->dev_region_ptrs, n, k, alpha, batch_size, data_dim); if (m->profiling) { hipEventRecord(t_end, stream); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("[GroupBy] forward time = %.2lfms\n", elapsed); } } void Group_by::backward_kernel_wrapper(GroupByMeta const *m, float *input_grad, int const *exp_assign, float **output_grads, int n, // num experts int k, // chosen experts int batch_size, int data_dim) { float alpha = m->alpha; // TODO: why cublas/cudnn stream is needed here hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } // call backward kernel hipMemcpyAsync(m->dev_region_ptrs, output_grads, n * sizeof(float *), hipMemcpyHostToDevice, stream); hipLaunchKernelGGL(( gb_backward_kernel), dim3(GET_BLOCKS(batch_size * k * data_dim)), dim3(min(CUDA_NUM_THREADS, (int)(batch_size * k * data_dim))), 0, stream, input_grad, exp_assign, m->dev_region_ptrs, n, k, alpha, batch_size, data_dim); if (m->profiling) { hipEventRecord(t_end, stream); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("[GroupBy] backward time = %.2lfms\n", elapsed); } } GroupByMeta::GroupByMeta(FFHandler handler, int n, float _alpha) : OpMeta(handler), alpha(_alpha) { checkCUDA(hipMalloc(&dev_region_ptrs, n * sizeof(float *))); } GroupByMeta::~GroupByMeta(void) { checkCUDA(hipFree(dev_region_ptrs)); } }; // namespace FlexFlow
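Read serially, the routing that gb_forward_kernel performs is: assignment i sends token i / k into the next free row of expert exp_assign[i], and assignments beyond the per-expert capacity ceil(alpha * k / n * batch_size) are dropped. A minimal CPU reference sketch of that data movement (names are local to the sketch; it assumes each outputs[e] holds at least capacity * data_dim floats):

// CPU reference sketch, not the FlexFlow kernel itself.
#include <cmath>
#include <cstring>
#include <vector>

void group_by_forward_reference(const float *input,    // [batch_size, data_dim]
                                const int *exp_assign, // [batch_size * k]
                                std::vector<std::vector<float>> &outputs, // n buffers
                                int n, int k, float alpha,
                                int batch_size, int data_dim) {
  int capacity = static_cast<int>(std::ceil(alpha * k / n * batch_size));
  std::vector<int> rows_used(n, 0);
  for (int i = 0; i < k * batch_size; ++i) {
    int expert = exp_assign[i];
    if (rows_used[expert] >= capacity) continue; // dropped sample
    int token = i / k;                           // source row in `input`
    float *dst = outputs[expert].data() + rows_used[expert] * data_dim;
    std::memcpy(dst, input + token * data_dim, data_dim * sizeof(float));
    ++rows_used[expert];
  }
}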
7822e15514784d6c546170b097ffd3eb50b79f3a.cu
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "flexflow/ops/groupby.h" #include "flexflow/utils/cuda_helper.h" #include <math.h> #include <stdio.h> #define MAX_K 4 #define MAX_BATCH_SIZE 64 #define MAX_N 12 namespace FlexFlow { __global__ void gb_forward_kernel(float const *input, int const *exp_assign, float **outputs, int n, // num experts int k, // chosen experts float alpha, // factor additional memory assigned int batch_size, int data_dim) { __shared__ float *chosen_exp_preds[MAX_K * MAX_BATCH_SIZE]; // Get pred pointers, single thread per block if (threadIdx.x == 0) { int exp_tensor_rows = ceil(alpha * k / n * batch_size); int expert_idx[MAX_N] = {0}; for (int i = 0; i < k * batch_size; i++) { // Get pointer to chosen expert predictions int expert = exp_assign[i]; if (expert_idx[expert] >= exp_tensor_rows) { // dropped sample chosen_exp_preds[i] = 0; continue; } chosen_exp_preds[i] = outputs[expert] + expert_idx[expert] * data_dim; expert_idx[expert]++; } } __syncthreads(); // compute output CUDA_KERNEL_LOOP(i, k * batch_size * data_dim) { if (chosen_exp_preds[i / data_dim] != 0) { float a = input[(i / (k * data_dim)) * data_dim + i % data_dim]; chosen_exp_preds[i / data_dim][i % data_dim] = a; } } } __global__ void gb_backward_kernel(float *input_grad, int const *exp_assign, float **output_grads, int n, // num experts int k, // chosen experts float alpha, // factor additional memory assigned int batch_size, int data_dim) { __shared__ float *chosen_exp_grads[MAX_K * MAX_BATCH_SIZE]; assert(k <= MAX_K); assert(batch_size <= MAX_BATCH_SIZE); assert(n <= MAX_N); // Get pred pointers, single thread if (threadIdx.x == 0) { int exp_tensor_rows = ceil(alpha * k / n * batch_size); int expert_idx[MAX_N] = {0}; for (int i = 0; i < k * batch_size; i++) { // Get pointer to chosen expert predictions int expert = exp_assign[i]; if (expert_idx[expert] >= exp_tensor_rows) { // dropped sample chosen_exp_grads[i] = nullptr; continue; } chosen_exp_grads[i] = output_grads[expert] + expert_idx[expert] * data_dim; expert_idx[expert]++; } } __syncthreads(); // compute output CUDA_KERNEL_LOOP(i, k * batch_size * data_dim) { if (chosen_exp_grads[i / data_dim] != nullptr) { input_grad[(i / (k * data_dim)) * data_dim + i % data_dim] = chosen_exp_grads[i / data_dim][i % data_dim]; } } } /*static*/ void Group_by::forward_kernel_wrapper(GroupByMeta const *m, float const *input, int const *exp_assign, float **outputs, int n, // num experts int k, // chosen experts int batch_size, int data_dim) { // TODO: why cublas/cudnn stream is needed here? 
float alpha = m->alpha; cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } // call forward kernel cudaMemcpyAsync(m->dev_region_ptrs, outputs, n * sizeof(float *), cudaMemcpyHostToDevice, stream); gb_forward_kernel<<<GET_BLOCKS(batch_size * k * data_dim), min(CUDA_NUM_THREADS, (int)(batch_size * k * data_dim)), 0, stream>>>( input, exp_assign, m->dev_region_ptrs, n, k, alpha, batch_size, data_dim); if (m->profiling) { cudaEventRecord(t_end, stream); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("[GroupBy] forward time = %.2lfms\n", elapsed); } } void Group_by::backward_kernel_wrapper(GroupByMeta const *m, float *input_grad, int const *exp_assign, float **output_grads, int n, // num experts int k, // chosen experts int batch_size, int data_dim) { float alpha = m->alpha; // TODO: why cublas/cudnn stream is needed here cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } // call backward kernel cudaMemcpyAsync(m->dev_region_ptrs, output_grads, n * sizeof(float *), cudaMemcpyHostToDevice, stream); gb_backward_kernel<<<GET_BLOCKS(batch_size * k * data_dim), min(CUDA_NUM_THREADS, (int)(batch_size * k * data_dim)), 0, stream>>>(input_grad, exp_assign, m->dev_region_ptrs, n, k, alpha, batch_size, data_dim); if (m->profiling) { cudaEventRecord(t_end, stream); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("[GroupBy] backward time = %.2lfms\n", elapsed); } } GroupByMeta::GroupByMeta(FFHandler handler, int n, float _alpha) : OpMeta(handler), alpha(_alpha) { checkCUDA(cudaMalloc(&dev_region_ptrs, n * sizeof(float *))); } GroupByMeta::~GroupByMeta(void) { checkCUDA(cudaFree(dev_region_ptrs)); } }; // namespace FlexFlow
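Both wrappers above time their launches with CUDA events when profiling is enabled. A minimal, self-contained sketch of that timing pattern on a stream (placeholder kernel, not FlexFlow code):

// Record events on the same stream as the work, synchronize on the stop event,
// then read the elapsed milliseconds.
#include <cuda_runtime.h>

__global__ void scale_kernel(float *data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= 2.0f;
}

float timed_launch(float *d_data, int n, cudaStream_t stream) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, stream);
  scale_kernel<<<(n + 255) / 256, 256, 0, stream>>>(d_data, n);
  cudaEventRecord(stop, stream);

  cudaEventSynchronize(stop);             // wait for the recorded work to finish
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop); // milliseconds between the two events

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}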
a57e7c503f362cbdc5b3ac1e04f64b9e18a20d4a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/limits.hpp" namespace cv { namespace gpu { namespace device { namespace bgfg_gmg { __constant__ int c_width; __constant__ int c_height; __constant__ float c_minVal; __constant__ float c_maxVal; __constant__ int c_quantizationLevels; __constant__ float c_backgroundPrior; __constant__ float c_decisionThreshold; __constant__ int c_maxFeatures; __constant__ int c_numInitializationFrames; void loadConstants(int width, int height, float minVal, float maxVal, int quantizationLevels, float backgroundPrior, float decisionThreshold, int maxFeatures, int numInitializationFrames) { cudaSafeCall( hipMemcpyToSymbol(c_width, &width, sizeof(width)) ); cudaSafeCall( hipMemcpyToSymbol(c_height, &height, sizeof(height)) ); cudaSafeCall( hipMemcpyToSymbol(c_minVal, &minVal, sizeof(minVal)) ); cudaSafeCall( hipMemcpyToSymbol(c_maxVal, &maxVal, sizeof(maxVal)) ); cudaSafeCall( hipMemcpyToSymbol(c_quantizationLevels, &quantizationLevels, sizeof(quantizationLevels)) ); cudaSafeCall( hipMemcpyToSymbol(c_backgroundPrior, &backgroundPrior, sizeof(backgroundPrior)) ); cudaSafeCall( hipMemcpyToSymbol(c_decisionThreshold, &decisionThreshold, sizeof(decisionThreshold)) ); cudaSafeCall( hipMemcpyToSymbol(c_maxFeatures, &maxFeatures, sizeof(maxFeatures)) ); cudaSafeCall( hipMemcpyToSymbol(c_numInitializationFrames, &numInitializationFrames, sizeof(numInitializationFrames)) ); } __device__ float findFeature(const int color, const PtrStepi& colors, const PtrStepf& weights, const int x, const int y, const int nfeatures) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { if (color == colors(fy, x)) return weights(fy, x); } // not in histogram, so return 0. 
return 0.0f; } __device__ void normalizeHistogram(PtrStepf weights, const int x, const int y, const int nfeatures) { float total = 0.0f; for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) total += weights(fy, x); if (total != 0.0f) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) weights(fy, x) /= total; } } __device__ bool insertFeature(const int color, const float weight, PtrStepi colors, PtrStepf weights, const int x, const int y, int& nfeatures) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { if (color == colors(fy, x)) { // feature in histogram weights(fy, x) += weight; return false; } } if (nfeatures == c_maxFeatures) { // discard oldest feature int idx = -1; float minVal = numeric_limits<float>::max(); for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { const float w = weights(fy, x); if (w < minVal) { minVal = w; idx = fy; } } colors(idx, x) = color; weights(idx, x) = weight; return false; } colors(nfeatures * c_height + y, x) = color; weights(nfeatures * c_height + y, x) = weight; ++nfeatures; return true; } namespace detail { template <int cn> struct Quantization { template <typename T> __device__ static int apply(const T& val) { int res = 0; res |= static_cast<int>((val.x - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)); res |= static_cast<int>((val.y - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 8; res |= static_cast<int>((val.z - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 16; return res; } }; template <> struct Quantization<1> { template <typename T> __device__ static int apply(T val) { return static_cast<int>((val - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)); } }; } template <typename T> struct Quantization : detail::Quantization<VecTraits<T>::cn> {}; template <typename SrcT> __global__ void update(const PtrStep<SrcT> frame, PtrStepb fgmask, PtrStepi colors_, PtrStepf weights_, PtrStepi nfeatures_, const int frameNum, const float learningRate, const bool updateBackgroundModel) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= c_width || y >= c_height) return; const SrcT pix = frame(y, x); const int newFeatureColor = Quantization<SrcT>::apply(pix); int nfeatures = nfeatures_(y, x); if (frameNum >= c_numInitializationFrames) { // typical operation const float weight = findFeature(newFeatureColor, colors_, weights_, x, y, nfeatures); // see Godbehere, Matsukawa, Goldberg (2012) for reasoning behind this implementation of Bayes rule const float posterior = (weight * c_backgroundPrior) / (weight * c_backgroundPrior + (1.0f - weight) * (1.0f - c_backgroundPrior)); const bool isForeground = ((1.0f - posterior) > c_decisionThreshold); fgmask(y, x) = (uchar)(-isForeground); // update histogram. 
if (updateBackgroundModel) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) weights_(fy, x) *= 1.0f - learningRate; bool inserted = insertFeature(newFeatureColor, learningRate, colors_, weights_, x, y, nfeatures); if (inserted) { normalizeHistogram(weights_, x, y, nfeatures); nfeatures_(y, x) = nfeatures; } } } else if (updateBackgroundModel) { // training-mode update insertFeature(newFeatureColor, 1.0f, colors_, weights_, x, y, nfeatures); if (frameNum == c_numInitializationFrames - 1) normalizeHistogram(weights_, x, y, nfeatures); } } template <typename SrcT> void update_gpu(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y)); cudaSafeCall( hipFuncSetCacheConfig(update<SrcT>, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( update<SrcT>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<SrcT>) frame, fgmask, colors, weights, nfeatures, frameNum, learningRate, updateBackgroundModel); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void update_gpu<uchar >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<uchar3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<uchar4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<ushort >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<ushort3>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<ushort4>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<float >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<float3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<float4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); } }}} #endif /* CUDA_DISABLER */
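Two per-pixel computations carry most of the logic in the update kernel above: packing a multi-channel value into a single quantized feature id, and the Bayes-rule posterior behind the foreground decision. Restated as plain host-side helpers for readability (a sketch with names local to it, not OpenCV code):

// Pack each channel into 8 bits of the feature id, as in Quantization<cn>::apply.
inline int quantize3(float x, float y, float z,
                     float minVal, float maxVal, int levels) {
  int res = 0;
  res |= static_cast<int>((x - minVal) * levels / (maxVal - minVal));
  res |= static_cast<int>((y - minVal) * levels / (maxVal - minVal)) << 8;
  res |= static_cast<int>((z - minVal) * levels / (maxVal - minVal)) << 16;
  return res;
}

// P(background | feature) from the feature's histogram weight and the global
// background prior; the pixel is foreground when 1 - posterior exceeds the
// decision threshold.
inline bool isForeground(float weight, float backgroundPrior,
                         float decisionThreshold) {
  float posterior = (weight * backgroundPrior) /
                    (weight * backgroundPrior +
                     (1.0f - weight) * (1.0f - backgroundPrior));
  return (1.0f - posterior) > decisionThreshold;
}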
a57e7c503f362cbdc5b3ac1e04f64b9e18a20d4a.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/limits.hpp" namespace cv { namespace gpu { namespace device { namespace bgfg_gmg { __constant__ int c_width; __constant__ int c_height; __constant__ float c_minVal; __constant__ float c_maxVal; __constant__ int c_quantizationLevels; __constant__ float c_backgroundPrior; __constant__ float c_decisionThreshold; __constant__ int c_maxFeatures; __constant__ int c_numInitializationFrames; void loadConstants(int width, int height, float minVal, float maxVal, int quantizationLevels, float backgroundPrior, float decisionThreshold, int maxFeatures, int numInitializationFrames) { cudaSafeCall( cudaMemcpyToSymbol(c_width, &width, sizeof(width)) ); cudaSafeCall( cudaMemcpyToSymbol(c_height, &height, sizeof(height)) ); cudaSafeCall( cudaMemcpyToSymbol(c_minVal, &minVal, sizeof(minVal)) ); cudaSafeCall( cudaMemcpyToSymbol(c_maxVal, &maxVal, sizeof(maxVal)) ); cudaSafeCall( cudaMemcpyToSymbol(c_quantizationLevels, &quantizationLevels, sizeof(quantizationLevels)) ); cudaSafeCall( cudaMemcpyToSymbol(c_backgroundPrior, &backgroundPrior, sizeof(backgroundPrior)) ); cudaSafeCall( cudaMemcpyToSymbol(c_decisionThreshold, &decisionThreshold, sizeof(decisionThreshold)) ); cudaSafeCall( cudaMemcpyToSymbol(c_maxFeatures, &maxFeatures, sizeof(maxFeatures)) ); cudaSafeCall( cudaMemcpyToSymbol(c_numInitializationFrames, &numInitializationFrames, sizeof(numInitializationFrames)) ); } __device__ float findFeature(const int color, const PtrStepi& colors, const PtrStepf& weights, const int x, const int y, const int nfeatures) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { if (color == colors(fy, x)) return weights(fy, x); } // not in histogram, so return 0. 
return 0.0f; } __device__ void normalizeHistogram(PtrStepf weights, const int x, const int y, const int nfeatures) { float total = 0.0f; for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) total += weights(fy, x); if (total != 0.0f) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) weights(fy, x) /= total; } } __device__ bool insertFeature(const int color, const float weight, PtrStepi colors, PtrStepf weights, const int x, const int y, int& nfeatures) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { if (color == colors(fy, x)) { // feature in histogram weights(fy, x) += weight; return false; } } if (nfeatures == c_maxFeatures) { // discard oldest feature int idx = -1; float minVal = numeric_limits<float>::max(); for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { const float w = weights(fy, x); if (w < minVal) { minVal = w; idx = fy; } } colors(idx, x) = color; weights(idx, x) = weight; return false; } colors(nfeatures * c_height + y, x) = color; weights(nfeatures * c_height + y, x) = weight; ++nfeatures; return true; } namespace detail { template <int cn> struct Quantization { template <typename T> __device__ static int apply(const T& val) { int res = 0; res |= static_cast<int>((val.x - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)); res |= static_cast<int>((val.y - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 8; res |= static_cast<int>((val.z - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 16; return res; } }; template <> struct Quantization<1> { template <typename T> __device__ static int apply(T val) { return static_cast<int>((val - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)); } }; } template <typename T> struct Quantization : detail::Quantization<VecTraits<T>::cn> {}; template <typename SrcT> __global__ void update(const PtrStep<SrcT> frame, PtrStepb fgmask, PtrStepi colors_, PtrStepf weights_, PtrStepi nfeatures_, const int frameNum, const float learningRate, const bool updateBackgroundModel) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= c_width || y >= c_height) return; const SrcT pix = frame(y, x); const int newFeatureColor = Quantization<SrcT>::apply(pix); int nfeatures = nfeatures_(y, x); if (frameNum >= c_numInitializationFrames) { // typical operation const float weight = findFeature(newFeatureColor, colors_, weights_, x, y, nfeatures); // see Godbehere, Matsukawa, Goldberg (2012) for reasoning behind this implementation of Bayes rule const float posterior = (weight * c_backgroundPrior) / (weight * c_backgroundPrior + (1.0f - weight) * (1.0f - c_backgroundPrior)); const bool isForeground = ((1.0f - posterior) > c_decisionThreshold); fgmask(y, x) = (uchar)(-isForeground); // update histogram. 
if (updateBackgroundModel) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) weights_(fy, x) *= 1.0f - learningRate; bool inserted = insertFeature(newFeatureColor, learningRate, colors_, weights_, x, y, nfeatures); if (inserted) { normalizeHistogram(weights_, x, y, nfeatures); nfeatures_(y, x) = nfeatures; } } } else if (updateBackgroundModel) { // training-mode update insertFeature(newFeatureColor, 1.0f, colors_, weights_, x, y, nfeatures); if (frameNum == c_numInitializationFrames - 1) normalizeHistogram(weights_, x, y, nfeatures); } } template <typename SrcT> void update_gpu(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(update<SrcT>, cudaFuncCachePreferL1) ); update<SrcT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, colors, weights, nfeatures, frameNum, learningRate, updateBackgroundModel); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void update_gpu<uchar >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<uchar3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<uchar4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<ushort >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<ushort3>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<ushort4>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<float >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<float3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<float4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); } }}} #endif /* CUDA_DISABLER */
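The steady-state branch of the update kernel maintains a small per-pixel histogram: decay all weights, reinforce or insert the new feature, replace the weakest entry when the list is full, and renormalize only when a brand-new feature was added. A CPU reference sketch of that bookkeeping, using a growable list in place of the strided GPU layout (not OpenCV code):

// CPU reference sketch of the per-pixel histogram maintenance.
#include <vector>

struct PixelModel {
  std::vector<int> colors;    // quantized feature ids
  std::vector<float> weights; // one weight per feature
  int maxFeatures = 64;
};

void updatePixel(PixelModel &m, int newFeature, float learningRate) {
  for (float &w : m.weights) w *= 1.0f - learningRate;   // decay all weights

  for (int i = 0; i < (int)m.colors.size(); ++i) {
    if (m.colors[i] == newFeature) {                     // known feature: reinforce
      m.weights[i] += learningRate;
      return;
    }
  }

  if ((int)m.colors.size() == m.maxFeatures) {           // full: replace weakest entry
    int idx = 0;
    for (int i = 1; i < (int)m.weights.size(); ++i)
      if (m.weights[i] < m.weights[idx]) idx = i;
    m.colors[idx] = newFeature;
    m.weights[idx] = learningRate;
    return;
  }

  m.colors.push_back(newFeature);                        // brand-new feature
  m.weights.push_back(learningRate);
  float total = 0.0f;                                    // renormalize only in this case,
  for (float w : m.weights) total += w;                  // mirroring insertFeature's
  if (total != 0.0f)                                     // return value above
    for (float &w : m.weights) w /= total;
}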
4acd95ff278d85534f7f8db122902ee8028ba573.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Yiming Wang * * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <cmath> #include <memory> #include <vector> #include "k2/csrc/array_ops.h" #include "k2/csrc/cub.h" #include "k2/csrc/macros.h" #include "k2/csrc/math.h" #include "k2/csrc/moderngpu_allocator.h" #include "k2/csrc/ragged.h" #include "k2/csrc/ragged_ops.h" #include "k2/csrc/ragged_utils.h" #include "moderngpu/kernel_mergesort.hxx" namespace { /* A helper function used in RaggedShape3; if both first and second are non-NULL, it will check if the context of them is compatible or not and return that context if compatible; if one of them is NULL, returns the other one's context. */ static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first, const k2::Array1<int32_t> *second) { K2_CHECK(first != nullptr || second != nullptr) << "At least one of first and second must be non-NULL"; if (first == nullptr) return second->Context(); else if (second == nullptr) return first->Context(); else return k2::GetContext(*first, *second); } } // namespace namespace k2 { RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes, int32_t max_num_axes, int32_t min_num_elements, int32_t max_num_elements) { ContextPtr c = GetCpuContext(); K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes && min_num_elements >= 0 && max_num_elements >= min_num_elements); int32_t num_axes = RandInt(min_num_axes, max_num_axes); int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements); bool done_repeats = false; std::vector<RaggedShapeLayer> axes(num_axes - 1); for (int32_t axis = num_axes - 2; axis >= 0; axis--) { // this axis will have row_ids of length num_elements and // row_splits of length to be determined. int32_t cur_row_split = 0; std::vector<int32_t> row_splits_vec; std::vector<int32_t> row_ids_vec; row_splits_vec.push_back(cur_row_split); // The reason for "|| RandInt(0, 2) == 0)" is so that even if there // are no elements we can still potentially generate empty row-splits. while (cur_row_split < num_elements || RandInt(0, 2) == 0) { int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split); cur_row_split += split_size; // sometimes we have a bunch of empty rows in a row (this will test out // more of the code), so here we generate a bunch of empty rows, but we // just do this only once (that's why we declare `done_repeats` here). if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) { int32_t num_repeats = RandIntGeometric(1, 128); row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split); // don't need to set `row_ids_vec` as there's no element. done_repeats = true; } row_splits_vec.push_back(cur_row_split); if (set_row_ids) { int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2; row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row); } } axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec); if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec); axes[axis].cached_tot_size = num_elements; num_elements = axes[axis].row_splits.Dim() - 1; } // RaggedShape(axes, true) will check the returned RaggedShape for // consistency. 
return RaggedShape(axes, true); } RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids, int32_t cached_tot_size) { NVTX_RANGE(K2_FUNC); K2_CHECK(row_splits != nullptr || row_ids != nullptr) << "At least one of row_splits and row_ids must be defined"; ContextPtr ctx = ::GetContext(row_splits, row_ids); if (cached_tot_size != -1) { if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim()); if (row_splits != nullptr) { // may be slow as it may copy memory from device to host K2_DCHECK_EQ(cached_tot_size, row_splits->Back()) << "Bad row splits is: " << *row_splits; } } std::vector<RaggedShapeLayer> axes(1); if (row_splits != nullptr) { axes[0].row_splits = *row_splits; } else { // we need to work out row_splits as we always require row_splits is not // empty for RaggedShape. Note here we suppose the last element in row_ids // is num_rows - 1, i.e. there're no empty rows after row `row_ids[-1]`. int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1; Array1<int32_t> row_splits_array(ctx, num_rows + 1); RowIdsToRowSplits(*row_ids, &row_splits_array); axes[0].row_splits = row_splits_array; } if (row_ids != nullptr) axes[0].row_ids = *row_ids; if (cached_tot_size == -1) { cached_tot_size = row_ids != nullptr ? row_ids->Dim() : axes[0].row_splits.Back(); } axes[0].cached_tot_size = cached_tot_size; // note below line will check if row_splits and row_ids are valid and agree // with each other. return RaggedShape(axes); } RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) { NVTX_RANGE(K2_FUNC); if (a.NumElements() != b.Dim0()) { K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements() << " vs. " << b.Dim0(); } K2_CHECK(IsCompatible(a, b)); const auto &a_axes = a.Layers(); const auto &b_axes = b.Layers(); std::size_t a_size = a_axes.size(), b_size = b_axes.size(); std::vector<RaggedShapeLayer> axes; axes.reserve(a_size + b_size); for (std::size_t i = 0; i < a_size; ++i) axes.emplace_back(a_axes[i]); for (std::size_t i = 0; i < b_size; ++i) axes.emplace_back(b_axes[i]); bool validate = false; return RaggedShape(axes, validate); } RaggedShape ComposeRaggedShapes3(const RaggedShape &a, const RaggedShape &b, const RaggedShape &c) { NVTX_RANGE(K2_FUNC); if (a.NumElements() != b.Dim0()) { K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements() << " vs. " << b.Dim0(); } if (b.NumElements() != c.Dim0()) { K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << b.NumElements() << " vs. 
" << c.Dim0(); } K2_CHECK(IsCompatible(a, b)); K2_CHECK(IsCompatible(b, c)); const auto &a_axes = a.Layers(); const auto &b_axes = b.Layers(); const auto &c_axes = c.Layers(); std::size_t a_size = a_axes.size(), b_size = b_axes.size(), c_size = c_axes.size(); std::vector<RaggedShapeLayer> axes; axes.reserve(a_size + b_size + c_size); for (std::size_t i = 0; i < a_size; ++i) axes.emplace_back(a_axes[i]); for (std::size_t i = 0; i < b_size; ++i) axes.emplace_back(b_axes[i]); for (std::size_t i = 0; i < c_size; ++i) axes.emplace_back(c_axes[i]); bool validate = false; return RaggedShape(axes, validate); } RaggedShape RaggedShape3(Array1<int32_t> *row_splits1, Array1<int32_t> *row_ids1, int32_t cached_tot_size1, Array1<int32_t> *row_splits2, Array1<int32_t> *row_ids2, int32_t cached_tot_size2) { NVTX_RANGE(K2_FUNC); RaggedShape shape1 = RaggedShape2(row_splits1, row_ids1, cached_tot_size1); Array1<int32_t> temp_array; if (row_splits2 == nullptr) { K2_CHECK_NE(row_ids2, nullptr) << "Either row-splits or row-ids must be defined"; temp_array = Array1<int32_t>(row_ids2->Context(), shape1.NumElements() + 1); row_splits2 = &temp_array; RowIdsToRowSplits(*row_ids2, row_splits2); } return ComposeRaggedShapes( shape1, RaggedShape2(row_splits2, row_ids2, cached_tot_size2)); } RaggedShape RaggedShape4(Array1<int32_t> *row_splits1, Array1<int32_t> *row_ids1, int32_t cached_tot_size1, Array1<int32_t> *row_splits2, Array1<int32_t> *row_ids2, int32_t cached_tot_size2, Array1<int32_t> *row_splits3, Array1<int32_t> *row_ids3, int32_t cached_tot_size3) { NVTX_RANGE(K2_FUNC); RaggedShape shape12 = RaggedShape3(row_splits1, row_ids1, cached_tot_size1, row_splits2, row_ids2, cached_tot_size2); Array1<int32_t> temp_array; if (row_splits3 == nullptr) { K2_CHECK_NE(row_ids3, nullptr) << "Either row-splits or row-ids must be defined"; temp_array = Array1<int32_t>(row_ids3->Context(), shape12.NumElements() + 1); row_splits3 = &temp_array; RowIdsToRowSplits(*row_ids3, row_splits3); } return ComposeRaggedShapes( shape12, RaggedShape2(row_splits3, row_ids3, cached_tot_size3)); } RaggedShape RaggedShapeFromTotSizes(ContextPtr c, int32_t num_axes, const int32_t *tot_sizes) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(num_axes, 2); std::vector<RaggedShapeLayer> axes(num_axes - 1); // In future we might choose to allocate everything in one big array, to avoid // multiple allocations, but for now just do it the simple way. for (int32_t axis = 1; axis < num_axes; ++axis) { axes[axis - 1].row_splits = Array1<int32_t>(c, tot_sizes[axis - 1] + 1); axes[axis - 1].row_ids = Array1<int32_t>(c, tot_sizes[axis]); axes[axis - 1].cached_tot_size = tot_sizes[axis]; } // Not check here as we did not set the values of row_splits and row_ids return RaggedShape(axes, false); } // See declaration in ragged.h for documentation of its purpose and interface. RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) { // If axis == 0, initial row_splits and row_ids will look like the following, // if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes // would be pushed forward. // // If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would // look like the following, if for instance the src.TotSize(axis) = 8: // [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ]. // // The reason why the code is different for axis == 0, is that in that case we // are really making visible an "implicit" axis of the input `src`; we could // call it axis 0 of the original RaggedShape. 
Imagine that "implicit" axis's // row_splits and row_ids map respectively from an idx_minus1 -> idx0 and from // an idx_0 to idx_minus1, where idx_minus1 is always 0 and 0 <= idx0 < // Dim0(). NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); K2_CHECK(axis >= 0 && axis <= src.NumAxes()); const std::vector<RaggedShapeLayer> &axes_in = src.Layers(); int32_t num_axes_in = src.NumAxes(); // Note: in RaggedShape, the vector of RaggedShapeLayer is of length // num_axes - 1, so the output will have one more axis than the input. std::vector<RaggedShapeLayer> axes_out(num_axes_in); int32_t row_splits_dim, row_ids_dim; Array1<int32_t> mem; if (axis == 0) { row_splits_dim = 2; // e.g. [ 0 5 ] row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ] mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); K2_EVAL( c, mem.Dim(), lambda_set_mem, (int32_t i)->void { if (i == 1) mem_data[i] = row_ids_dim; else mem_data[i] = 0; }); } else { int32_t tot_size = src.TotSize(axis); row_splits_dim = tot_size + 1; row_ids_dim = tot_size; mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); K2_EVAL( c, mem.Dim(), lambda_set_mem2, (int32_t i)->void { mem_data[i] = i % (tot_size + 1); }); } axes_out[axis].row_splits = mem.Range(0, row_splits_dim); axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim); axes_out[axis].cached_tot_size = row_ids_dim; for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i]; // Note: the returned array has `num_axes_in + 1` axes, so its // array of RaggedShapeLayer is of length `num_axes_in`. for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1]; return RaggedShape(axes_out); } std::vector<RaggedShape> UnsqueezeParallel(int32_t num_srcs, RaggedShape **src, int32_t axis) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(axis, 0); std::vector<RaggedShape> ans; if (num_srcs == 0) return ans; ans.reserve(num_srcs); ContextPtr &c = src[0]->Context(); std::vector<int32_t> all_row_splits_vec(num_srcs * 2); int32_t max_dim = 0; // all_row_splits_vec will contain [ 0 d0 0 d1 0 d2 .. ] // where d0 == src[0]->Dim0(), d1 == src[1]->Dim0().. for (int32_t i = 0; i < num_srcs; ++i) { int32_t this_dim0 = src[i]->Dim0(); if (this_dim0 > max_dim) max_dim = this_dim0; all_row_splits_vec[i * 2] = 0; all_row_splits_vec[i * 2 + 1] = this_dim0; } Array1<int32_t> all_row_splits(c, all_row_splits_vec); Array1<int32_t> all_row_ids(c, max_dim, 0); for (int32_t i = 0; i < num_srcs; ++i) { int32_t num_axes = src[i]->NumAxes(); std::vector<RaggedShapeLayer> axes; axes.reserve(num_axes); // note, the size of the `layers` of a RaggedShape // is its NumAxes() - 1. axes.resize(1); int32_t this_old_dim0 = all_row_splits_vec[i * 2 + 1]; axes[0].row_splits = all_row_splits.Range(i * 2, 2); axes[0].row_ids = all_row_ids.Range(0, this_old_dim0); axes[0].cached_tot_size = this_old_dim0; axes.insert(axes.end(), src[i]->Layers().begin(), src[i]->Layers().end()); ans.emplace_back(std::move(axes)); } return ans; } /* Internal function used in Index(), which gets certain arrays used internally. @param [in] src Source shape to be indexed @param [in] new2old Array of indexes into axis 0 of src; elements equal to -1 will be interpreted as referring to an empty list. @param [out] old_offsets Will be set to new Array2 with dimension (src.NumAxes(), new2old.Dim()), whose (i,j)'th element contains the offset into axis i of `src` where the slice of `src` with index0 (i.e. index into 0'th-axis of `src`) equal to `new2old[j]` begins. 
@param [out] new_offsets Will be set to new Array2 with dimension (src.NumAxes(), new2old.Dim()+1), whose (i,j)'th element contains the offset into axis i of `ans` where the data in `ans` corresponding to index j (i.e. index j into axis 0 of `ans`) begins. Note: `ans` is the result of Index(), with ans.Dim0() == new2old.Dim(). */ inline void GetOldAndNewOffsets(RaggedShape &src, const Array1<int32_t> &new2old, Array2<int32_t> *old_offsets, Array2<int32_t> *new_offsets) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(src.NumAxes(), 1); ContextPtr &c = src.Context(); int32_t num_axes = src.NumAxes(), ans_dim0 = new2old.Dim(); // max 5 layers. RowSplitsAccessor<5> row_splits_acc(src); const int32_t *new2old_data = new2old.Data(); *old_offsets = Array2<int32_t>(c, num_axes, ans_dim0); *new_offsets = Array2<int32_t>(c, num_axes, ans_dim0 + 1); auto old_offsets_acc = old_offsets->Accessor(), new_offsets_acc = new_offsets->Accessor(); // Set old_offsets; and for now, set new_offsets to the corresponding // sizes of the output slices. K2_EVAL( c, ans_dim0, lambda_set_offsets, (int32_t i)->void { // 0 <= i < ans_dim0 int32_t old_offset = new2old_data[i], old_offset_next = old_offset + 1, offset_diff = 1; // The following is a special case that interprets -1 as referring to an // empty list. In this case, old_offset == old_offset_next == 0. // The specific value 0 is not necessary; they could be equal // and have any value in [0, src.Dim0() - 1] and still refer to // the empty list. if (old_offset == -1) old_offset = 0; for (int32_t axis = 0;; axis++) { old_offsets_acc(axis, i) = old_offset; // Below, 'new_offsets_acc' currently contains the size rather // than the offset; we need to do exclusive-sum. new_offsets_acc(axis, i) = offset_diff; if (axis + 1 == num_axes) return; old_offset = row_splits_acc(axis)[old_offset]; old_offset_next = row_splits_acc(axis)[old_offset_next]; offset_diff = old_offset_next - old_offset; } }); ExclusiveSum(*new_offsets, new_offsets); } static RaggedShape IndexAxis0(RaggedShape &src, const Array1<int32_t> &new2old, Array1<int32_t> *elem_indexes /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); bool is_cpu = (c->GetDeviceType() == kCpu); K2_CHECK(IsCompatible(src, new2old)); int32_t num_axes = src.NumAxes(), src_dim0 = src.Dim0(), ans_dim0 = new2old.Dim(); if (ans_dim0 == 0) { if (elem_indexes) *elem_indexes = Array1<int32_t>(c, 0); return EmptyRaggedShape(c, num_axes); } Array2<int32_t> old_offsets, // num_axes by ans_dim0 new_offsets; // num_axes by (ans_dim0 + 1). GetOldAndNewOffsets(src, new2old, &old_offsets, &new_offsets); // tot_sizes_out is of dimension (num_axes), tot_sizes_out[i] is // ans.TotSize(i) Array1<int32_t> tot_sizes_out = Array1<int32_t>(new_offsets.Col(ans_dim0)).To(GetCpuContext()); int32_t *tot_sizes_out_cpu_data = tot_sizes_out.Data(); if (elem_indexes) *elem_indexes = Array1<int32_t>(c, tot_sizes_out_cpu_data[num_axes - 1]); RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out_cpu_data); auto old_offsets_acc = old_offsets.Accessor(), new_offsets_acc = new_offsets.Accessor(); for (int32_t axis = 1; axis < num_axes; axis++) { // we are not creating the actual row_ids here, except for axis 1; we are // creating "composed row_ids" which map to the index on axis 0. Array1<int32_t> row_ids = ans.RowIds(axis); RowSplitsToRowIds(new_offsets.Row(axis), &row_ids); } ans.Layers()[0].row_splits = new_offsets.Row(1); // Caution: e.g. old_row_splits_acc(i) == src.RowSplits(i+1). 
RowSplitsAccessor<5> old_row_splits_acc(src), new_row_splits_acc(ans); RowIdsAccessor<5> old_row_ids_acc(src), new_row_ids_acc(ans); SmallVec<int32_t, 6> tot_sizes; K2_CHECK(num_axes <= 6); int32_t max_tot_size = 0; for (int32_t i = 0; i < num_axes; i++) { tot_sizes.data[i] = tot_sizes_out_cpu_data[i]; max_tot_size = std::max<int32_t>(max_tot_size, tot_sizes.data[i]); } int32_t *elem_indexes_data = (elem_indexes != nullptr ? elem_indexes->Data() : nullptr); // Note, the first row_splits vector was set above, ans.Layers()[0].row_splits // = new_offsets.Row(1). auto lambda_set_row_splits_and_ids = [=] __host__ __device__( int32_t axis, int32_t i) -> void { axis++; // make it one-based. int32_t tot_size = tot_sizes(axis); // == new_offsets_acc(axis, ans_dim0); if (i > tot_size) return; int32_t *composed_row_ids_data = new_row_ids_acc(axis - 1); int32_t ans_idx0 = (i == tot_size ? ans_dim0 : composed_row_ids_data[i]), job_begin = new_offsets_acc(axis, ans_idx0), job_this_idx0 = i - job_begin; K2_CHECK_GE(job_this_idx0, 0); int32_t row_split_value = 0, new_next_offset = 0; if (axis + 1 < num_axes) new_next_offset = new_offsets_acc(axis + 1, ans_idx0); if (i < tot_size) { // "prev" means for axis - 1 int32_t new_prev_offset = new_offsets_acc(axis - 1, ans_idx0), old_prev_offset = old_offsets_acc(axis - 1, ans_idx0), old_offset = old_offsets_acc(axis, ans_idx0), old_idx = old_offset + job_this_idx0; if (axis != 1) { // Write row-ids. // Actually doing this for axis == 1 is harmless, but unnecessary, as it // would write back the same values that were already there. We avoid // the memory access. // this_new_row_ids = new_row_ids_acc(axis - 1); int32_t *this_new_row_ids = composed_row_ids_data; const int32_t *this_old_row_ids = old_row_ids_acc(axis - 1); int32_t old_row_id = this_old_row_ids[old_idx], new_row_id = old_row_id + new_prev_offset - old_prev_offset; this_new_row_ids[i] = new_row_id; } if (elem_indexes_data != nullptr && axis == num_axes - 1) elem_indexes_data[i] = old_idx; if (axis + 1 < num_axes) { int32_t old_next_offset = old_offsets_acc(axis + 1, ans_idx0), next_offset_diff = new_next_offset - old_next_offset; const int32_t *old_row_splits_data = old_row_splits_acc(axis); row_split_value = next_offset_diff + old_row_splits_data[old_idx]; } } else { row_split_value = new_next_offset; } if (axis + 1 < num_axes) { int32_t *new_row_splits_data = new_row_splits_acc(axis); new_row_splits_data[i] = row_split_value; } }; constexpr int32_t cutoff = 50000; if (c->GetDeviceType() == kCpu) { for (int32_t axis = 0; axis < num_axes - 1; axis++) { int32_t this_size = tot_sizes(axis + 1); for (int32_t i = 0; i <= this_size; i++) lambda_set_row_splits_and_ids(axis, i); } } else if (max_tot_size * (num_axes - 1) < cutoff) { Eval2Device(c, num_axes - 1, max_tot_size + 1, lambda_set_row_splits_and_ids); } else { // Loop in the kernel rather than submitting an excessive number of threads. 
auto lambda_loop = [=] __device__(int32_t i) { for (int32_t axis = 0; axis < num_axes - 1; axis++) { lambda_set_row_splits_and_ids(axis, i); } }; EvalDevice(c, max_tot_size + 1, lambda_loop); } #if !defined(NDEBUG) ans.Check(); #endif return ans; } RaggedShape Index(RaggedShape &src, int32_t axis, const Array1<int32_t> &indexes, Array1<int32_t> *elem_indexes /*=nullptr*/) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); K2_CHECK_LT(static_cast<uint32_t>(axis), static_cast<uint32_t>(num_axes)); if (axis == 0) { return IndexAxis0(src, indexes, elem_indexes); } else if (axis == src.NumAxes() - 1) { // This code is related to SubsampleRaggedShape(). `indexes` corresponds // to `new2old`. Array1<int32_t> last_row_ids = src.RowIds(num_axes - 1)[indexes]; #ifndef NDEBUG if (!IsMonotonic(last_row_ids)) { K2_LOG(FATAL) << "Invalid indexes used when indexing RaggedShape"; } #endif Array1<int32_t> last_row_splits(last_row_ids.Context(), src.TotSize(num_axes - 2) + 1); RowIdsToRowSplits(last_row_ids, &last_row_splits); if (elem_indexes) *elem_indexes = indexes; std::vector<RaggedShapeLayer> axes = src.Layers(); axes.back().row_splits = last_row_splits; axes.back().row_ids = last_row_ids; axes.back().cached_tot_size = last_row_ids.Dim(); // TODO: disable checking by changing true to false. return RaggedShape(axes, true); } else { RaggedShape top, bottom; DecomposeRaggedShape(src, axis, &top, &bottom); RaggedShape top_indexed = Index(top, axis, indexes, nullptr), bottom_indexed = IndexAxis0(bottom, indexes, elem_indexes); return ComposeRaggedShapes(top_indexed, bottom_indexed); } } // returns array of dim (src[0]->NumAxes() + 1) by (num_srcs + 1), // see documentation in header. Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_srcs, 0); int32_t num_axes_in = src[0]->NumAxes(); ContextPtr &ctx = src[0]->Context(); Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1); int32_t *src_offsets_data = src_offsets.Data(); int32_t src_offsets_stride0 = src_offsets.ElemStride0(); // Check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in); K2_CHECK(ctx->IsCompatible(*src[i]->Context())); } for (int32_t axis = 0; axis <= num_axes_in; ++axis) { int32_t sum = 0; for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column src_offsets_data[axis * src_offsets_stride0 + i] = sum; if (i < num_srcs) { sum += (axis == 0 ? 
1 : src[i]->TotSize(axis - 1)); } } } return src_offsets; } void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits, Array1<int32_t *> *row_ids) { NVTX_RANGE(K2_FUNC); int32_t axes = src.NumAxes(); K2_CHECK_GE(axes, 2); src.Populate(); std::vector<int32_t *> row_splits_ptrs(axes - 1); std::vector<int32_t *> row_ids_ptrs(axes - 1); for (int32_t i = 1; i != axes; ++i) { row_splits_ptrs[i - 1] = src.RowSplits(i).Data(); row_ids_ptrs[i - 1] = src.RowIds(i).Data(); } ContextPtr ctx = src.Context(); *row_splits = Array1<int32_t *>(ctx, row_splits_ptrs); *row_ids = Array1<int32_t *>(ctx, row_ids_ptrs); } void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src, Array2<int32_t *> *row_splits, Array2<int32_t *> *row_ids) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_srcs, 0); int32_t num_axes_in = src[0]->NumAxes(); K2_CHECK_GE(num_axes_in, 2); ContextPtr ctx = src[0]->Context(); // check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in); K2_CHECK(ctx->IsCompatible(*src[i]->Context())); } Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs); Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs); int32_t **splits_ptr_data = row_splits_ptrs.Data(); int32_t **ids_ptr_data = row_ids_ptrs.Data(); int32_t stride0 = row_splits_ptrs.ElemStride0(); K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0()); for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) { for (int32_t i = 0; i != num_srcs; ++i) { splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data(); ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data(); } } *row_splits = row_splits_ptrs.To(ctx); *row_ids = row_ids_ptrs.To(ctx); } static RaggedShape StackAxis0(int32_t num_srcs, RaggedShape **src, Array1<uint32_t> *merge_map /* == nullptr*/) { NVTX_RANGE(K2_FUNC); if (num_srcs == 1) { if (merge_map) *merge_map = Arange<uint32_t>(src[0]->Context(), 0, src[0]->NumElements()); RaggedShape top_layer = TrivialShape(src[0]->Context(), src[0]->Dim0()); return ComposeRaggedShapes(top_layer, **src); } // We can't handle num_srcs == 0 because we won't have a context object. K2_CHECK_GT(num_srcs, 1); int32_t num_axes_in = src[0]->NumAxes(), num_axes_out = num_axes_in + 1; ContextPtr c = src[0]->Context(); bool is_cpu = (c->GetDeviceType() == kCpu); // Check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(num_axes_in, src[i]->NumAxes()); K2_CHECK(IsCompatible(*src[0], *src[i])); } // `offsets` will be on CPU for now. // It shape is (num_axes_in + 1 == num_axes_out, num_srcs + 1). Array2<int32_t> offsets = GetOffsets(num_srcs, src); auto offsets_acc = offsets.Accessor(); SmallVec<int32_t, 6> tot_sizes_out; K2_CHECK(num_axes_out <= 6); int32_t max_tot_size = 0; for (int32_t axis = 0; axis < num_axes_out; axis++) { tot_sizes_out.data[axis] = offsets_acc(axis, num_srcs); max_tot_size = std::max<int32_t>(max_tot_size, tot_sizes_out.data[axis]); } RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes_out, tot_sizes_out.data); // src_row_splits and src_row_ids are of dim num_axes_in-1 by num_srcs. 
Array2<int32_t *> src_row_splits, src_row_ids; GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids); auto src_row_splits_acc = src_row_splits.Accessor(), src_row_ids_acc = src_row_ids.Accessor(); offsets = offsets.To(c); offsets_acc = offsets.Accessor(); for (int32_t axis = 1; axis < num_axes_out; axis++) { // we are not creating the actual row_ids here, except for axis 1; we are // creating "composed row_ids" which map to the index on axis 0. Array1<int32_t> row_ids = ans.RowIds(axis); RowSplitsToRowIds(offsets.Row(axis), &row_ids); } ans.Layers()[0].row_splits = offsets.Row(1); // Caution: e.g. old_row_splits_acc(i) == src.RowSplits(i+1). RowSplitsAccessor<5> new_row_splits_acc(ans); RowIdsAccessor<5> new_row_ids_acc(ans); uint32_t *merge_map_data; if (merge_map != nullptr) { *merge_map = Array1<uint32_t>(c, tot_sizes_out.data[num_axes_out - 1]); merge_map_data = merge_map->Data(); } else { merge_map_data = nullptr; } // Note, the first row_splits vector was set above, ans.Layers()[0].row_splits // = new_offsets.Row(1). auto lambda_set_row_splits_and_ids = [=] __host__ __device__( int32_t axis, int32_t i) -> void { ++axis; // We want this to be called starting with axis == 1, but Eval2 // doesn't suppor that. // At this point, 1 < axis < num_axes_out. // This kernel will be writing one or both of: // the row-splits for output-layer==`axis`/input-layer==`axis-1`, // the row-ids for output-layer=`axis-1`/input-layer==`axis-2`. int32_t tot_size = tot_sizes_out(axis); // == offsets_acc(axis, num_srcs); if (i > tot_size) return; int32_t *composed_row_ids_data = new_row_ids_acc(axis - 1); int32_t ans_idx0 = (i == tot_size ? num_srcs : composed_row_ids_data[i]), // note: ans_idx0 == src_idx. job_begin = offsets_acc(axis, ans_idx0), job_this_idx0 = i - job_begin; K2_CHECK_GE(job_this_idx0, 0); int32_t row_split_value = 0, new_next_offset = 0; uint32_t *merge_map_data_local = nullptr; if (axis + 1 < num_axes_out) { new_next_offset = offsets_acc(axis + 1, ans_idx0); } else { merge_map_data_local = merge_map_data; } if (i < tot_size) { // "prev" means for axis - 1 int32_t new_prev_offset = offsets_acc(axis - 1, ans_idx0); if (axis != 1) { // Write row-ids. // this_new_row_ids = new_row_ids_acc(axis - 1); int32_t *this_new_row_ids = composed_row_ids_data; const int32_t *this_src_row_ids = src_row_ids_acc(axis - 2, ans_idx0); int32_t old_row_id = this_src_row_ids[job_this_idx0], new_row_id = old_row_id + new_prev_offset; this_new_row_ids[i] = new_row_id; } if (merge_map_data_local != nullptr) { merge_map_data_local[i] = ans_idx0 + num_srcs * job_this_idx0; } if (axis + 1 < num_axes_out) { const int32_t *src_row_splits_data = src_row_splits_acc(axis - 1, ans_idx0); int32_t old_row_split = src_row_splits_data[job_this_idx0]; row_split_value = new_next_offset + old_row_split; } } else { row_split_value = new_next_offset; } if (axis + 1 < num_axes_out) { int32_t *new_row_splits_data = new_row_splits_acc(axis); new_row_splits_data[i] = row_split_value; } }; constexpr int32_t cutoff = 50000; if (c->GetDeviceType() == kCpu) { for (int32_t axis = 0; axis < num_axes_out - 1; axis++) { int32_t this_size = tot_sizes_out(axis + 1); for (int32_t i = 0; i <= this_size; i++) lambda_set_row_splits_and_ids(axis, i); } } else if (max_tot_size * (num_axes_out - 1) < cutoff) { Eval2Device(c, num_axes_out - 1, max_tot_size + 1, lambda_set_row_splits_and_ids); } else { // Loop in the kernel rather than submitting an excessive number of threads. 
auto lambda_loop = [=] __device__(int32_t i) { for (int32_t axis = 0; axis < num_axes_out - 1; axis++) { lambda_set_row_splits_and_ids(axis, i); } }; EvalDevice(c, max_tot_size + 1, lambda_loop); } #if !defined(NDEBUG) ans.Check(); #endif return ans; } RaggedShape Cat(int32_t axis, int32_t num_srcs, RaggedShape **src, Array1<uint32_t> *merge_map /* == nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_srcs, 0); if (axis == 0) { RaggedShape temp = StackAxis0(num_srcs, src, merge_map); std::vector<RaggedShapeLayer> ans_layers( temp.Layers().begin() + 1, temp.Layers().end()); return RaggedShape(ans_layers, false); } K2_CHECK_LT(static_cast<uint32_t>(axis), static_cast<uint32_t>(src[0]->NumAxes())); int32_t num_axes = src[0]->NumAxes(); std::vector<RaggedShapeLayer> ans_layers(num_axes - 1); // If axis >= 2, some layers of `src` will pass through unchanged (we should // check that they are identical across all sources). for (int32_t l = 0; l + 1 < axis; l++) { CheckLayerEqual(l, num_srcs, src); ans_layers[l] = src[0]->Layers()[l]; } Array1<uint32_t> merge_map_local; Array1<uint32_t> *this_m = (axis + 1 == num_axes ? merge_map : &merge_map_local); RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m), t = SubsampleRaggedLayer(s, 0, num_srcs); ans_layers[axis - 1] = t.Layers()[0]; for (int32_t l = axis; l + 1 < num_axes; l++) { Array1<uint32_t> merge_map_next; Array1<uint32_t> *this_m = (l + 2 == num_axes ? merge_map : &merge_map_next); RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m); ans_layers[l] = r.Layers()[0]; merge_map_local = merge_map_next; } // TODO(dan) after this is debugged: add ", false". return RaggedShape(ans_layers); } RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(src.NumAxes(), 2); K2_CHECK(axis >= 0 && axis < src.NumAxes()); // note, `axes_in` is of dim src.NumAxes() - 1. // Also note: axes_in[i] pertains to the relationship between // axes i and i+1 in the source. 
src.Populate(); const std::vector<RaggedShapeLayer> &axes_in = src.Layers(); std::vector<RaggedShapeLayer> axes_out(axes_in.size() - 1); int32_t axes_out_size = static_cast<int32_t>(axes_out.size()); for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i]; if (axis > 0 && axis + 1 < src.NumAxes()) { axes_out[axis - 1].row_ids = axes_in[axis - 1].row_ids[axes_in[axis].row_ids]; axes_out[axis - 1].row_splits = axes_in[axis].row_splits[axes_in[axis - 1].row_splits]; axes_out[axis - 1].cached_tot_size = axes_out[axis - 1].row_ids.Dim(); } for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1]; return RaggedShape(axes_out); } RaggedShape MakeTransposable(RaggedShape &src) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(src.NumAxes(), 2); int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1); if (src_dim0 <= 1) return src; ContextPtr c = src.Context(); int32_t num_axes = src.NumAxes(); int32_t max_size = src.MaxSize(1); if (max_size <= 0) return src; int32_t ans_tot_size1 = max_size * src_dim0; src.Populate(); const std::vector<RaggedShapeLayer> &axes_in = src.Layers(); std::vector<RaggedShapeLayer> axes_out(num_axes - 1); const int32_t *src_row_splits1_data = src.RowSplits(1).Data(); const int32_t *src_row_ids1_data = src.RowIds(1).Data(); { ParallelRunner pr(c); RaggedShapeLayer &axis1_shape = axes_out[0]; { // set ans.RowSplits(1); With w(pr.NewStream()); axis1_shape.row_splits = Range(c, src_dim0 + 1, 0, max_size); } { // set ans.RowIds(1); With w(pr.NewStream()); axis1_shape.row_ids = Array1<int32_t>(c, ans_tot_size1); int32_t *row_ids1_data = axis1_shape.row_ids.Data(); axis1_shape.cached_tot_size = ans_tot_size1; K2_EVAL( c, ans_tot_size1, lambda_set_row_ids1, (int32_t i)->void { row_ids1_data[i] = i / max_size; }); } if (num_axes > 2) { RaggedShapeLayer &axis2_shape = axes_out[1]; const int32_t *src_row_splits2_data = src.RowSplits(2).Data(); { // set ans.RowSplits(2); With w(pr.NewStream()); axis2_shape.cached_tot_size = src.TotSize(2); axis2_shape.row_splits = Array1<int32_t>(c, ans_tot_size1 + 1); int32_t *ans_row_splits2_data = axis2_shape.row_splits.Data(); K2_EVAL( c, ans_tot_size1 + 1, lambda_set_row_splits2, (int32_t idx01)->void { if (idx01 == ans_tot_size1) { ans_row_splits2_data[idx01] = src_row_splits2_data[src_tot_size1]; return; } int32_t idx0 = idx01 / max_size, idx1 = idx01 % max_size; int32_t idx0x = src_row_splits1_data[idx0], idx0x_next = src_row_splits1_data[idx0 + 1]; int32_t num_elems_this_row = idx0x_next - idx0x; if (idx1 < num_elems_this_row) ans_row_splits2_data[idx01] = src_row_splits2_data[idx0x + idx1]; else ans_row_splits2_data[idx01] = src_row_splits2_data[idx0x_next]; // append empty row }); } { // set ans.RowIds(2); With w(pr.NewStream()); int32_t tot_size2 = src.TotSize(2); axis2_shape.row_ids = Array1<int32_t>(c, tot_size2); int32_t *ans_row_ids2_data = axis2_shape.row_ids.Data(); const int32_t *src_row_ids2_data = src.RowIds(2).Data(); K2_EVAL( c, tot_size2, lambda_set_row_ids2, (int32_t idx012)->void { int32_t src_idx01 = src_row_ids2_data[idx012]; int32_t src_idx0 = src_row_ids1_data[src_idx01]; int32_t src_idx1 = src_idx01 - src_row_splits1_data[src_idx0]; ans_row_ids2_data[idx012] = (src_idx0 * max_size) + src_idx1; }); } } } // copy left row_splits and row_ids; for (int32_t i = 2; i < num_axes - 1; ++i) axes_out[i] = axes_in[i]; return RaggedShape(axes_out); } // transpose axes 0 and 1. 
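// This requires all rows on axis 0 to have the same size, i.e. RowSplits(1)
// must be evenly spaced.  For example, a shape whose axis-0/1 structure is
// 2 rows of 3 sublists becomes 3 rows of 2 sublists, with output sublist
// (i, j) taken from input sublist (j, i).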
RaggedShape Transpose(RaggedShape &src, Array1<int32_t> *value_indexes) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(src.NumAxes(), 2); ContextPtr c = src.Context(); int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1); if (src_dim0 <= 0) return src; int32_t src_dim1 = src_tot_size1 / src_dim0; K2_CHECK_EQ(src_tot_size1 % src_dim0, 0) << "Transpose(): all dims on axis 0 must be the same.\n" << "src_tot_size1: " << src_tot_size1 << "\n" << "src_dim0: " << src_dim0 << ", array is: " << src; K2_DCHECK( Equal(src.RowSplits(1), Range(c, src.RowSplits(1).Dim(), 0, src_dim1))) << " Expected row-splits to be evenly spaced: " << src.RowSplits(1); RaggedShape src_no_axis0 = RemoveAxis(src, 0); K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1); // `renumbering` is a `new2old` map, that maps from the first index in // src_no_axis0_renumbered // to the first index into src_no_axis0. Array1<int32_t> renumbering(c, src_tot_size1); int32_t *renumbering_data = renumbering.Data(); K2_EVAL( c, src_tot_size1, lambda_set_renumbering, (int32_t i)->void { int32_t j = i % src_dim0, k = i / src_dim0, i_old = j * src_dim1 + k; renumbering_data[i] = i_old; }); RaggedShape src_no_axis0_renumbered = Index(src_no_axis0, 0, renumbering, value_indexes); int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1, row_ids_dim = src_tot_size1; std::vector<RaggedShapeLayer> ans_axis0(1); Array1<int32_t> mem(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); K2_EVAL( c, row_splits_dim + row_ids_dim, lambda_set_row_info, (int32_t i)->void { int32_t val; if (i >= row_splits_dim) { // row_ids int32_t elem_idx = i - row_splits_dim; val = elem_idx / src_dim0; } else { // row_splits int32_t row_idx = i; val = row_idx * src_dim0; } mem_data[i] = val; }); ans_axis0[0].row_splits = mem.Range(0, row_splits_dim); ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim); ans_axis0[0].cached_tot_size = row_ids_dim; RaggedShape temp(ans_axis0); return ComposeRaggedShapes(temp, src_no_axis0_renumbered); } RaggedShape Stack(int32_t axis, int32_t num_srcs, RaggedShape **src, Array1<uint32_t> *merge_map /* = nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_srcs, 0); K2_CHECK_LT(static_cast<uint32_t>(axis), static_cast<uint32_t>(src[0]->NumAxes())); ContextPtr c = src[0]->Context(); if (axis == 0) { return StackAxis0(num_srcs, src, merge_map); } K2_CHECK_LT(static_cast<uint32_t>(axis), static_cast<uint32_t>(src[0]->NumAxes())); int32_t num_axes = src[0]->NumAxes(); std::vector<RaggedShapeLayer> ans_layers(num_axes); // If axis >= 2, some layers of `src` will pass through unchanged (we should // check that they are identical across all sources). for (int32_t l = 0; l + 1 < axis; l++) { CheckLayerEqual(l, num_srcs, src); ans_layers[l] = src[0]->Layers()[l]; } Array1<uint32_t> merge_map_local; Array1<uint32_t> *this_m = (axis + 1 == num_axes ? merge_map : &merge_map_local); RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m); // note: s.Dim0() will be a multiple of num_srcs. ans_layers[axis - 1] = RegularRaggedShape(c, s.Dim0() / num_srcs, num_srcs).Layers()[0]; ans_layers[axis] = s.Layers()[0]; for (int32_t l = axis; l + 1 < num_axes; l++) { Array1<uint32_t> merge_map_next; Array1<uint32_t> *this_m = (l + 2 == num_axes ? merge_map : &merge_map_next); RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m); ans_layers[l + 1] = r.Layers()[0]; merge_map_local = merge_map_next; } // TODO(dan) after this is debugged: add ", false". 
return RaggedShape(ans_layers); } RaggedShape Merge(int32_t num_srcs, RaggedShape **src, const Array1<uint32_t> &merge_map, Array1<uint32_t> *merge_map_out) { NVTX_RANGE(K2_FUNC); K2_CHECK(num_srcs > 0); int32_t num_layers = src[0]->NumAxes() - 1; std::vector<RaggedShapeLayer> ans_layers(num_layers); // Note: this is a shallow copy. Array1<uint32_t> merge_map_local = merge_map; for (int32_t l = 0; l < num_layers; l++) { Array1<uint32_t> merge_map_next; Array1<uint32_t> *this_m = (l + 1 == num_layers ? merge_map_out : &merge_map_next); RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m); ans_layers[l] = r.Layers()[0]; merge_map_local = merge_map_next; } // TODO(dan) after this is debugged: add ", false". return RaggedShape(ans_layers); } RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) { NVTX_RANGE(K2_FUNC); // row_splits= [ Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems); Array1<int32_t> row_ids(c, num_elems, 0); return RaggedShape2(&row_splits, &row_ids, num_elems); } RaggedShape RegularRaggedShape(ContextPtr &c, int32_t dim0, int32_t dim1) { NVTX_RANGE(K2_FUNC); Array1<int32_t> row_splits = Range<int32_t>(c, dim0 + 1, 0, dim1); Array1<int32_t> row_ids(c, dim0 * dim1); int32_t *row_ids_data = row_ids.Data(); K2_EVAL2( c, dim0, dim1, lambda_set_row_ids, (int32_t i, int32_t j)->void { row_ids_data[i * dim1 + j] = i; }); return RaggedShape2(&row_splits, &row_ids, dim0 * dim1); } Ragged<int32_t> GetCountsPartitioned(Ragged<int32_t> &src, RaggedShape &ans_ragged_shape) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(src.NumAxes(), 2); K2_CHECK_EQ(ans_ragged_shape.NumAxes(), 2); K2_CHECK(IsCompatible(src, ans_ragged_shape)); K2_CHECK_EQ(src.Dim0(), ans_ragged_shape.Dim0()); const Array1<int32_t> &values = src.values; const Array1<int32_t> &row_splits = ans_ragged_shape.RowSplits(1); int32_t n = ans_ragged_shape.NumElements(); Array1<int32_t> counts = GetCounts(values, n); return Ragged<int32_t>(ans_ragged_shape, counts); } static Array1<int32_t> GetTransposeReorderingCpu(Ragged<int32_t> &src, int32_t num_cols) { NVTX_RANGE(K2_FUNC); std::vector<std::vector<int32_t>> column_indexes(num_cols); // [column][row] const int32_t *values_data = src.values.Data(); int32_t n = src.values.Dim(); for (int32_t i = 0; i != n; ++i) { int32_t bucket = values_data[i]; column_indexes[bucket].push_back(i); } Array1<int32_t> ans(src.Context(), n); int32_t *ans_data = ans.Data(); for (int32_t i = 0; i != num_cols; ++i) { std::copy(column_indexes[i].begin(), column_indexes[i].end(), ans_data); ans_data += column_indexes[i].size(); } return ans; } static Array1<int32_t> GetTransposeReorderingThreeAxesCuda(Ragged<int32_t> &src, int32_t num_cols) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(src.NumAxes(), 3); ContextPtr &context = src.Context(); K2_CHECK_EQ(context->GetDeviceType(), kCuda); const Array1<int32_t> &row_splits1 = src.RowSplits(1); const int32_t *row_ids2_data = src.RowIds(2).Data(); const int32_t *value_data = src.values.Data(); Array1<int32_t> segments = src.RowSplits(2)[row_splits1]; auto lambda_comp = [=] __device__(int32_t a_idx012, int32_t b_idx012) -> bool { int32_t a_col_index = value_data[a_idx012]; int32_t b_col_index = value_data[b_idx012]; if (a_col_index < b_col_index) return true; // sort by column indexes if (a_col_index > b_col_index) return false; // at this point, a_idx012 and b_idx012 belong to the same column; // then we sort by its row indexes int32_t a_idx01 = row_ids2_data[a_idx012]; int32_t b_idx01 = row_ids2_data[b_idx012]; if (a_idx01 < b_idx01) return 
true; if (a_idx01 > b_idx01) return false; // at this point, a_idx012 and b_idx012 are duplicate elements return false; // either true or false is fine }; mgpu::context_t *mgpu_context = GetModernGpuAllocator(context); int32_t n = src.values.Dim(); Array1<int32_t> ans = Range(context, n, 0); if (n == 0) return ans; K2_CUDA_SAFE_CALL(mgpu::segmented_sort(ans.Data(), // keys ans.Dim(), // count segments.Data(), // segments segments.Dim() - 1, // num_segments lambda_comp, *mgpu_context)); return ans; } /* // Checks the result of GetTranspoeReordering(), in debug mode and dies if it is wrong. static void CheckGetTransposeReordering(Ragged<int32_t> &src, Array1<int32_t> &ans) { if (!internal::kDisableDebug && !internal::DisableChecks()) { K2_CHECK(IsPermutation(ans)); K2_CHECK(IsMonotonic(src.values[ans])); } }*/ Array1<int32_t> GetTransposeReordering(Ragged<int32_t> &src, int32_t num_cols) { NVTX_RANGE(K2_FUNC); ContextPtr &context = src.Context(); if (src.NumAxes() < 2 || src.values.Dim() == 0) { // src is empty return Array1<int32_t>(context, 0); } DeviceType device_type = context->GetDeviceType(); if (device_type == kCpu) return GetTransposeReorderingCpu(src, num_cols); K2_CHECK_EQ(device_type, kCuda); (void)GetTransposeReorderingThreeAxesCuda; // remove compiler warnings #if __CUDACC_VER_MAJOR__ > 10 || \ (__CUDACC_VER_MAJOR__ == 10 && \ (__CUDACC_VER_MINOR__ > 1 || \ (__CUDACC_VER_MINOR__ == 1 && __CUDACC_VER_BUILD__ > 105))) // Enable it only for NVCC > 10.1.105 // // Refer to https://github.com/LLNL/axom/issues/88 // NVCC 10.1.105 has a known issue for hipcub::DeviceRadixSort int32_t num_buckets = num_cols; int32_t num_elements = src.values.Dim(); int32_t log_buckets = static_cast<int32_t>(ceilf(log2f(num_buckets))); Array1<int32_t> order = Range(context, num_elements, 0); Array1<int32_t> src_tmp_out(context, num_elements); Array1<int32_t> ans(context, num_elements); hipStream_t stream = context->GetCudaStream(); size_t temp_storage_bytes = 0; K2_CUDA_SAFE_CALL(hipcub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, src.values.Data(), src_tmp_out.Data(), order.Data(), ans.Data(), num_elements, 0, log_buckets, stream)); Array1<int8_t> d_temp_storage(context, temp_storage_bytes); K2_CUDA_SAFE_CALL(hipcub::DeviceRadixSort::SortPairs( d_temp_storage.Data(), temp_storage_bytes, src.values.Data(), src_tmp_out.Data(), order.Data(), ans.Data(), num_elements, 0, log_buckets, stream)); // CheckGetTransposeReordering(src, ans); return ans; #else if (src.NumAxes() == 3) { Array1<int32_t> ans = GetTransposeReorderingThreeAxesCuda(src, num_cols); // CheckGetTransposeReordering(src, ans); return ans; } const int32_t *row_splits1_data = src.RowSplits(src.NumAxes() - 1).Data(); const int32_t *row_ids1_data = src.RowIds(src.NumAxes() - 1).Data(); const int32_t *value_data = src.values.Data(); int32_t n = src.values.Dim(); Array1<int32_t> ans = Range(context, n, 0); if (n == 0) return ans; auto lambda_comp = [=] __device__(int32_t a_idx01, int32_t b_idx01) -> bool { int32_t a_idx0 = row_ids1_data[a_idx01]; int32_t b_idx0 = row_ids1_data[b_idx01]; int32_t a_col_index = value_data[a_idx01]; int32_t b_col_index = value_data[b_idx01]; if (a_col_index < b_col_index) return true; // sort by column indexes if (a_col_index > b_col_index) return false; // now we have a_col_index == b_col_index if (a_idx0 < b_idx0) return true; // sort by row indexes if (a_idx0 > b_idx0) return false; // now we have a_idx0 == b_idx0 && a_col_index == b_col_index // this entry is duplicated in the sparse matrix. 
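    // Overall the comparator orders elements by (column, row), so the
    // returned reordering visits the sparse matrix column by column.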
return false; // we can return either true or false here. }; mgpu::context_t *mgpu_context = GetModernGpuAllocator(context); K2_CUDA_SAFE_CALL(mgpu::mergesort(ans.Data(), n, lambda_comp, *mgpu_context)); // CheckGetTransposeReordering(src, ans); return ans; #endif } RaggedShape ChangeSublistSize(const RaggedShape &src, int32_t size_delta) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(src.NumAxes(), 2); // the result will have the same num-axes as `src` (the NumAxes() of the // object is not the same as the number of RaggedShapeLayer axes). std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1); int32_t last_axis = src.NumAxes() - 1; // The following will only do something if src.NumAxes() > 2. for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i]; ContextPtr &c = src.Context(); int32_t num_rows = src.TotSize(last_axis - 1), src_num_elems = src.TotSize(last_axis), num_elems = src_num_elems + size_delta * num_rows; ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1); ans_axes.back().row_ids = Array1<int32_t>(c, num_elems); ans_axes.back().cached_tot_size = num_elems; const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data(), *src_row_ids_data = src.RowIds(last_axis).Data(); int32_t *row_splits_data = ans_axes.back().row_splits.Data(), *row_ids_data = ans_axes.back().row_ids.Data(); { ParallelRunner pr(c); { With w(pr.NewStream()); K2_EVAL( c, num_rows + 1, lambda_set_row_splits, (int32_t idx0)->void { row_splits_data[idx0] = src_row_splits_data[idx0] + size_delta * idx0; }); } { With w(pr.NewStream()); K2_EVAL( c, src_num_elems, lambda_set_row_ids1, (int32_t src_idx01)->void { int32_t src_idx0 = src_row_ids_data[src_idx01], src_idx0x = src_row_splits_data[src_idx0], src_idx1 = src_idx01 - src_idx0x, new_idx0x = row_splits_data[src_idx0], new_idx0x_next = row_splits_data[src_idx0 + 1], new_idx01 = new_idx0x + src_idx1; // it's only necessary to guard the next statement with in 'if' // because size_delta might be negative. if (new_idx01 < new_idx0x_next) row_ids_data[new_idx01] = src_idx0; }); } if (size_delta > 0) { // This sets the row-ids that are not set by lambda_set_row_ids1. With w(pr.NewStream()); K2_EVAL( c, num_rows * size_delta, lambda_set_row_ids2, (int32_t i)->void { int32_t idx0 = i / size_delta, n = i % size_delta, next_idx0 = idx0 + 1; // The following formula is the same as the one in // lambda_set_row_splits; we want to compute the new value of // row_splits_data[next_idx0] without waiting for that kernel to // terminate. int32_t next_idx0x = src_row_splits_data[next_idx0] + size_delta * next_idx0; row_ids_data[next_idx0x - 1 - n] = idx0; }); } // make the ParallelRunner go out of scope (should do this before any // validation code that gets invoked by the constructor of RaggedShape // below). } return RaggedShape(ans_axes); } // TODO(dan): this could definitely be made more efficient. RaggedShape ChangeSublistSizePinned(RaggedShape &src, int32_t size_delta) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(src.NumAxes(), 2); // the result will have the same num-axes as `src` (the NumAxes() of the // object is not the same as the number of RaggedShapeLayer axes). std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1); int32_t last_axis = src.NumAxes() - 1; // The following will only do something if src.NumAxes() > 2. 
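  // Unlike ChangeSublistSize(), this version clamps each new sublist size at
  // zero instead of letting it go negative; e.g. with size_delta == -2,
  // original row sizes [ 3 1 0 ] become [ 1 0 0 ].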
  for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i];

  ContextPtr &c = src.Context();
  int32_t num_rows = src.TotSize(last_axis - 1);
  ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);

  const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data();
  int32_t *row_splits_data = ans_axes.back().row_splits.Data();

  K2_EVAL(
      c, num_rows, lambda_set_row_sizes, (int32_t idx0)->void {
        int32_t orig_size = src_row_splits_data[idx0 + 1] -
                            src_row_splits_data[idx0],
                size;
        if (orig_size == 0 || orig_size + size_delta <= 0)
          size = 0;
        else
          size = orig_size + size_delta;
        row_splits_data[idx0] = size;
      });
  ExclusiveSum(ans_axes.back().row_splits, &ans_axes.back().row_splits);
  ans_axes.back().row_ids =
      Array1<int32_t>(c, ans_axes.back().row_splits.Back());
  RowSplitsToRowIds(ans_axes.back().row_splits, &ans_axes.back().row_ids);
  ans_axes.back().cached_tot_size = ans_axes.back().row_ids.Dim();
  return RaggedShape(ans_axes);
}

RaggedShape Prefix(RaggedShape &src, int32_t n) {
  NVTX_RANGE(K2_FUNC);
  int32_t dim0 = src.Dim0();
  K2_CHECK(n >= 0 && n <= dim0);

  src.Populate();
  int32_t num_axes = src.NumAxes();
  K2_CHECK_GE(num_axes, 2);
  const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
  std::vector<RaggedShapeLayer> axes_out(axes_in.size());

  int32_t row_end = n;
  for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
    axes_out[axis].row_splits = axes_in[axis].row_splits.Arange(0, row_end + 1);
    // notice here we may do a memory copy from GPU to CPU.
    row_end = axes_in[axis].row_splits[row_end];
    axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
    axes_out[axis].cached_tot_size = row_end;
  }
  return RaggedShape(axes_out);
}

std::vector<RaggedShape> GetPrefixes(RaggedShape &src,
                                     const std::vector<int32_t> &sizes) {
  NVTX_RANGE(K2_FUNC);
  src.Populate();
  int32_t dim0 = src.Dim0();
  int32_t num_axes = src.NumAxes();
  K2_CHECK_GE(num_axes, 2);
  ContextPtr &c = src.Context();
  const std::vector<RaggedShapeLayer> &axes_in = src.Layers();

  // get those row_end elements at each axis.
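  // `row_ends` is laid out axis-major: entries [0, ans_size) hold the
  // requested prefix sizes on axis 0, entries [ans_size, 2 * ans_size) the
  // corresponding row-ends on axis 1, and so on, so a single copy to the CPU
  // gives us everything needed to slice every axis.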
  int32_t ans_size = static_cast<int32_t>(sizes.size());
  Array1<int32_t> row_ends(c, num_axes * ans_size);
  Array1<int32_t> sizes_array(GetCpuContext(), sizes);
  Array1<int32_t> indexes = row_ends.Arange(0, ans_size);
  indexes.CopyFrom(sizes_array);

  for (int32_t axis = 1; axis < num_axes; ++axis) {
    Array1<int32_t> curr_axis_row_ends =
        row_ends.Arange(axis * ans_size, (axis + 1) * ans_size);
    axes_in[axis - 1].row_splits.Index(indexes, &curr_axis_row_ends);
    indexes = curr_axis_row_ends;
  }

  row_ends = row_ends.To(GetCpuContext());
  std::vector<RaggedShape> ans(ans_size);
  for (int32_t i = 0; i != ans_size; ++i) {
    std::vector<RaggedShapeLayer> axes_out(axes_in.size());
    int32_t row_end = row_ends[i];
    K2_CHECK(row_end >= 0 && row_end <= dim0);
    for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
      axes_out[axis].row_splits =
          axes_in[axis].row_splits.Arange(0, row_end + 1);
      row_end = row_ends[i + (axis + 1) * ans_size];
      axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
      axes_out[axis].cached_tot_size = row_end;
    }
    ans[i] = RaggedShape(axes_out, false);
  }
  return ans;
}

RaggedShape Arange(RaggedShape &src, int32_t axis, int32_t begin, int32_t end,
                   std::pair<int32_t, int32_t> *value_range /*= nullptr*/) {
  NVTX_RANGE(K2_FUNC);
  int32_t num_axes = src.NumAxes();
  K2_CHECK_GE(num_axes, 2);
  K2_CHECK(axis >= 0 && axis < num_axes - 1);
  K2_CHECK(begin >= 0 && begin <= end && end <= src.TotSize(axis));

  if (begin == end) {
    RaggedShape ans = EmptyRaggedShape(src.Context(), num_axes - axis);
    // as begin == end, the user always gets empty values when doing
    // `src.values.Arange(begin, end)`
    if (value_range != nullptr) *value_range = std::make_pair(begin, end);
    return ans;
  }

  src.Populate();
  ContextPtr &c = src.Context();
  const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
  int32_t ans_num_axes = num_axes - axis;
  // `- 1` because Layers().size() is NumAxes() - 1
  std::vector<RaggedShapeLayer> axes_out(ans_num_axes - 1);

  // get those `row_begin` and `row_end` indexes for all axes in a kernel so we
  // can do just one GPU to CPU memory transfer.
  // the format of `indexes` is: row_begin_axis0, row_end_axis0,
  // row_begin_axis1, row_end_axis1, etc. axis0, axis1 here are the axes of
  // ans.
  Array1<int32_t> indexes(c, ans_num_axes * 2);
  int32_t *indexes_data = indexes.Data();
  RowSplitsAccessor<5> src_row_splits_acc(src);

  K2_EVAL(
      c, 1, lambda_set_indexes, (int32_t i)->void {
        // we just start a kernel with only one element here.
K2_CHECK_EQ(i, 0); int32_t row_begin = begin, row_end = end; indexes_data[0] = row_begin, indexes_data[1] = row_end; for (int32_t cur_axis = axis; cur_axis < num_axes - 1; ++cur_axis) { row_begin = src_row_splits_acc(cur_axis)[row_begin]; row_end = src_row_splits_acc(cur_axis)[row_end]; int32_t indexes_pos = ((cur_axis - axis) + 1) * 2; indexes_data[indexes_pos] = row_begin; indexes_data[indexes_pos + 1] = row_end; } }); indexes = indexes.To(GetCpuContext()); int32_t row_begin = indexes[0], row_end = indexes[1]; for (int32_t cur_axis = axis; cur_axis < num_axes - 1; ++cur_axis) { axes_out[cur_axis - axis].row_splits = axes_in[cur_axis].row_splits.Arange(row_begin, row_end + 1); int32_t row_id = row_begin; int32_t indexes_pos = ((cur_axis - axis) + 1) * 2; row_begin = indexes[indexes_pos]; row_end = indexes[indexes_pos + 1]; axes_out[cur_axis - axis].row_splits = Minus(axes_out[cur_axis - axis].row_splits, row_begin); axes_out[cur_axis - axis].row_ids = axes_in[cur_axis].row_ids.Arange(row_begin, row_end); axes_out[cur_axis - axis].row_ids = Minus(axes_out[cur_axis - axis].row_ids, row_id); axes_out[cur_axis - axis].cached_tot_size = row_end - row_begin; } if (value_range != nullptr) *value_range = std::make_pair(row_begin, row_end); return RaggedShape(axes_out); } Ragged<int32_t> AddSuffixToRagged(Ragged<int32_t> &src, const Array1<int32_t> &suffix) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); K2_CHECK_GE(num_axes, 2); K2_CHECK_EQ(suffix.Dim(), src.TotSize(num_axes - 2)); ContextPtr &c = src.Context(); Array1<int32_t> dst_values(c, src.NumElements() + suffix.Dim()); RaggedShape dst_shape = ChangeSublistSize(src.shape, 1); // "row_splits1" and "row_ids1" below are actually on the last axis. We name // them with "1" so that we can use "idx01" and "idx0" for those indexes in // lambda, following the naming convention explained in k2/csrc/utils.h const int32_t *dst_row_splits1_data = dst_shape.RowSplits(num_axes - 1).Data(), *dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(), *src_values_data = src.values.Data(), *suffix_data = suffix.Data(); int32_t *dst_values_data = dst_values.Data(); K2_EVAL( c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void { int32_t idx0 = dst_row_ids1_data[idx01]; if (idx01 == dst_row_splits1_data[idx0 + 1] - 1) { // idx01 points to the last element of this row; copy from suffix dst_values_data[idx01] = suffix_data[idx0]; } else { // copy from src int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01]; dst_values_data[idx01] = src_values_data[src_idx01]; } }); return Ragged<int32_t>(dst_shape, dst_values); } Ragged<int32_t> AddPrefixToRagged(Ragged<int32_t> &src, const Array1<int32_t> &prefix) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); K2_CHECK_GE(num_axes, 2); K2_CHECK_EQ(prefix.Dim(), src.TotSize(num_axes - 2)); ContextPtr &c = src.Context(); Array1<int32_t> dst_values(c, src.NumElements() + prefix.Dim()); RaggedShape dst_shape = ChangeSublistSize(src.shape, 1); // "row_splits1" and "row_ids1" below are actually on the last axis. 
We name // them with "1" so that we can use "idx01" and "idx0" for those indexes in // lambda, following the naming convention explained in k2/csrc/utils.h const int32_t *dst_row_splits1_data = dst_shape.RowSplits(num_axes - 1).Data(), *dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(), *src_values_data = src.values.Data(), *prefix_data = prefix.Data(); int32_t *dst_values_data = dst_values.Data(); K2_EVAL( c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void { int32_t idx0 = dst_row_ids1_data[idx01]; if (idx01 == dst_row_splits1_data[idx0]) { // idx01 points to the first element of this row; copy from prefix dst_values_data[idx01] = prefix_data[idx0]; } else { // copy from src int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01] - 1; dst_values_data[idx01] = src_values_data[src_idx01]; } }); return Ragged<int32_t>(dst_shape, dst_values); } RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &renumbering) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(renumbering.NumOldElems(), src.NumElements()); // Make sure final row-ids are populated. src.RowIds(src.NumAxes() - 1); std::vector<RaggedShapeLayer> axes = src.Layers(); axes.back().row_ids = axes.back().row_ids[renumbering.New2Old()]; axes.back().row_splits = renumbering.Old2New()[axes.back().row_splits]; axes.back().cached_tot_size = axes.back().row_ids.Dim(); return RaggedShape(axes); } RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &r_before_last, Renumbering &r_last) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(r_before_last.NumOldElems(), src.TotSize(src.NumAxes() - 2)); K2_CHECK_EQ(r_last.NumOldElems(), src.NumElements()); // Make sure final and before-final row-ids are populated. src.RowIds(src.NumAxes() - 2); src.RowIds(src.NumAxes() - 1); std::vector<RaggedShapeLayer> axes = src.Layers(); // Suppose this shape has 3 axes (0,1,2). Its NumAxes()==3; // axes.size()==2. // r_before_last deals with the numbering on axis 1. // r_last deals with the numbering on axis 2. RaggedShapeLayer &before_last = axes[axes.size() - 2], &last = axes[axes.size() - 1]; int32_t new_tot_size1 = r_before_last.NumNewElems(), new_tot_size2 = r_last.NumNewElems(); ContextPtr c = src.Context(); Array1<int32_t> before_last_row_ids(c, new_tot_size1), last_row_splits(c, new_tot_size1 + 1), last_row_ids(c, new_tot_size2); // The variable names below use this 3-axis assumption but the // code will work for greater number of axes. int32_t *new_row_ids1_data = before_last_row_ids.Data(), *new_row_splits2_data = last_row_splits.Data(), *new_row_ids2_data = last_row_ids.Data(); const int32_t *old_row_ids1_data = before_last.row_ids.Data(), *old_row_splits2_data = last.row_splits.Data(), *old_row_ids2_data = last.row_ids.Data(); const int32_t *idx01_new2old_data = r_before_last.New2Old().Data(), *idx01_old2new_data = r_before_last.Old2New().Data(), *idx012_new2old_data = r_last.New2Old().Data(), *idx012_old2new_data = r_last.Old2New().Data(); ParallelRunner pr(c); { With w(pr.NewStream()); // before_last.row_splits maps from idx0 -> idx01 (contains idx01's). Map // the idx01's; the idx0s stay the same. before_last.row_splits = r_before_last.Old2New()[before_last.row_splits]; } { With w(pr.NewStream()); K2_EVAL( c, new_tot_size1 + 1, lambda_set_row_ids1_and_row_splits2, (int32_t new_idx01)->void { // row_ids1 maps from idx01 -> idx0. Select subset of // idx01's; the idx0 stays the same. 
int32_t old_idx01 = idx01_new2old_data[new_idx01]; if (new_idx01 < new_tot_size1) new_row_ids1_data[new_idx01] = old_row_ids1_data[old_idx01]; // row_splits2 maps from idx01 -> idx012. Map both indexes. // idx01's; the idx0 stays the same. new_row_splits2_data[new_idx01] = idx012_old2new_data[old_row_splits2_data[old_idx01]]; }); } { With w(pr.NewStream()); K2_EVAL( c, new_tot_size2, lambda_set_row_ids2, (int32_t new_idx012)->void { // row_ids2 maps from idx012 -> idx01. Both must be mapped. int32_t old_idx012 = idx012_new2old_data[new_idx012]; int32_t old_idx01 = old_row_ids2_data[old_idx012], new_idx01 = idx01_old2new_data[old_idx01]; new_row_ids2_data[new_idx012] = new_idx01; }); } before_last.row_ids = before_last_row_ids; before_last.cached_tot_size = new_tot_size1; last.row_splits = last_row_splits; last.row_ids = last_row_ids; last.cached_tot_size = new_tot_size2; return RaggedShape(axes); } RaggedShape EmptyRaggedShape(ContextPtr &c, int32_t num_axes) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(num_axes, 2); std::vector<RaggedShapeLayer> axes(num_axes - 1); axes[0].row_splits = Array1<int32_t>(c, 1, 0); // row_ids will be the empty vector, with context `c`. axes[0].row_ids = axes[0].row_splits.Range(0, 0); axes[0].cached_tot_size = 0; for (int32_t a = 1; a + 1 < num_axes; ++a) axes[a] = axes[0]; return RaggedShape(axes); } Array1<int32_t> GetDecreasingSizeOrder(RaggedShape &shape) { NVTX_RANGE(K2_FUNC); ContextPtr &c = shape.Context(); Array1<int32_t> sizes = RowSplitsToSizes(shape.RowSplits(1)); Array1<int32_t> index_map; Sort<int32_t, GreaterThan<int32_t>>(&sizes, &index_map); return index_map; } RaggedShape GetLayer(const RaggedShape &src, int32_t layer) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(layer, 0); K2_CHECK_LT(layer, src.NumAxes() - 1); std::vector<RaggedShapeLayer> layers; layers.push_back(src.Layers()[layer]); bool check = false; return RaggedShape(layers, check); } void DecomposeRaggedShape(const RaggedShape &src, int32_t axis, RaggedShape *top, RaggedShape *bottom) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(axis, 0); K2_CHECK_LT(axis, src.NumAxes() - 1); const std::vector<RaggedShapeLayer> &src_layers = src.Layers(); std::vector<RaggedShapeLayer> top_layers(axis), bottom_layers(src_layers.size() - axis); int32_t src_size = static_cast<int32_t>(src_layers.size()); for (int32_t i = 0; i < axis; ++i) top_layers[i] = src_layers[i]; for (int32_t i = axis; i < src_size; ++i) bottom_layers[i - axis] = src_layers[i]; *top = RaggedShape(top_layers); *bottom = RaggedShape(bottom_layers); } RaggedShape RemoveEmptyLists(RaggedShape &src_shape, int32_t axis, Renumbering *renumbering_out) { NVTX_RANGE(K2_FUNC); if (axis == 0) { return RemoveEmptyListsAxis0(src_shape, renumbering_out); } RaggedShape top_shape, bottom_shape; DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape); Renumbering r_temp; if (!renumbering_out) renumbering_out = &r_temp; bottom_shape = RemoveEmptyListsAxis0(bottom_shape, renumbering_out); top_shape = SubsampleRaggedShape(top_shape, *renumbering_out); return ComposeRaggedShapes(top_shape, bottom_shape); } RaggedShape RemoveSomeEmptyLists(RaggedShape &src_shape, int32_t axis, Renumbering &renumbering) { NVTX_RANGE(K2_FUNC); if (axis == 0) { return RenumberAxis0Simple(src_shape, renumbering); } RaggedShape top_shape, bottom_shape; DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape); bottom_shape = RenumberAxis0Simple(bottom_shape, renumbering); top_shape = SubsampleRaggedShape(top_shape, renumbering); return ComposeRaggedShapes(top_shape, bottom_shape); } 
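// RemoveEmptyListsAxis0() below keeps list i iff
// row_splits[i + 1] != row_splits[i], i.e. iff the list is nonempty, then
// renumbers axis 0; e.g. a shape [ [ x x ] [ ] [ x ] ] becomes
// [ [ x x ] [ x ] ].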
RaggedShape RemoveEmptyListsAxis0(RaggedShape &src_shape, Renumbering *renumbering_out) { NVTX_RANGE(K2_FUNC); Renumbering r_temp; if (!renumbering_out) renumbering_out = &r_temp; ContextPtr &c = src_shape.Context(); int32_t num_lists = src_shape.Dim0(); *renumbering_out = Renumbering(c, num_lists); const int32_t *row_splits_data = src_shape.RowSplits(1).Data(); char *keep_data = renumbering_out->Keep().Data(); K2_EVAL( c, num_lists, lambda_set_keep, (int32_t i)->void { keep_data[i] = (row_splits_data[i + 1] != row_splits_data[i]); }); return RenumberAxis0Simple(src_shape, *renumbering_out); } RaggedShape RenumberAxis0Simple(RaggedShape &src_shape, Renumbering &renumbering) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(renumbering.NumOldElems(), src_shape.Dim0()); ContextPtr c = src_shape.Context(); src_shape.RowIds(1); // make sure RowIds(1) is populated. std::vector<RaggedShapeLayer> layers = src_shape.Layers(); int32_t num_layers = layers.size(); int32_t new_num_lists = renumbering.NumNewElems(), num_elems = src_shape.TotSize(1); // unchanged old vs. new. Array1<int32_t> new_row_splits(c, new_num_lists + 1), new_row_ids = renumbering.Old2New()[src_shape.RowIds(1)]; int32_t *new_row_splits_data = new_row_splits.Data(); const int32_t *old_row_splits_data = src_shape.RowSplits(1).Data(), *new2old_data = renumbering.New2Old().Data(); // set `new_row_splits_data`. #ifndef NDEBUG { Array1<int32_t> is_ok(c, 1, 1); int32_t *is_ok_data = is_ok.Data(); int32_t old_num_lists = src_shape.Dim0(); const int32_t *old2new_data = renumbering.Old2New().Data(); K2_EVAL( c, old_num_lists, lambda_check_preconditions, (int32_t i)->void { if (old2new_data[i + 1] == old2new_data[i]) { // This list not kept if (old_row_splits_data[i + 1] != old_row_splits_data[i]) { // this list was nonempty... is_ok_data[0] = 0; } } }); K2_CHECK_NE(is_ok[0], 0) << "RenumberAxis0Simple(): preconditions not met; " "renumbering removes nonempty lists."; } #endif K2_EVAL( c, new_num_lists + 1, lambda_set_new_row_splits, (int32_t new_i)->void { int32_t j; if (new_i == new_num_lists) { j = num_elems; } else { int32_t old_i = new2old_data[new_i]; j = old_row_splits_data[old_i]; } new_row_splits_data[new_i] = j; }); layers[0].row_splits = new_row_splits; layers[0].row_ids = new_row_ids; // no need to set its cached_tot_size; that didn't change. return RaggedShape(layers); } RaggedShape CoveringShape(int32_t num_srcs, RaggedShape **srcs) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_srcs, 0); if (num_srcs == 1) return *srcs[0]; K2_CHECK_EQ(srcs[0]->NumAxes(), 2); int32_t dim0 = srcs[0]->Dim0(); ContextPtr &c = srcs[0]->Context(); for (int32_t i = 1; i != num_srcs; ++i) { K2_CHECK_EQ(srcs[i]->NumAxes(), 2); K2_CHECK_EQ(srcs[i]->Dim0(), dim0); K2_CHECK(c->IsCompatible(*srcs[i]->Context())); } // get row splits of srcs Array1<int32_t *> row_splits_ptrs(GetCpuContext(), num_srcs); int32_t **splits_ptr_data = row_splits_ptrs.Data(); for (int32_t i = 0; i != num_srcs; ++i) { splits_ptr_data[i] = srcs[i]->RowSplits(1).Data(); } row_splits_ptrs = row_splits_ptrs.To(c); int32_t **src_row_splits_ptr_data = row_splits_ptrs.Data(); RaggedShape shape = RegularRaggedShape(c, dim0, num_srcs); Array1<int32_t> values(c, dim0 * num_srcs); // elements in row i of `sublist_sizes` are the sizes of row i // of src[0], src[1]... 
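  // e.g. with two srcs whose row sizes are [ 2 0 3 ] and [ 1 4 0 ], the rows
  // of the covering shape get sizes max(2,1), max(0,4), max(3,0) = [ 2 4 3 ].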
Ragged<int32_t> sublist_sizes(shape, values); int32_t *values_data = sublist_sizes.values.Data(); K2_EVAL2( c, dim0, num_srcs, lambda_set_sublist_sizes, (int32_t i, int32_t j)->void { values_data[i * num_srcs + j] = src_row_splits_ptr_data[j][i + 1] - src_row_splits_ptr_data[j][i]; }); Array1<int32_t> ans_row_splits(c, dim0 + 1); Array1<int32_t> ans_row_sizes = ans_row_splits.Arange(0, dim0); MaxPerSublist(sublist_sizes, 0, &ans_row_sizes); ExclusiveSum(ans_row_sizes, &ans_row_splits); return RaggedShape2(&ans_row_splits, nullptr, -1); } Array1<int32_t> CoveringShapeForwardMap(RaggedShape &src, RaggedShape &covering) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(src.NumAxes(), 2); K2_CHECK_EQ(covering.NumAxes(), 2); K2_CHECK_EQ(src.Dim0(), covering.Dim0()); int32_t num_elems = covering.NumElements(); K2_CHECK_GE(num_elems, src.NumElements()); ContextPtr c = GetContext(src, covering); Array1<int32_t> ans(c, num_elems); int32_t *ans_data = ans.Data(); const int32_t *covering_row_splits_data = covering.RowSplits(1).Data(), *covering_row_ids_data = covering.RowIds(1).Data(), *src_row_splits_data = src.RowSplits(1).Data(); K2_EVAL( c, num_elems, lambda_set_value, (int32_t covering_idx01)->void { int32_t covering_idx0 = covering_row_ids_data[covering_idx01], covering_idx0x = covering_row_splits_data[covering_idx0], covering_idx1 = covering_idx01 - covering_idx0x; // src and covering has the same dim0 int32_t src_idx0x = src_row_splits_data[covering_idx0], src_cur_row_size = src_row_splits_data[covering_idx0 + 1] - src_idx0x; K2_DCHECK_GE( covering_row_splits_data[covering_idx0 + 1] - covering_idx0x, src_cur_row_size); if (covering_idx1 >= src_cur_row_size) ans_data[covering_idx01] = -1; else ans_data[covering_idx01] = src_idx0x + covering_idx1; // src_idx01 }); return ans; } void RaggedShapeAxis0Splitter::Init(RaggedShape &src) { NVTX_RANGE(K2_FUNC); int32_t num_layers = src.NumLayers(), num_layers_out = num_layers - 1, dim0 = src.Dim0(); K2_CHECK_LE(num_layers_out, 4); // If this fails, add something to the 4s and // 5s here and in the header. K2_CHECK_GT(num_layers, 1); ContextPtr c = src.Context(); composite_row_splits_ = Array2<int32_t>(c, num_layers + 1, dim0 + 1); Array2Accessor<int32_t> composite_row_splits_acc = composite_row_splits_.Accessor(); RowSplitsAccessor<5> src_row_splits_acc(src); SmallVec<int32_t *, 5> row_splits_out_acc; K2_CHECK(num_layers_out <= 5); Array1<int32_t> garbage1(c, dim0 + dim0 + 1); // won't be read. row_splits_out_acc.data[0] = garbage1.Data(); for (int32_t l = 0; l < num_layers_out; l++) { row_splits_out_[l] = Array1<int32_t>(c, src.TotSize(l + 1) + dim0 + 1); row_splits_out_acc.data[l + 1] = row_splits_out_[l].Data(); } // set composite_row_splits_ and also those elements of // the output row_splits which are bound to be zero. K2_EVAL( c, dim0 + 1, lambda_set_composite_row_splits, (int32_t i)->void { int32_t cur_pos = i; composite_row_splits_acc(0, i) = cur_pos; for (int32_t l = 0; l < num_layers; l++) { // The following statement sets the zero at the beginning of each // row_splits, plus a final zero that we write to avoid an // if-statement. row_splits_out_acc.data[l][cur_pos + i] = 0; cur_pos = src_row_splits_acc.ptrs[l][cur_pos]; composite_row_splits_acc(l + 1, i) = cur_pos; } }); composite_row_splits_cpu_ = composite_row_splits_.To(GetCpuContext()); // Right now to_idx0 maps from an idx0 to an idx0 (identity map); next time it // will map from an idx01 to to an idx0, then idx012 to idx0 (all w.r.t. src). 
// It doesn't include the extra last element like a row_splits would; it's // like a composite row_ids vector: row_ids1, row_ids12 and so on. Array1<int32_t> to_idx0 = composite_row_splits_.Row(0).Arange(0, dim0); for (int32_t layer = 0; layer < num_layers_out; layer++) row_ids_out_[layer] = Array1<int32_t>(c, src.TotSize(layer + 2)); Array1<int32_t> garbage2(c, src.TotSize(1)); // corresponds to row_ids_out_[-1]. for (int32_t layer = 0; layer <= num_layers_out; layer++) { // num_elems is the number of elements we process in this kernel. int32_t num_elems = src.TotSize(layer + 1); // The names here are valid for layer == 1; this just happens to be useful // for exposition. const int32_t *src_row_ids2_data = src.RowIds(layer + 1).Data(), *idx01_to_idx0_data = to_idx0.Data(); int32_t *row_ids1_out_data = (layer == 0 ? garbage2.Data() : row_ids_out_[layer - 1].Data()); if (layer < num_layers_out) { Array1<int32_t> to_idx0_next(c, num_elems); int32_t *row_splits2_out_data = row_splits_out_[layer].Data(), *idx012_to_idx0_data = to_idx0_next.Data(); const int32_t *src_row_splits3_data = src.RowSplits(layer + 2).Data(); // row_splits3 maps from idx012 -> idx012x. // remember: the names are valid for layer == 1, just as an example. K2_EVAL( c, num_elems, lambda_set_row_splits_and_ids, (int32_t src_idx012)->void { int32_t src_idx01 = src_row_ids2_data[src_idx012], src_idx012x_next = src_row_splits3_data[src_idx012 + 1], src_idx0 = idx01_to_idx0_data[src_idx01]; idx012_to_idx0_data[src_idx012] = src_idx0; // <-- output here. int32_t src_idx0x = composite_row_splits_acc(layer, src_idx0), src_idx0xxx = composite_row_splits_acc(layer + 2, src_idx0), src_idx1 = src_idx01 - src_idx0x, src_idx12x_next = src_idx012x_next - src_idx0xxx, out_idx0 = src_idx1, out_idx01x_next = src_idx12x_next; row_ids1_out_data[src_idx012] = out_idx0; // below, the "+1" is because each element handles the next one // within this output row_splits array, with the zeros (1st elem of // each output row_splits array) handled by // lambda_set_composite_row_splits. The "+ idx0" is to make room // for the extra final element of all the previous row_splits // arrays. row_splits2_out_data[src_idx012 + 1 + src_idx0] = out_idx01x_next; }); to_idx0 = to_idx0_next; } else { // The next code is a subset of the other branch. K2_EVAL( c, num_elems, lambda_set_row_ids, (int32_t src_idx012)->void { int32_t src_idx01 = src_row_ids2_data[src_idx012], idx0 = idx01_to_idx0_data[src_idx01], src_idx0x = composite_row_splits_acc(layer, idx0), src_idx1 = src_idx01 - src_idx0x, out_idx0 = src_idx1; row_ids1_out_data[src_idx012] = out_idx0; }); } } } RaggedShape RaggedShapeAxis0Splitter::GetElement(int32_t i, int32_t *elem_offset) { NVTX_RANGE(K2_FUNC); int32_t num_layers_out = composite_row_splits_.Dim0() - 2; std::vector<RaggedShapeLayer> out; out.reserve(num_layers_out); auto composite_row_splits_cpu_acc = composite_row_splits_cpu_.Accessor(); for (int32_t layer = 0; layer < num_layers_out; layer++) { int32_t row_begin = composite_row_splits_cpu_acc(layer + 1, i), row_end = composite_row_splits_cpu_acc(layer + 1, i + 1), elem_begin = composite_row_splits_cpu_acc(layer + 2, i), elem_end = composite_row_splits_cpu_acc(layer + 2, i + 1), num_elems = elem_end - elem_begin; if (layer + 1 == num_layers_out && elem_offset != nullptr) *elem_offset = elem_begin; // the "+ i" is to account for the extra final elements of preceding // row_splits vectors; the + 1 is for the final element of this one. 
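    // Thus sub-shape i's row_splits occupy positions row_begin + i through
    // row_end + i of row_splits_out_[layer], while its row_ids are the
    // contiguous block [elem_begin, elem_end) of row_ids_out_[layer].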
Array1<int32_t> splits = row_splits_out_[layer].Arange(row_begin + i, row_end + i + 1), ids = row_ids_out_[layer].Arange(elem_begin, elem_end); out.emplace_back(RaggedShapeLayer{splits, ids, num_elems}); } // TODO: when thoroughly debugged, maybe turn off validation? return RaggedShape(out); } namespace hash_internal { // Utilities for hashing strings (actually: sequences of int32_t). /* T can be int32_t or int64_t. The following code shows what we are computing: std::vector<int32_t> input; T hash1 = 13, hash2 = 787; for (size_t i = 0; i < input.size(); i++) { hash1 = 31 * hash1 + input[i]; hash2 = 167 * hash2 + input[i]; } hash = hash1 + 104729 * hash2; I'm not sure that these constants are very optimal, but they are primes. The actual calculation is a little different from the above because of the need to do it via a reduction. */ template <typename T> struct Hash { T hash1; T hash2; T product1; T product2; // Would like this to be a POD type so not adding the following constructor: // Hash(int32_t i): hash1(i), hash2(i), product1(31), product2(167) { } // .. but implementing it in HashInputIterator. }; template <typename T> struct HashInputIterator { explicit __host__ __device__ __forceinline__ HashInputIterator(const int32_t *i) // NOLINT : i_(i) {} __device__ __forceinline__ Hash<T> operator[](int32_t idx) const { return Hash<T>{i_[idx], i_[idx], 31, 167}; } __device__ __forceinline__ HashInputIterator operator+(int32_t offset) const { return HashInputIterator(i_ + offset); } const int32_t *i_; }; template <typename T> struct HashOutputIteratorDeref { // this is what you get when you dereference // HashOutputIterator, it pretends to be a // Hash<T> but really only stores the `idx` // member. explicit __device__ __forceinline__ HashOutputIteratorDeref(T *t) : t_(t) {} __device__ __forceinline__ HashOutputIteratorDeref &operator=( const Hash<T> &h) { *t_ = h.hash1 + 13 * h.product1 + 104729 * h.hash2 + (104729 * 787) * h.product2; return *this; } T *t_; }; template <typename T> struct HashOutputIterator { // outputs just the index of the pair. 
explicit HashOutputIterator(T *t) : t_(t) {} __device__ __forceinline__ HashOutputIteratorDeref<T> operator[]( int32_t idx) const { return HashOutputIteratorDeref<T>(t_ + idx); } __device__ __forceinline__ HashOutputIterator operator+(size_t offset) { return HashOutputIterator{t_ + offset}; } T *t_; }; template <typename T> struct HashCombineOp { __device__ __forceinline__ Hash<T> operator()(const Hash<T> &a, const Hash<T> &b) const { return Hash<T>{a.hash1 * b.product1 + b.hash1, a.hash2 * b.product2 + b.hash2, a.product1 * b.product1, a.product2 * b.product2}; } }; } // namespace hash_internal } // namespace k2 namespace std { // those below typedefs are required by hipcub::DeviceSegmentedReduce:Reduce template <typename T> struct iterator_traits<k2::hash_internal::HashInputIterator<T>> { typedef k2::hash_internal::Hash<T> value_type; }; template <typename T> struct iterator_traits<k2::hash_internal::HashOutputIterator<T>> { typedef k2::hash_internal::Hash<T> value_type; typedef k2::hash_internal::HashOutputIteratorDeref<T> reference; }; } // namespace std namespace k2 { template <typename T> Array1<T> ComputeHash(Ragged<int32_t> &src) { NVTX_RANGE(K2_FUNC); int32_t last_axis = src.NumAxes() - 1; const Array1<int32_t> &row_splits_array = src.RowSplits(last_axis); int32_t num_rows = row_splits_array.Dim() - 1; ContextPtr &c = src.Context(); Array1<T> ans(c, num_rows); const int32_t *row_splits = row_splits_array.Data(); const int32_t *values_data = src.values.Data(); T *output_data = ans.Data(); if (c->GetDeviceType() == kCpu) { int32_t j = row_splits[0]; for (int32_t i = 0; i < num_rows; ++i) { T hash1 = 13, hash2 = 787; int32_t row_end = row_splits[i + 1]; for (; j < row_end; ++j) { T elem = values_data[j]; hash1 = 31 * hash1 + elem; hash2 = 167 * hash2 + elem; } T hash = hash1 + 104729 * hash2; output_data[i] = hash; } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); hash_internal::HashInputIterator<T> input_iter(values_data); hash_internal::HashOutputIterator<T> output_iter(output_data); hash_internal::HashCombineOp<T> op; hash_internal::Hash<T> initial_hash{ 0, 0, 1, 1 }; // This code is based on the example here: // https://nvlabs.github.io/cub/structcub_1_1_device_segmented_reduce.html std::size_t temp_storage_bytes = 0; // the first time is to determine temporary device storage requirements K2_CUDA_SAFE_CALL(hipcub::DeviceSegmentedReduce::Reduce( nullptr, temp_storage_bytes, input_iter, output_iter, num_rows, row_splits, row_splits + 1, op, initial_hash, c->GetCudaStream())); Array1<int8_t> d_temp_storage(c, temp_storage_bytes); K2_CUDA_SAFE_CALL(hipcub::DeviceSegmentedReduce::Reduce( d_temp_storage.Data(), temp_storage_bytes, input_iter, output_iter, num_rows, row_splits, row_splits + 1, op, initial_hash, c->GetCudaStream())); } return ans; } Ragged<int32_t> UniqueSequences(Ragged<int32_t> &src, Ragged<int32_t> *num_repeats /*=nullptr*/, Array1<int32_t> *new2old_indexes /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); if (src.NumAxes() == 2) { // Put 'fake' layer at front, process, then remove. 
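    // e.g. a 2-axis input [ [ 1 2 ] [ 3 ] [ 1 2 ] ] is viewed as
    // [ [ [ 1 2 ] [ 3 ] [ 1 2 ] ] ], its duplicate inner lists are removed
    // (giving [ [ [ 1 2 ] [ 3 ] ] ], up to the ordering induced by sorting
    // the hashes), and the extra axis is then removed again.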
Ragged<int32_t> temp = Unsqueeze(src, 0); return UniqueSequences(temp, num_repeats, new2old_indexes).RemoveAxis(0); } Array1<int64_t> hashes = ComputeHash<int64_t>(src); int32_t hashes_dim = hashes.Dim(); Array1<int32_t> order(c, hashes_dim); // Using the layer before the last layer of `src` for the shape of // `ragged_hashes` Ragged<int64_t> ragged_hashes(GetLayer(src.shape, src.shape.NumLayers() - 2), hashes); SortSublists<int64_t, LessThan<int64_t> >(&ragged_hashes, &order); Renumbering renumber_lists(c, hashes.Dim()); const int32_t *ragged_hashes_row_ids_data = ragged_hashes.RowIds(1).Data(), *ragged_hashes_row_splits_data = ragged_hashes.RowSplits(1).Data(); const int64_t *ragged_hashes_data = ragged_hashes.values.Data(); char *keep_list_data = renumber_lists.Keep().Data(); K2_EVAL( c, hashes_dim, lambda_set_keep, (int32_t i)->void { char keep; if (i == ragged_hashes_row_splits_data[ragged_hashes_row_ids_data[i]]) { // this is the first element of its sub-list in `ragged_hashes`. keep = 1; } else { keep = (ragged_hashes_data[i] != ragged_hashes_data[i - 1]); } keep_list_data[i] = keep; }); Array1<int32_t> new2old = renumber_lists.New2Old(), new2unsorted = order[new2old]; Ragged<int32_t> ans = Index(src, src.NumAxes() - 2, new2unsorted); if (num_repeats != nullptr) { int32_t new2old_dim = new2old.Dim(); Array1<int32_t> num_repeats_array(c, new2old_dim); const int32_t *new2old_data = new2old.Data(); int32_t *num_repeats_data = num_repeats_array.Data(); K2_EVAL( c, new2old_dim, set_num_repeats, (int32_t i)->void { if (i < new2old_dim - 1) { num_repeats_data[i] = new2old_data[i + 1] - new2old_data[i]; } else { num_repeats_data[i] = hashes_dim - new2old_data[i]; } }); *num_repeats = Ragged<int32_t>(GetLayer(ans.shape, ans.NumAxes() - 3), num_repeats_array); } if (new2old_indexes != nullptr) { *new2old_indexes = std::move(new2unsorted); } return ans; } // Instantiate template for int64 and int32. template Array1<int64_t> ComputeHash(Ragged<int32_t> &src); template Array1<int32_t> ComputeHash(Ragged<int32_t> &src); } // namespace k2
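// A minimal usage sketch of a few of the ops above, assuming a CPU context
// obtained from GetCpuContext():
//
//   ContextPtr c = GetCpuContext();
//   // A 2-axis shape [ [ x x ] [ x ] ], i.e. row_splits = [ 0 2 3 ].
//   Array1<int32_t> row_splits(c, std::vector<int32_t>{0, 2, 3});
//   RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1);
//   RaggedShape unsqueezed = Unsqueeze(shape, 0);  // [ [ [ x x ] [ x ] ] ]
//   RaggedShape back = RemoveAxis(unsqueezed, 0);  // same structure as `shape`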
4acd95ff278d85534f7f8db122902ee8028ba573.cu
/** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Yiming Wang * * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <cmath> #include <memory> #include <vector> #include "k2/csrc/array_ops.h" #include "k2/csrc/cub.h" #include "k2/csrc/macros.h" #include "k2/csrc/math.h" #include "k2/csrc/moderngpu_allocator.h" #include "k2/csrc/ragged.h" #include "k2/csrc/ragged_ops.h" #include "k2/csrc/ragged_utils.h" #include "moderngpu/kernel_mergesort.hxx" namespace { /* A helper function used in RaggedShape3; if both first and second are non-NULL, it will check if the context of them is compatible or not and return that context if compatible; if one of them is NULL, returns the other one's context. */ static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first, const k2::Array1<int32_t> *second) { K2_CHECK(first != nullptr || second != nullptr) << "At least one of first and second must be non-NULL"; if (first == nullptr) return second->Context(); else if (second == nullptr) return first->Context(); else return k2::GetContext(*first, *second); } } // namespace namespace k2 { RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes, int32_t max_num_axes, int32_t min_num_elements, int32_t max_num_elements) { ContextPtr c = GetCpuContext(); K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes && min_num_elements >= 0 && max_num_elements >= min_num_elements); int32_t num_axes = RandInt(min_num_axes, max_num_axes); int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements); bool done_repeats = false; std::vector<RaggedShapeLayer> axes(num_axes - 1); for (int32_t axis = num_axes - 2; axis >= 0; axis--) { // this axis will have row_ids of length num_elements and // row_splits of length to be determined. int32_t cur_row_split = 0; std::vector<int32_t> row_splits_vec; std::vector<int32_t> row_ids_vec; row_splits_vec.push_back(cur_row_split); // The reason for "|| RandInt(0, 2) == 0)" is so that even if there // are no elements we can still potentially generate empty row-splits. while (cur_row_split < num_elements || RandInt(0, 2) == 0) { int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split); cur_row_split += split_size; // sometimes we have a bunch of empty rows in a row (this will test out // more of the code), so here we generate a bunch of empty rows, but we // just do this only once (that's why we declare `done_repeats` here). if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) { int32_t num_repeats = RandIntGeometric(1, 128); row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split); // don't need to set `row_ids_vec` as there's no element. done_repeats = true; } row_splits_vec.push_back(cur_row_split); if (set_row_ids) { int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2; row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row); } } axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec); if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec); axes[axis].cached_tot_size = num_elements; num_elements = axes[axis].row_splits.Dim() - 1; } // RaggedShape(axes, true) will check the returned RaggedShape for // consistency. 
return RaggedShape(axes, true); } RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids, int32_t cached_tot_size) { NVTX_RANGE(K2_FUNC); K2_CHECK(row_splits != nullptr || row_ids != nullptr) << "At least one of row_splits and row_ids must be defined"; ContextPtr ctx = ::GetContext(row_splits, row_ids); if (cached_tot_size != -1) { if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim()); if (row_splits != nullptr) { // may be slow as it may copy memory from device to host K2_DCHECK_EQ(cached_tot_size, row_splits->Back()) << "Bad row splits is: " << *row_splits; } } std::vector<RaggedShapeLayer> axes(1); if (row_splits != nullptr) { axes[0].row_splits = *row_splits; } else { // we need to work out row_splits as we always require row_splits is not // empty for RaggedShape. Note here we suppose the last element in row_ids // is num_rows - 1, i.e. there're no empty rows after row `row_ids[-1]`. int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1; Array1<int32_t> row_splits_array(ctx, num_rows + 1); RowIdsToRowSplits(*row_ids, &row_splits_array); axes[0].row_splits = row_splits_array; } if (row_ids != nullptr) axes[0].row_ids = *row_ids; if (cached_tot_size == -1) { cached_tot_size = row_ids != nullptr ? row_ids->Dim() : axes[0].row_splits.Back(); } axes[0].cached_tot_size = cached_tot_size; // note below line will check if row_splits and row_ids are valid and agree // with each other. return RaggedShape(axes); } RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) { NVTX_RANGE(K2_FUNC); if (a.NumElements() != b.Dim0()) { K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements() << " vs. " << b.Dim0(); } K2_CHECK(IsCompatible(a, b)); const auto &a_axes = a.Layers(); const auto &b_axes = b.Layers(); std::size_t a_size = a_axes.size(), b_size = b_axes.size(); std::vector<RaggedShapeLayer> axes; axes.reserve(a_size + b_size); for (std::size_t i = 0; i < a_size; ++i) axes.emplace_back(a_axes[i]); for (std::size_t i = 0; i < b_size; ++i) axes.emplace_back(b_axes[i]); bool validate = false; return RaggedShape(axes, validate); } RaggedShape ComposeRaggedShapes3(const RaggedShape &a, const RaggedShape &b, const RaggedShape &c) { NVTX_RANGE(K2_FUNC); if (a.NumElements() != b.Dim0()) { K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements() << " vs. " << b.Dim0(); } if (b.NumElements() != c.Dim0()) { K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << b.NumElements() << " vs. 
" << c.Dim0(); } K2_CHECK(IsCompatible(a, b)); K2_CHECK(IsCompatible(b, c)); const auto &a_axes = a.Layers(); const auto &b_axes = b.Layers(); const auto &c_axes = c.Layers(); std::size_t a_size = a_axes.size(), b_size = b_axes.size(), c_size = c_axes.size(); std::vector<RaggedShapeLayer> axes; axes.reserve(a_size + b_size + c_size); for (std::size_t i = 0; i < a_size; ++i) axes.emplace_back(a_axes[i]); for (std::size_t i = 0; i < b_size; ++i) axes.emplace_back(b_axes[i]); for (std::size_t i = 0; i < c_size; ++i) axes.emplace_back(c_axes[i]); bool validate = false; return RaggedShape(axes, validate); } RaggedShape RaggedShape3(Array1<int32_t> *row_splits1, Array1<int32_t> *row_ids1, int32_t cached_tot_size1, Array1<int32_t> *row_splits2, Array1<int32_t> *row_ids2, int32_t cached_tot_size2) { NVTX_RANGE(K2_FUNC); RaggedShape shape1 = RaggedShape2(row_splits1, row_ids1, cached_tot_size1); Array1<int32_t> temp_array; if (row_splits2 == nullptr) { K2_CHECK_NE(row_ids2, nullptr) << "Either row-splits or row-ids must be defined"; temp_array = Array1<int32_t>(row_ids2->Context(), shape1.NumElements() + 1); row_splits2 = &temp_array; RowIdsToRowSplits(*row_ids2, row_splits2); } return ComposeRaggedShapes( shape1, RaggedShape2(row_splits2, row_ids2, cached_tot_size2)); } RaggedShape RaggedShape4(Array1<int32_t> *row_splits1, Array1<int32_t> *row_ids1, int32_t cached_tot_size1, Array1<int32_t> *row_splits2, Array1<int32_t> *row_ids2, int32_t cached_tot_size2, Array1<int32_t> *row_splits3, Array1<int32_t> *row_ids3, int32_t cached_tot_size3) { NVTX_RANGE(K2_FUNC); RaggedShape shape12 = RaggedShape3(row_splits1, row_ids1, cached_tot_size1, row_splits2, row_ids2, cached_tot_size2); Array1<int32_t> temp_array; if (row_splits3 == nullptr) { K2_CHECK_NE(row_ids3, nullptr) << "Either row-splits or row-ids must be defined"; temp_array = Array1<int32_t>(row_ids3->Context(), shape12.NumElements() + 1); row_splits3 = &temp_array; RowIdsToRowSplits(*row_ids3, row_splits3); } return ComposeRaggedShapes( shape12, RaggedShape2(row_splits3, row_ids3, cached_tot_size3)); } RaggedShape RaggedShapeFromTotSizes(ContextPtr c, int32_t num_axes, const int32_t *tot_sizes) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(num_axes, 2); std::vector<RaggedShapeLayer> axes(num_axes - 1); // In future we might choose to allocate everything in one big array, to avoid // multiple allocations, but for now just do it the simple way. for (int32_t axis = 1; axis < num_axes; ++axis) { axes[axis - 1].row_splits = Array1<int32_t>(c, tot_sizes[axis - 1] + 1); axes[axis - 1].row_ids = Array1<int32_t>(c, tot_sizes[axis]); axes[axis - 1].cached_tot_size = tot_sizes[axis]; } // Not check here as we did not set the values of row_splits and row_ids return RaggedShape(axes, false); } // See declaration in ragged.h for documentation of its purpose and interface. RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) { // If axis == 0, initial row_splits and row_ids will look like the following, // if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes // would be pushed forward. // // If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would // look like the following, if for instance the src.TotSize(axis) = 8: // [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ]. // // The reason why the code is different for axis == 0, is that in that case we // are really making visible an "implicit" axis of the input `src`; we could // call it axis 0 of the original RaggedShape. 
Imagine that "implicit" axis's // row_splits and row_ids map respectively from an idx_minus1 -> idx0 and from // an idx_0 to idx_minus1, where idx_minus1 is always 0 and 0 <= idx0 < // Dim0(). NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); K2_CHECK(axis >= 0 && axis <= src.NumAxes()); const std::vector<RaggedShapeLayer> &axes_in = src.Layers(); int32_t num_axes_in = src.NumAxes(); // Note: in RaggedShape, the vector of RaggedShapeLayer is of length // num_axes - 1, so the output will have one more axis than the input. std::vector<RaggedShapeLayer> axes_out(num_axes_in); int32_t row_splits_dim, row_ids_dim; Array1<int32_t> mem; if (axis == 0) { row_splits_dim = 2; // e.g. [ 0 5 ] row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ] mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); K2_EVAL( c, mem.Dim(), lambda_set_mem, (int32_t i)->void { if (i == 1) mem_data[i] = row_ids_dim; else mem_data[i] = 0; }); } else { int32_t tot_size = src.TotSize(axis); row_splits_dim = tot_size + 1; row_ids_dim = tot_size; mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); K2_EVAL( c, mem.Dim(), lambda_set_mem2, (int32_t i)->void { mem_data[i] = i % (tot_size + 1); }); } axes_out[axis].row_splits = mem.Range(0, row_splits_dim); axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim); axes_out[axis].cached_tot_size = row_ids_dim; for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i]; // Note: the returned array has `num_axes_in + 1` axes, so its // array of RaggedShapeLayer is of length `num_axes_in`. for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1]; return RaggedShape(axes_out); } std::vector<RaggedShape> UnsqueezeParallel(int32_t num_srcs, RaggedShape **src, int32_t axis) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(axis, 0); std::vector<RaggedShape> ans; if (num_srcs == 0) return ans; ans.reserve(num_srcs); ContextPtr &c = src[0]->Context(); std::vector<int32_t> all_row_splits_vec(num_srcs * 2); int32_t max_dim = 0; // all_row_splits_vec will contain [ 0 d0 0 d1 0 d2 .. ] // where d0 == src[0]->Dim0(), d1 == src[1]->Dim0().. for (int32_t i = 0; i < num_srcs; ++i) { int32_t this_dim0 = src[i]->Dim0(); if (this_dim0 > max_dim) max_dim = this_dim0; all_row_splits_vec[i * 2] = 0; all_row_splits_vec[i * 2 + 1] = this_dim0; } Array1<int32_t> all_row_splits(c, all_row_splits_vec); Array1<int32_t> all_row_ids(c, max_dim, 0); for (int32_t i = 0; i < num_srcs; ++i) { int32_t num_axes = src[i]->NumAxes(); std::vector<RaggedShapeLayer> axes; axes.reserve(num_axes); // note, the size of the `layers` of a RaggedShape // is its NumAxes() - 1. axes.resize(1); int32_t this_old_dim0 = all_row_splits_vec[i * 2 + 1]; axes[0].row_splits = all_row_splits.Range(i * 2, 2); axes[0].row_ids = all_row_ids.Range(0, this_old_dim0); axes[0].cached_tot_size = this_old_dim0; axes.insert(axes.end(), src[i]->Layers().begin(), src[i]->Layers().end()); ans.emplace_back(std::move(axes)); } return ans; } /* Internal function used in Index(), which gets certain arrays used internally. @param [in] src Source shape to be indexed @param [in] new2old Array of indexes into axis 0 of src; elements equal to -1 will be interpreted as referring to an empty list. @param [out] old_offsets Will be set to new Array2 with dimension (src.NumAxes(), new2old.Dim()), whose (i,j)'th element contains the offset into axis i of `src` where the slice of `src` with index0 (i.e. index into 0'th-axis of `src`) equal to `new2old[j]` begins. 
@param [out] new_offsets Will be set to new Array2 with dimension (src.NumAxes(), new2old.Dim()+1), whose (i,j)'th element contains the offset into axis i of `ans` where the data in `ans` corresponding to index j (i.e. index j into axis 0 of `ans`) begins. Note: `ans` is the result of Index(), with ans.Dim0() == new2old.Dim(). */ inline void GetOldAndNewOffsets(RaggedShape &src, const Array1<int32_t> &new2old, Array2<int32_t> *old_offsets, Array2<int32_t> *new_offsets) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(src.NumAxes(), 1); ContextPtr &c = src.Context(); int32_t num_axes = src.NumAxes(), ans_dim0 = new2old.Dim(); // max 5 layers. RowSplitsAccessor<5> row_splits_acc(src); const int32_t *new2old_data = new2old.Data(); *old_offsets = Array2<int32_t>(c, num_axes, ans_dim0); *new_offsets = Array2<int32_t>(c, num_axes, ans_dim0 + 1); auto old_offsets_acc = old_offsets->Accessor(), new_offsets_acc = new_offsets->Accessor(); // Set old_offsets; and for now, set new_offsets to the corresponding // sizes of the output slices. K2_EVAL( c, ans_dim0, lambda_set_offsets, (int32_t i)->void { // 0 <= i < ans_dim0 int32_t old_offset = new2old_data[i], old_offset_next = old_offset + 1, offset_diff = 1; // The following is a special case that interprets -1 as referring to an // empty list. In this case, old_offset == old_offset_next == 0. // The specific value 0 is not necessary; they could be equal // and have any value in [0, src.Dim0() - 1] and still refer to // the empty list. if (old_offset == -1) old_offset = 0; for (int32_t axis = 0;; axis++) { old_offsets_acc(axis, i) = old_offset; // Below, 'new_offsets_acc' currently contains the size rather // than the offset; we need to do exclusive-sum. new_offsets_acc(axis, i) = offset_diff; if (axis + 1 == num_axes) return; old_offset = row_splits_acc(axis)[old_offset]; old_offset_next = row_splits_acc(axis)[old_offset_next]; offset_diff = old_offset_next - old_offset; } }); ExclusiveSum(*new_offsets, new_offsets); } static RaggedShape IndexAxis0(RaggedShape &src, const Array1<int32_t> &new2old, Array1<int32_t> *elem_indexes /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); bool is_cpu = (c->GetDeviceType() == kCpu); K2_CHECK(IsCompatible(src, new2old)); int32_t num_axes = src.NumAxes(), src_dim0 = src.Dim0(), ans_dim0 = new2old.Dim(); if (ans_dim0 == 0) { if (elem_indexes) *elem_indexes = Array1<int32_t>(c, 0); return EmptyRaggedShape(c, num_axes); } Array2<int32_t> old_offsets, // num_axes by ans_dim0 new_offsets; // num_axes by (ans_dim0 + 1). GetOldAndNewOffsets(src, new2old, &old_offsets, &new_offsets); // tot_sizes_out is of dimension (num_axes), tot_sizes_out[i] is // ans.TotSize(i) Array1<int32_t> tot_sizes_out = Array1<int32_t>(new_offsets.Col(ans_dim0)).To(GetCpuContext()); int32_t *tot_sizes_out_cpu_data = tot_sizes_out.Data(); if (elem_indexes) *elem_indexes = Array1<int32_t>(c, tot_sizes_out_cpu_data[num_axes - 1]); RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out_cpu_data); auto old_offsets_acc = old_offsets.Accessor(), new_offsets_acc = new_offsets.Accessor(); for (int32_t axis = 1; axis < num_axes; axis++) { // we are not creating the actual row_ids here, except for axis 1; we are // creating "composed row_ids" which map to the index on axis 0. Array1<int32_t> row_ids = ans.RowIds(axis); RowSplitsToRowIds(new_offsets.Row(axis), &row_ids); } ans.Layers()[0].row_splits = new_offsets.Row(1); // Caution: e.g. old_row_splits_acc(i) == src.RowSplits(i+1). 
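  // (Illustrative note, not in the original source: e.g. with num_axes == 3,
  // old_row_splits_acc(0) and old_row_splits_acc(1) are src.RowSplits(1) and
  // src.RowSplits(2), and likewise old_row_ids_acc(i) is src.RowIds(i + 1);
  // keep this off-by-one in mind when reading the lambda below.)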
RowSplitsAccessor<5> old_row_splits_acc(src), new_row_splits_acc(ans); RowIdsAccessor<5> old_row_ids_acc(src), new_row_ids_acc(ans); SmallVec<int32_t, 6> tot_sizes; K2_CHECK(num_axes <= 6); int32_t max_tot_size = 0; for (int32_t i = 0; i < num_axes; i++) { tot_sizes.data[i] = tot_sizes_out_cpu_data[i]; max_tot_size = std::max<int32_t>(max_tot_size, tot_sizes.data[i]); } int32_t *elem_indexes_data = (elem_indexes != nullptr ? elem_indexes->Data() : nullptr); // Note, the first row_splits vector was set above, ans.Layers()[0].row_splits // = new_offsets.Row(1). auto lambda_set_row_splits_and_ids = [=] __host__ __device__( int32_t axis, int32_t i) -> void { axis++; // make it one-based. int32_t tot_size = tot_sizes(axis); // == new_offsets_acc(axis, ans_dim0); if (i > tot_size) return; int32_t *composed_row_ids_data = new_row_ids_acc(axis - 1); int32_t ans_idx0 = (i == tot_size ? ans_dim0 : composed_row_ids_data[i]), job_begin = new_offsets_acc(axis, ans_idx0), job_this_idx0 = i - job_begin; K2_CHECK_GE(job_this_idx0, 0); int32_t row_split_value = 0, new_next_offset = 0; if (axis + 1 < num_axes) new_next_offset = new_offsets_acc(axis + 1, ans_idx0); if (i < tot_size) { // "prev" means for axis - 1 int32_t new_prev_offset = new_offsets_acc(axis - 1, ans_idx0), old_prev_offset = old_offsets_acc(axis - 1, ans_idx0), old_offset = old_offsets_acc(axis, ans_idx0), old_idx = old_offset + job_this_idx0; if (axis != 1) { // Write row-ids. // Actually doing this for axis == 1 is harmless, but unnecessary, as it // would write back the same values that were already there. We avoid // the memory access. // this_new_row_ids = new_row_ids_acc(axis - 1); int32_t *this_new_row_ids = composed_row_ids_data; const int32_t *this_old_row_ids = old_row_ids_acc(axis - 1); int32_t old_row_id = this_old_row_ids[old_idx], new_row_id = old_row_id + new_prev_offset - old_prev_offset; this_new_row_ids[i] = new_row_id; } if (elem_indexes_data != nullptr && axis == num_axes - 1) elem_indexes_data[i] = old_idx; if (axis + 1 < num_axes) { int32_t old_next_offset = old_offsets_acc(axis + 1, ans_idx0), next_offset_diff = new_next_offset - old_next_offset; const int32_t *old_row_splits_data = old_row_splits_acc(axis); row_split_value = next_offset_diff + old_row_splits_data[old_idx]; } } else { row_split_value = new_next_offset; } if (axis + 1 < num_axes) { int32_t *new_row_splits_data = new_row_splits_acc(axis); new_row_splits_data[i] = row_split_value; } }; constexpr int32_t cutoff = 50000; if (c->GetDeviceType() == kCpu) { for (int32_t axis = 0; axis < num_axes - 1; axis++) { int32_t this_size = tot_sizes(axis + 1); for (int32_t i = 0; i <= this_size; i++) lambda_set_row_splits_and_ids(axis, i); } } else if (max_tot_size * (num_axes - 1) < cutoff) { Eval2Device(c, num_axes - 1, max_tot_size + 1, lambda_set_row_splits_and_ids); } else { // Loop in the kernel rather than submitting an excessive number of threads. 
auto lambda_loop = [=] __device__(int32_t i) { for (int32_t axis = 0; axis < num_axes - 1; axis++) { lambda_set_row_splits_and_ids(axis, i); } }; EvalDevice(c, max_tot_size + 1, lambda_loop); } #if !defined(NDEBUG) ans.Check(); #endif return ans; } RaggedShape Index(RaggedShape &src, int32_t axis, const Array1<int32_t> &indexes, Array1<int32_t> *elem_indexes /*=nullptr*/) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); K2_CHECK_LT(static_cast<uint32_t>(axis), static_cast<uint32_t>(num_axes)); if (axis == 0) { return IndexAxis0(src, indexes, elem_indexes); } else if (axis == src.NumAxes() - 1) { // This code is related to SubsampleRaggedShape(). `indexes` corresponds // to `new2old`. Array1<int32_t> last_row_ids = src.RowIds(num_axes - 1)[indexes]; #ifndef NDEBUG if (!IsMonotonic(last_row_ids)) { K2_LOG(FATAL) << "Invalid indexes used when indexing RaggedShape"; } #endif Array1<int32_t> last_row_splits(last_row_ids.Context(), src.TotSize(num_axes - 2) + 1); RowIdsToRowSplits(last_row_ids, &last_row_splits); if (elem_indexes) *elem_indexes = indexes; std::vector<RaggedShapeLayer> axes = src.Layers(); axes.back().row_splits = last_row_splits; axes.back().row_ids = last_row_ids; axes.back().cached_tot_size = last_row_ids.Dim(); // TODO: disable checking by changing true to false. return RaggedShape(axes, true); } else { RaggedShape top, bottom; DecomposeRaggedShape(src, axis, &top, &bottom); RaggedShape top_indexed = Index(top, axis, indexes, nullptr), bottom_indexed = IndexAxis0(bottom, indexes, elem_indexes); return ComposeRaggedShapes(top_indexed, bottom_indexed); } } // returns array of dim (src[0]->NumAxes() + 1) by (num_srcs + 1), // see documentation in header. Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_srcs, 0); int32_t num_axes_in = src[0]->NumAxes(); ContextPtr &ctx = src[0]->Context(); Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1); int32_t *src_offsets_data = src_offsets.Data(); int32_t src_offsets_stride0 = src_offsets.ElemStride0(); // Check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in); K2_CHECK(ctx->IsCompatible(*src[i]->Context())); } for (int32_t axis = 0; axis <= num_axes_in; ++axis) { int32_t sum = 0; for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column src_offsets_data[axis * src_offsets_stride0 + i] = sum; if (i < num_srcs) { sum += (axis == 0 ? 
1 : src[i]->TotSize(axis - 1)); } } } return src_offsets; } void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits, Array1<int32_t *> *row_ids) { NVTX_RANGE(K2_FUNC); int32_t axes = src.NumAxes(); K2_CHECK_GE(axes, 2); src.Populate(); std::vector<int32_t *> row_splits_ptrs(axes - 1); std::vector<int32_t *> row_ids_ptrs(axes - 1); for (int32_t i = 1; i != axes; ++i) { row_splits_ptrs[i - 1] = src.RowSplits(i).Data(); row_ids_ptrs[i - 1] = src.RowIds(i).Data(); } ContextPtr ctx = src.Context(); *row_splits = Array1<int32_t *>(ctx, row_splits_ptrs); *row_ids = Array1<int32_t *>(ctx, row_ids_ptrs); } void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src, Array2<int32_t *> *row_splits, Array2<int32_t *> *row_ids) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_srcs, 0); int32_t num_axes_in = src[0]->NumAxes(); K2_CHECK_GE(num_axes_in, 2); ContextPtr ctx = src[0]->Context(); // check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in); K2_CHECK(ctx->IsCompatible(*src[i]->Context())); } Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs); Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs); int32_t **splits_ptr_data = row_splits_ptrs.Data(); int32_t **ids_ptr_data = row_ids_ptrs.Data(); int32_t stride0 = row_splits_ptrs.ElemStride0(); K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0()); for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) { for (int32_t i = 0; i != num_srcs; ++i) { splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data(); ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data(); } } *row_splits = row_splits_ptrs.To(ctx); *row_ids = row_ids_ptrs.To(ctx); } static RaggedShape StackAxis0(int32_t num_srcs, RaggedShape **src, Array1<uint32_t> *merge_map /* == nullptr*/) { NVTX_RANGE(K2_FUNC); if (num_srcs == 1) { if (merge_map) *merge_map = Arange<uint32_t>(src[0]->Context(), 0, src[0]->NumElements()); RaggedShape top_layer = TrivialShape(src[0]->Context(), src[0]->Dim0()); return ComposeRaggedShapes(top_layer, **src); } // We can't handle num_srcs == 0 because we won't have a context object. K2_CHECK_GT(num_srcs, 1); int32_t num_axes_in = src[0]->NumAxes(), num_axes_out = num_axes_in + 1; ContextPtr c = src[0]->Context(); bool is_cpu = (c->GetDeviceType() == kCpu); // Check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(num_axes_in, src[i]->NumAxes()); K2_CHECK(IsCompatible(*src[0], *src[i])); } // `offsets` will be on CPU for now. // It shape is (num_axes_in + 1 == num_axes_out, num_srcs + 1). Array2<int32_t> offsets = GetOffsets(num_srcs, src); auto offsets_acc = offsets.Accessor(); SmallVec<int32_t, 6> tot_sizes_out; K2_CHECK(num_axes_out <= 6); int32_t max_tot_size = 0; for (int32_t axis = 0; axis < num_axes_out; axis++) { tot_sizes_out.data[axis] = offsets_acc(axis, num_srcs); max_tot_size = std::max<int32_t>(max_tot_size, tot_sizes_out.data[axis]); } RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes_out, tot_sizes_out.data); // src_row_splits and src_row_ids are of dim num_axes_in-1 by num_srcs. 
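  // (Illustrative note, not in the original source: following GetRowInfoMulti()
  // above, entry (l, i) of these arrays is src[i]->RowSplits(l + 1).Data() and
  // src[i]->RowIds(l + 1).Data() respectively, i.e. one row per layer and one
  // column per source.)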
Array2<int32_t *> src_row_splits, src_row_ids; GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids); auto src_row_splits_acc = src_row_splits.Accessor(), src_row_ids_acc = src_row_ids.Accessor(); offsets = offsets.To(c); offsets_acc = offsets.Accessor(); for (int32_t axis = 1; axis < num_axes_out; axis++) { // we are not creating the actual row_ids here, except for axis 1; we are // creating "composed row_ids" which map to the index on axis 0. Array1<int32_t> row_ids = ans.RowIds(axis); RowSplitsToRowIds(offsets.Row(axis), &row_ids); } ans.Layers()[0].row_splits = offsets.Row(1); // Caution: e.g. old_row_splits_acc(i) == src.RowSplits(i+1). RowSplitsAccessor<5> new_row_splits_acc(ans); RowIdsAccessor<5> new_row_ids_acc(ans); uint32_t *merge_map_data; if (merge_map != nullptr) { *merge_map = Array1<uint32_t>(c, tot_sizes_out.data[num_axes_out - 1]); merge_map_data = merge_map->Data(); } else { merge_map_data = nullptr; } // Note, the first row_splits vector was set above, ans.Layers()[0].row_splits // = new_offsets.Row(1). auto lambda_set_row_splits_and_ids = [=] __host__ __device__( int32_t axis, int32_t i) -> void { ++axis; // We want this to be called starting with axis == 1, but Eval2 // doesn't suppor that. // At this point, 1 < axis < num_axes_out. // This kernel will be writing one or both of: // the row-splits for output-layer==`axis`/input-layer==`axis-1`, // the row-ids for output-layer=`axis-1`/input-layer==`axis-2`. int32_t tot_size = tot_sizes_out(axis); // == offsets_acc(axis, num_srcs); if (i > tot_size) return; int32_t *composed_row_ids_data = new_row_ids_acc(axis - 1); int32_t ans_idx0 = (i == tot_size ? num_srcs : composed_row_ids_data[i]), // note: ans_idx0 == src_idx. job_begin = offsets_acc(axis, ans_idx0), job_this_idx0 = i - job_begin; K2_CHECK_GE(job_this_idx0, 0); int32_t row_split_value = 0, new_next_offset = 0; uint32_t *merge_map_data_local = nullptr; if (axis + 1 < num_axes_out) { new_next_offset = offsets_acc(axis + 1, ans_idx0); } else { merge_map_data_local = merge_map_data; } if (i < tot_size) { // "prev" means for axis - 1 int32_t new_prev_offset = offsets_acc(axis - 1, ans_idx0); if (axis != 1) { // Write row-ids. // this_new_row_ids = new_row_ids_acc(axis - 1); int32_t *this_new_row_ids = composed_row_ids_data; const int32_t *this_src_row_ids = src_row_ids_acc(axis - 2, ans_idx0); int32_t old_row_id = this_src_row_ids[job_this_idx0], new_row_id = old_row_id + new_prev_offset; this_new_row_ids[i] = new_row_id; } if (merge_map_data_local != nullptr) { merge_map_data_local[i] = ans_idx0 + num_srcs * job_this_idx0; } if (axis + 1 < num_axes_out) { const int32_t *src_row_splits_data = src_row_splits_acc(axis - 1, ans_idx0); int32_t old_row_split = src_row_splits_data[job_this_idx0]; row_split_value = new_next_offset + old_row_split; } } else { row_split_value = new_next_offset; } if (axis + 1 < num_axes_out) { int32_t *new_row_splits_data = new_row_splits_acc(axis); new_row_splits_data[i] = row_split_value; } }; constexpr int32_t cutoff = 50000; if (c->GetDeviceType() == kCpu) { for (int32_t axis = 0; axis < num_axes_out - 1; axis++) { int32_t this_size = tot_sizes_out(axis + 1); for (int32_t i = 0; i <= this_size; i++) lambda_set_row_splits_and_ids(axis, i); } } else if (max_tot_size * (num_axes_out - 1) < cutoff) { Eval2Device(c, num_axes_out - 1, max_tot_size + 1, lambda_set_row_splits_and_ids); } else { // Loop in the kernel rather than submitting an excessive number of threads. 
auto lambda_loop = [=] __device__(int32_t i) { for (int32_t axis = 0; axis < num_axes_out - 1; axis++) { lambda_set_row_splits_and_ids(axis, i); } }; EvalDevice(c, max_tot_size + 1, lambda_loop); } #if !defined(NDEBUG) ans.Check(); #endif return ans; } RaggedShape Cat(int32_t axis, int32_t num_srcs, RaggedShape **src, Array1<uint32_t> *merge_map /* == nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_srcs, 0); if (axis == 0) { RaggedShape temp = StackAxis0(num_srcs, src, merge_map); std::vector<RaggedShapeLayer> ans_layers( temp.Layers().begin() + 1, temp.Layers().end()); return RaggedShape(ans_layers, false); } K2_CHECK_LT(static_cast<uint32_t>(axis), static_cast<uint32_t>(src[0]->NumAxes())); int32_t num_axes = src[0]->NumAxes(); std::vector<RaggedShapeLayer> ans_layers(num_axes - 1); // If axis >= 2, some layers of `src` will pass through unchanged (we should // check that they are identical across all sources). for (int32_t l = 0; l + 1 < axis; l++) { CheckLayerEqual(l, num_srcs, src); ans_layers[l] = src[0]->Layers()[l]; } Array1<uint32_t> merge_map_local; Array1<uint32_t> *this_m = (axis + 1 == num_axes ? merge_map : &merge_map_local); RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m), t = SubsampleRaggedLayer(s, 0, num_srcs); ans_layers[axis - 1] = t.Layers()[0]; for (int32_t l = axis; l + 1 < num_axes; l++) { Array1<uint32_t> merge_map_next; Array1<uint32_t> *this_m = (l + 2 == num_axes ? merge_map : &merge_map_next); RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m); ans_layers[l] = r.Layers()[0]; merge_map_local = merge_map_next; } // TODO(dan) after this is debugged: add ", false". return RaggedShape(ans_layers); } RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(src.NumAxes(), 2); K2_CHECK(axis >= 0 && axis < src.NumAxes()); // note, `axes_in` is of dim src.NumAxes() - 1. // Also note: axes_in[i] pertains to the relationship between // axes i and i+1 in the source. 
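  // Illustrative example (not in the original source): if src is
  // [ [ [ x ] [ x x ] ] [ [ x x x ] ] ], then RemoveAxis(src, 0) gives
  // [ [ x ] [ x x ] [ x x x ] ] and RemoveAxis(src, 1) gives
  // [ [ x x x ] [ x x x ] ].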
src.Populate(); const std::vector<RaggedShapeLayer> &axes_in = src.Layers(); std::vector<RaggedShapeLayer> axes_out(axes_in.size() - 1); int32_t axes_out_size = static_cast<int32_t>(axes_out.size()); for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i]; if (axis > 0 && axis + 1 < src.NumAxes()) { axes_out[axis - 1].row_ids = axes_in[axis - 1].row_ids[axes_in[axis].row_ids]; axes_out[axis - 1].row_splits = axes_in[axis].row_splits[axes_in[axis - 1].row_splits]; axes_out[axis - 1].cached_tot_size = axes_out[axis - 1].row_ids.Dim(); } for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1]; return RaggedShape(axes_out); } RaggedShape MakeTransposable(RaggedShape &src) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(src.NumAxes(), 2); int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1); if (src_dim0 <= 1) return src; ContextPtr c = src.Context(); int32_t num_axes = src.NumAxes(); int32_t max_size = src.MaxSize(1); if (max_size <= 0) return src; int32_t ans_tot_size1 = max_size * src_dim0; src.Populate(); const std::vector<RaggedShapeLayer> &axes_in = src.Layers(); std::vector<RaggedShapeLayer> axes_out(num_axes - 1); const int32_t *src_row_splits1_data = src.RowSplits(1).Data(); const int32_t *src_row_ids1_data = src.RowIds(1).Data(); { ParallelRunner pr(c); RaggedShapeLayer &axis1_shape = axes_out[0]; { // set ans.RowSplits(1); With w(pr.NewStream()); axis1_shape.row_splits = Range(c, src_dim0 + 1, 0, max_size); } { // set ans.RowIds(1); With w(pr.NewStream()); axis1_shape.row_ids = Array1<int32_t>(c, ans_tot_size1); int32_t *row_ids1_data = axis1_shape.row_ids.Data(); axis1_shape.cached_tot_size = ans_tot_size1; K2_EVAL( c, ans_tot_size1, lambda_set_row_ids1, (int32_t i)->void { row_ids1_data[i] = i / max_size; }); } if (num_axes > 2) { RaggedShapeLayer &axis2_shape = axes_out[1]; const int32_t *src_row_splits2_data = src.RowSplits(2).Data(); { // set ans.RowSplits(2); With w(pr.NewStream()); axis2_shape.cached_tot_size = src.TotSize(2); axis2_shape.row_splits = Array1<int32_t>(c, ans_tot_size1 + 1); int32_t *ans_row_splits2_data = axis2_shape.row_splits.Data(); K2_EVAL( c, ans_tot_size1 + 1, lambda_set_row_splits2, (int32_t idx01)->void { if (idx01 == ans_tot_size1) { ans_row_splits2_data[idx01] = src_row_splits2_data[src_tot_size1]; return; } int32_t idx0 = idx01 / max_size, idx1 = idx01 % max_size; int32_t idx0x = src_row_splits1_data[idx0], idx0x_next = src_row_splits1_data[idx0 + 1]; int32_t num_elems_this_row = idx0x_next - idx0x; if (idx1 < num_elems_this_row) ans_row_splits2_data[idx01] = src_row_splits2_data[idx0x + idx1]; else ans_row_splits2_data[idx01] = src_row_splits2_data[idx0x_next]; // append empty row }); } { // set ans.RowIds(2); With w(pr.NewStream()); int32_t tot_size2 = src.TotSize(2); axis2_shape.row_ids = Array1<int32_t>(c, tot_size2); int32_t *ans_row_ids2_data = axis2_shape.row_ids.Data(); const int32_t *src_row_ids2_data = src.RowIds(2).Data(); K2_EVAL( c, tot_size2, lambda_set_row_ids2, (int32_t idx012)->void { int32_t src_idx01 = src_row_ids2_data[idx012]; int32_t src_idx0 = src_row_ids1_data[src_idx01]; int32_t src_idx1 = src_idx01 - src_row_splits1_data[src_idx0]; ans_row_ids2_data[idx012] = (src_idx0 * max_size) + src_idx1; }); } } } // copy left row_splits and row_ids; for (int32_t i = 2; i < num_axes - 1; ++i) axes_out[i] = axes_in[i]; return RaggedShape(axes_out); } // transpose axes 0 and 1. 
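// Illustrative example (not in the original source): axis 0 must have regular
// sizes (see MakeTransposable() above); e.g. if src is
// [ [ [ a ] [ b c ] ] [ [ d ] [ e ] ] ], transposing gives
// [ [ [ a ] [ d ] ] [ [ b c ] [ e ] ] ], i.e. ans[j][i] == src[i][j].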
RaggedShape Transpose(RaggedShape &src, Array1<int32_t> *value_indexes) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(src.NumAxes(), 2); ContextPtr c = src.Context(); int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1); if (src_dim0 <= 0) return src; int32_t src_dim1 = src_tot_size1 / src_dim0; K2_CHECK_EQ(src_tot_size1 % src_dim0, 0) << "Transpose(): all dims on axis 0 must be the same.\n" << "src_tot_size1: " << src_tot_size1 << "\n" << "src_dim0: " << src_dim0 << ", array is: " << src; K2_DCHECK( Equal(src.RowSplits(1), Range(c, src.RowSplits(1).Dim(), 0, src_dim1))) << " Expected row-splits to be evenly spaced: " << src.RowSplits(1); RaggedShape src_no_axis0 = RemoveAxis(src, 0); K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1); // `renumbering` is a `new2old` map, that maps from the first index in // src_no_axis0_renumbered // to the first index into src_no_axis0. Array1<int32_t> renumbering(c, src_tot_size1); int32_t *renumbering_data = renumbering.Data(); K2_EVAL( c, src_tot_size1, lambda_set_renumbering, (int32_t i)->void { int32_t j = i % src_dim0, k = i / src_dim0, i_old = j * src_dim1 + k; renumbering_data[i] = i_old; }); RaggedShape src_no_axis0_renumbered = Index(src_no_axis0, 0, renumbering, value_indexes); int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1, row_ids_dim = src_tot_size1; std::vector<RaggedShapeLayer> ans_axis0(1); Array1<int32_t> mem(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); K2_EVAL( c, row_splits_dim + row_ids_dim, lambda_set_row_info, (int32_t i)->void { int32_t val; if (i >= row_splits_dim) { // row_ids int32_t elem_idx = i - row_splits_dim; val = elem_idx / src_dim0; } else { // row_splits int32_t row_idx = i; val = row_idx * src_dim0; } mem_data[i] = val; }); ans_axis0[0].row_splits = mem.Range(0, row_splits_dim); ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim); ans_axis0[0].cached_tot_size = row_ids_dim; RaggedShape temp(ans_axis0); return ComposeRaggedShapes(temp, src_no_axis0_renumbered); } RaggedShape Stack(int32_t axis, int32_t num_srcs, RaggedShape **src, Array1<uint32_t> *merge_map /* = nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_srcs, 0); K2_CHECK_LT(static_cast<uint32_t>(axis), static_cast<uint32_t>(src[0]->NumAxes())); ContextPtr c = src[0]->Context(); if (axis == 0) { return StackAxis0(num_srcs, src, merge_map); } K2_CHECK_LT(static_cast<uint32_t>(axis), static_cast<uint32_t>(src[0]->NumAxes())); int32_t num_axes = src[0]->NumAxes(); std::vector<RaggedShapeLayer> ans_layers(num_axes); // If axis >= 2, some layers of `src` will pass through unchanged (we should // check that they are identical across all sources). for (int32_t l = 0; l + 1 < axis; l++) { CheckLayerEqual(l, num_srcs, src); ans_layers[l] = src[0]->Layers()[l]; } Array1<uint32_t> merge_map_local; Array1<uint32_t> *this_m = (axis + 1 == num_axes ? merge_map : &merge_map_local); RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m); // note: s.Dim0() will be a multiple of num_srcs. ans_layers[axis - 1] = RegularRaggedShape(c, s.Dim0() / num_srcs, num_srcs).Layers()[0]; ans_layers[axis] = s.Layers()[0]; for (int32_t l = axis; l + 1 < num_axes; l++) { Array1<uint32_t> merge_map_next; Array1<uint32_t> *this_m = (l + 2 == num_axes ? merge_map : &merge_map_next); RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m); ans_layers[l + 1] = r.Layers()[0]; merge_map_local = merge_map_next; } // TODO(dan) after this is debugged: add ", false". 
  return RaggedShape(ans_layers);
}

RaggedShape Merge(int32_t num_srcs, RaggedShape **src,
                  const Array1<uint32_t> &merge_map,
                  Array1<uint32_t> *merge_map_out) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK(num_srcs > 0);
  int32_t num_layers = src[0]->NumAxes() - 1;

  std::vector<RaggedShapeLayer> ans_layers(num_layers);

  // Note: this is a shallow copy.
  Array1<uint32_t> merge_map_local = merge_map;

  for (int32_t l = 0; l < num_layers; l++) {
    Array1<uint32_t> merge_map_next;
    Array1<uint32_t> *this_m =
        (l + 1 == num_layers ? merge_map_out : &merge_map_next);
    RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m);
    ans_layers[l] = r.Layers()[0];
    merge_map_local = merge_map_next;
  }
  // TODO(dan) after this is debugged: add ", false".
  return RaggedShape(ans_layers);
}

RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) {
  NVTX_RANGE(K2_FUNC);
  // row_splits = [ 0 num_elems ]
  Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems);
  Array1<int32_t> row_ids(c, num_elems, 0);
  return RaggedShape2(&row_splits, &row_ids, num_elems);
}

RaggedShape RegularRaggedShape(ContextPtr &c, int32_t dim0, int32_t dim1) {
  NVTX_RANGE(K2_FUNC);
  Array1<int32_t> row_splits = Range<int32_t>(c, dim0 + 1, 0, dim1);
  Array1<int32_t> row_ids(c, dim0 * dim1);
  int32_t *row_ids_data = row_ids.Data();
  K2_EVAL2(
      c, dim0, dim1, lambda_set_row_ids,
      (int32_t i, int32_t j)->void { row_ids_data[i * dim1 + j] = i; });
  return RaggedShape2(&row_splits, &row_ids, dim0 * dim1);
}

Ragged<int32_t> GetCountsPartitioned(Ragged<int32_t> &src,
                                     RaggedShape &ans_ragged_shape) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK_EQ(src.NumAxes(), 2);
  K2_CHECK_EQ(ans_ragged_shape.NumAxes(), 2);
  K2_CHECK(IsCompatible(src, ans_ragged_shape));
  K2_CHECK_EQ(src.Dim0(), ans_ragged_shape.Dim0());
  const Array1<int32_t> &values = src.values;
  const Array1<int32_t> &row_splits = ans_ragged_shape.RowSplits(1);
  int32_t n = ans_ragged_shape.NumElements();
  Array1<int32_t> counts = GetCounts(values, n);
  return Ragged<int32_t>(ans_ragged_shape, counts);
}

static Array1<int32_t> GetTransposeReorderingCpu(Ragged<int32_t> &src,
                                                 int32_t num_cols) {
  NVTX_RANGE(K2_FUNC);
  std::vector<std::vector<int32_t>> column_indexes(num_cols);  // [column][row]
  const int32_t *values_data = src.values.Data();
  int32_t n = src.values.Dim();

  for (int32_t i = 0; i != n; ++i) {
    int32_t bucket = values_data[i];
    column_indexes[bucket].push_back(i);
  }

  Array1<int32_t> ans(src.Context(), n);
  int32_t *ans_data = ans.Data();
  for (int32_t i = 0; i != num_cols; ++i) {
    std::copy(column_indexes[i].begin(), column_indexes[i].end(), ans_data);
    ans_data += column_indexes[i].size();
  }
  return ans;
}

static Array1<int32_t> GetTransposeReorderingThreeAxesCuda(Ragged<int32_t> &src,
                                                           int32_t num_cols) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK_EQ(src.NumAxes(), 3);
  ContextPtr &context = src.Context();
  K2_CHECK_EQ(context->GetDeviceType(), kCuda);

  const Array1<int32_t> &row_splits1 = src.RowSplits(1);
  const int32_t *row_ids2_data = src.RowIds(2).Data();
  const int32_t *value_data = src.values.Data();
  Array1<int32_t> segments = src.RowSplits(2)[row_splits1];

  auto lambda_comp = [=] __device__(int32_t a_idx012,
                                    int32_t b_idx012) -> bool {
    int32_t a_col_index = value_data[a_idx012];
    int32_t b_col_index = value_data[b_idx012];

    if (a_col_index < b_col_index) return true;  // sort by column indexes
    if (a_col_index > b_col_index) return false;

    // at this point, a_idx012 and b_idx012 belong to the same column;
    // then we sort by their row indexes
    int32_t a_idx01 = row_ids2_data[a_idx012];
    int32_t b_idx01 = row_ids2_data[b_idx012];

    if (a_idx01 < b_idx01) return
true; if (a_idx01 > b_idx01) return false; // at this point, a_idx012 and b_idx012 are duplicate elements return false; // either true or false is fine }; mgpu::context_t *mgpu_context = GetModernGpuAllocator(context); int32_t n = src.values.Dim(); Array1<int32_t> ans = Range(context, n, 0); if (n == 0) return ans; K2_CUDA_SAFE_CALL(mgpu::segmented_sort(ans.Data(), // keys ans.Dim(), // count segments.Data(), // segments segments.Dim() - 1, // num_segments lambda_comp, *mgpu_context)); return ans; } /* // Checks the result of GetTranspoeReordering(), in debug mode and dies if it is wrong. static void CheckGetTransposeReordering(Ragged<int32_t> &src, Array1<int32_t> &ans) { if (!internal::kDisableDebug && !internal::DisableChecks()) { K2_CHECK(IsPermutation(ans)); K2_CHECK(IsMonotonic(src.values[ans])); } }*/ Array1<int32_t> GetTransposeReordering(Ragged<int32_t> &src, int32_t num_cols) { NVTX_RANGE(K2_FUNC); ContextPtr &context = src.Context(); if (src.NumAxes() < 2 || src.values.Dim() == 0) { // src is empty return Array1<int32_t>(context, 0); } DeviceType device_type = context->GetDeviceType(); if (device_type == kCpu) return GetTransposeReorderingCpu(src, num_cols); K2_CHECK_EQ(device_type, kCuda); (void)GetTransposeReorderingThreeAxesCuda; // remove compiler warnings #if __CUDACC_VER_MAJOR__ > 10 || \ (__CUDACC_VER_MAJOR__ == 10 && \ (__CUDACC_VER_MINOR__ > 1 || \ (__CUDACC_VER_MINOR__ == 1 && __CUDACC_VER_BUILD__ > 105))) // Enable it only for NVCC > 10.1.105 // // Refer to https://github.com/LLNL/axom/issues/88 // NVCC 10.1.105 has a known issue for cub::DeviceRadixSort int32_t num_buckets = num_cols; int32_t num_elements = src.values.Dim(); int32_t log_buckets = static_cast<int32_t>(ceilf(log2f(num_buckets))); Array1<int32_t> order = Range(context, num_elements, 0); Array1<int32_t> src_tmp_out(context, num_elements); Array1<int32_t> ans(context, num_elements); cudaStream_t stream = context->GetCudaStream(); size_t temp_storage_bytes = 0; K2_CUDA_SAFE_CALL(cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, src.values.Data(), src_tmp_out.Data(), order.Data(), ans.Data(), num_elements, 0, log_buckets, stream)); Array1<int8_t> d_temp_storage(context, temp_storage_bytes); K2_CUDA_SAFE_CALL(cub::DeviceRadixSort::SortPairs( d_temp_storage.Data(), temp_storage_bytes, src.values.Data(), src_tmp_out.Data(), order.Data(), ans.Data(), num_elements, 0, log_buckets, stream)); // CheckGetTransposeReordering(src, ans); return ans; #else if (src.NumAxes() == 3) { Array1<int32_t> ans = GetTransposeReorderingThreeAxesCuda(src, num_cols); // CheckGetTransposeReordering(src, ans); return ans; } const int32_t *row_splits1_data = src.RowSplits(src.NumAxes() - 1).Data(); const int32_t *row_ids1_data = src.RowIds(src.NumAxes() - 1).Data(); const int32_t *value_data = src.values.Data(); int32_t n = src.values.Dim(); Array1<int32_t> ans = Range(context, n, 0); if (n == 0) return ans; auto lambda_comp = [=] __device__(int32_t a_idx01, int32_t b_idx01) -> bool { int32_t a_idx0 = row_ids1_data[a_idx01]; int32_t b_idx0 = row_ids1_data[b_idx01]; int32_t a_col_index = value_data[a_idx01]; int32_t b_col_index = value_data[b_idx01]; if (a_col_index < b_col_index) return true; // sort by column indexes if (a_col_index > b_col_index) return false; // now we have a_col_index == b_col_index if (a_idx0 < b_idx0) return true; // sort by row indexes if (a_idx0 > b_idx0) return false; // now we have a_idx0 == b_idx0 && a_col_index == b_col_index // this entry is duplicated in the sparse matrix. 
return false; // we can return either true or false here. }; mgpu::context_t *mgpu_context = GetModernGpuAllocator(context); K2_CUDA_SAFE_CALL(mgpu::mergesort(ans.Data(), n, lambda_comp, *mgpu_context)); // CheckGetTransposeReordering(src, ans); return ans; #endif } RaggedShape ChangeSublistSize(const RaggedShape &src, int32_t size_delta) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(src.NumAxes(), 2); // the result will have the same num-axes as `src` (the NumAxes() of the // object is not the same as the number of RaggedShapeLayer axes). std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1); int32_t last_axis = src.NumAxes() - 1; // The following will only do something if src.NumAxes() > 2. for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i]; ContextPtr &c = src.Context(); int32_t num_rows = src.TotSize(last_axis - 1), src_num_elems = src.TotSize(last_axis), num_elems = src_num_elems + size_delta * num_rows; ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1); ans_axes.back().row_ids = Array1<int32_t>(c, num_elems); ans_axes.back().cached_tot_size = num_elems; const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data(), *src_row_ids_data = src.RowIds(last_axis).Data(); int32_t *row_splits_data = ans_axes.back().row_splits.Data(), *row_ids_data = ans_axes.back().row_ids.Data(); { ParallelRunner pr(c); { With w(pr.NewStream()); K2_EVAL( c, num_rows + 1, lambda_set_row_splits, (int32_t idx0)->void { row_splits_data[idx0] = src_row_splits_data[idx0] + size_delta * idx0; }); } { With w(pr.NewStream()); K2_EVAL( c, src_num_elems, lambda_set_row_ids1, (int32_t src_idx01)->void { int32_t src_idx0 = src_row_ids_data[src_idx01], src_idx0x = src_row_splits_data[src_idx0], src_idx1 = src_idx01 - src_idx0x, new_idx0x = row_splits_data[src_idx0], new_idx0x_next = row_splits_data[src_idx0 + 1], new_idx01 = new_idx0x + src_idx1; // it's only necessary to guard the next statement with in 'if' // because size_delta might be negative. if (new_idx01 < new_idx0x_next) row_ids_data[new_idx01] = src_idx0; }); } if (size_delta > 0) { // This sets the row-ids that are not set by lambda_set_row_ids1. With w(pr.NewStream()); K2_EVAL( c, num_rows * size_delta, lambda_set_row_ids2, (int32_t i)->void { int32_t idx0 = i / size_delta, n = i % size_delta, next_idx0 = idx0 + 1; // The following formula is the same as the one in // lambda_set_row_splits; we want to compute the new value of // row_splits_data[next_idx0] without waiting for that kernel to // terminate. int32_t next_idx0x = src_row_splits_data[next_idx0] + size_delta * next_idx0; row_ids_data[next_idx0x - 1 - n] = idx0; }); } // make the ParallelRunner go out of scope (should do this before any // validation code that gets invoked by the constructor of RaggedShape // below). } return RaggedShape(ans_axes); } // TODO(dan): this could definitely be made more efficient. RaggedShape ChangeSublistSizePinned(RaggedShape &src, int32_t size_delta) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(src.NumAxes(), 2); // the result will have the same num-axes as `src` (the NumAxes() of the // object is not the same as the number of RaggedShapeLayer axes). std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1); int32_t last_axis = src.NumAxes() - 1; // The following will only do something if src.NumAxes() > 2. 
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i]; ContextPtr &c = src.Context(); int32_t num_rows = src.TotSize(last_axis - 1); ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1); const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data(); int32_t *row_splits_data = ans_axes.back().row_splits.Data(); K2_EVAL( c, num_rows, lambda_set_row_sizes, (int32_t idx0)->void { int32_t orig_size = src_row_splits_data[idx0 + 1] - src_row_splits_data[idx0], size; if (orig_size == 0 || orig_size + size_delta <= 0) size = 0; else size = orig_size + size_delta; row_splits_data[idx0] = size; }); ExclusiveSum(ans_axes.back().row_splits, &ans_axes.back().row_splits); ans_axes.back().row_ids = Array1<int32_t>(c, ans_axes.back().row_splits.Back()); RowSplitsToRowIds(ans_axes.back().row_splits, &ans_axes.back().row_ids); ans_axes.back().cached_tot_size = ans_axes.back().row_ids.Dim(); return RaggedShape(ans_axes); } RaggedShape Prefix(RaggedShape &src, int32_t n) { NVTX_RANGE(K2_FUNC); int32_t dim0 = src.Dim0(); K2_CHECK(n >= 0 && n <= dim0); src.Populate(); int32_t num_axes = src.NumAxes(); K2_CHECK_GE(num_axes, 2); const std::vector<RaggedShapeLayer> &axes_in = src.Layers(); std::vector<RaggedShapeLayer> axes_out(axes_in.size()); int32_t row_end = n; for (int32_t axis = 0; axis < num_axes - 1; ++axis) { axes_out[axis].row_splits = axes_in[axis].row_splits.Arange(0, row_end + 1); // notice here we may do a memory copy from GPU to CPU. row_end = axes_in[axis].row_splits[row_end]; axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end); axes_out[axis].cached_tot_size = row_end; } return RaggedShape(axes_out); } std::vector<RaggedShape> GetPrefixes(RaggedShape &src, const std::vector<int32_t> &sizes) { NVTX_RANGE(K2_FUNC); src.Populate(); int32_t dim0 = src.Dim0(); int32_t num_axes = src.NumAxes(); K2_CHECK_GE(num_axes, 2); ContextPtr &c = src.Context(); const std::vector<RaggedShapeLayer> &axes_in = src.Layers(); // get those row_end elements at each axis. 
  int32_t ans_size = static_cast<int32_t>(sizes.size());
  Array1<int32_t> row_ends(c, num_axes * ans_size);
  Array1<int32_t> sizes_array(GetCpuContext(), sizes);
  Array1<int32_t> indexes = row_ends.Arange(0, ans_size);
  indexes.CopyFrom(sizes_array);

  for (int32_t axis = 1; axis < num_axes; ++axis) {
    Array1<int32_t> curr_axis_row_ends =
        row_ends.Arange(axis * ans_size, (axis + 1) * ans_size);
    axes_in[axis - 1].row_splits.Index(indexes, &curr_axis_row_ends);
    indexes = curr_axis_row_ends;
  }
  row_ends = row_ends.To(GetCpuContext());

  std::vector<RaggedShape> ans(ans_size);
  for (int32_t i = 0; i != ans_size; ++i) {
    std::vector<RaggedShapeLayer> axes_out(axes_in.size());
    int32_t row_end = row_ends[i];
    K2_CHECK(row_end >= 0 && row_end <= dim0);
    for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
      axes_out[axis].row_splits =
          axes_in[axis].row_splits.Arange(0, row_end + 1);
      row_end = row_ends[i + (axis + 1) * ans_size];
      axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
      axes_out[axis].cached_tot_size = row_end;
    }
    ans[i] = RaggedShape(axes_out, false);
  }
  return ans;
}

RaggedShape Arange(RaggedShape &src, int32_t axis, int32_t begin, int32_t end,
                   std::pair<int32_t, int32_t> *value_range /*= nullptr*/) {
  NVTX_RANGE(K2_FUNC);
  int32_t num_axes = src.NumAxes();
  K2_CHECK_GE(num_axes, 2);
  K2_CHECK(axis >= 0 && axis < num_axes - 1);
  K2_CHECK(begin >= 0 && begin <= end && end <= src.TotSize(axis));

  if (begin == end) {
    RaggedShape ans = EmptyRaggedShape(src.Context(), num_axes - axis);
    // as begin == end, the user always gets empty values when doing
    // `src.values.Arange(begin, end)`
    if (value_range != nullptr) *value_range = std::make_pair(begin, end);
    return ans;
  }

  src.Populate();
  ContextPtr &c = src.Context();
  const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
  int32_t ans_num_axes = num_axes - axis;
  // `-1` as Layers().size is NumAxes() - 1
  std::vector<RaggedShapeLayer> axes_out(ans_num_axes - 1);

  // get those `row_begin` and `row_end` indexes for all axes in a kernel so we
  // can do just one GPU to CPU memory transfer.
  // the format of `indexes` is: row_begin_axis0, row_end_axis0,
  // row_begin_axis1, row_end_axis1, etc.  axis0, axis1 here are the axes of
  // ans.
  Array1<int32_t> indexes(c, ans_num_axes * 2);
  int32_t *indexes_data = indexes.Data();
  RowSplitsAccessor<5> src_row_splits_acc(src);
  K2_EVAL(
      c, 1, lambda_set_indexes, (int32_t i)->void {
        // we just start a kernel with only one element here.
K2_CHECK_EQ(i, 0); int32_t row_begin = begin, row_end = end; indexes_data[0] = row_begin, indexes_data[1] = row_end; for (int32_t cur_axis = axis; cur_axis < num_axes - 1; ++cur_axis) { row_begin = src_row_splits_acc(cur_axis)[row_begin]; row_end = src_row_splits_acc(cur_axis)[row_end]; int32_t indexes_pos = ((cur_axis - axis) + 1) * 2; indexes_data[indexes_pos] = row_begin; indexes_data[indexes_pos + 1] = row_end; } }); indexes = indexes.To(GetCpuContext()); int32_t row_begin = indexes[0], row_end = indexes[1]; for (int32_t cur_axis = axis; cur_axis < num_axes - 1; ++cur_axis) { axes_out[cur_axis - axis].row_splits = axes_in[cur_axis].row_splits.Arange(row_begin, row_end + 1); int32_t row_id = row_begin; int32_t indexes_pos = ((cur_axis - axis) + 1) * 2; row_begin = indexes[indexes_pos]; row_end = indexes[indexes_pos + 1]; axes_out[cur_axis - axis].row_splits = Minus(axes_out[cur_axis - axis].row_splits, row_begin); axes_out[cur_axis - axis].row_ids = axes_in[cur_axis].row_ids.Arange(row_begin, row_end); axes_out[cur_axis - axis].row_ids = Minus(axes_out[cur_axis - axis].row_ids, row_id); axes_out[cur_axis - axis].cached_tot_size = row_end - row_begin; } if (value_range != nullptr) *value_range = std::make_pair(row_begin, row_end); return RaggedShape(axes_out); } Ragged<int32_t> AddSuffixToRagged(Ragged<int32_t> &src, const Array1<int32_t> &suffix) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); K2_CHECK_GE(num_axes, 2); K2_CHECK_EQ(suffix.Dim(), src.TotSize(num_axes - 2)); ContextPtr &c = src.Context(); Array1<int32_t> dst_values(c, src.NumElements() + suffix.Dim()); RaggedShape dst_shape = ChangeSublistSize(src.shape, 1); // "row_splits1" and "row_ids1" below are actually on the last axis. We name // them with "1" so that we can use "idx01" and "idx0" for those indexes in // lambda, following the naming convention explained in k2/csrc/utils.h const int32_t *dst_row_splits1_data = dst_shape.RowSplits(num_axes - 1).Data(), *dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(), *src_values_data = src.values.Data(), *suffix_data = suffix.Data(); int32_t *dst_values_data = dst_values.Data(); K2_EVAL( c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void { int32_t idx0 = dst_row_ids1_data[idx01]; if (idx01 == dst_row_splits1_data[idx0 + 1] - 1) { // idx01 points to the last element of this row; copy from suffix dst_values_data[idx01] = suffix_data[idx0]; } else { // copy from src int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01]; dst_values_data[idx01] = src_values_data[src_idx01]; } }); return Ragged<int32_t>(dst_shape, dst_values); } Ragged<int32_t> AddPrefixToRagged(Ragged<int32_t> &src, const Array1<int32_t> &prefix) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); K2_CHECK_GE(num_axes, 2); K2_CHECK_EQ(prefix.Dim(), src.TotSize(num_axes - 2)); ContextPtr &c = src.Context(); Array1<int32_t> dst_values(c, src.NumElements() + prefix.Dim()); RaggedShape dst_shape = ChangeSublistSize(src.shape, 1); // "row_splits1" and "row_ids1" below are actually on the last axis. 
We name // them with "1" so that we can use "idx01" and "idx0" for those indexes in // lambda, following the naming convention explained in k2/csrc/utils.h const int32_t *dst_row_splits1_data = dst_shape.RowSplits(num_axes - 1).Data(), *dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(), *src_values_data = src.values.Data(), *prefix_data = prefix.Data(); int32_t *dst_values_data = dst_values.Data(); K2_EVAL( c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void { int32_t idx0 = dst_row_ids1_data[idx01]; if (idx01 == dst_row_splits1_data[idx0]) { // idx01 points to the first element of this row; copy from prefix dst_values_data[idx01] = prefix_data[idx0]; } else { // copy from src int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01] - 1; dst_values_data[idx01] = src_values_data[src_idx01]; } }); return Ragged<int32_t>(dst_shape, dst_values); } RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &renumbering) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(renumbering.NumOldElems(), src.NumElements()); // Make sure final row-ids are populated. src.RowIds(src.NumAxes() - 1); std::vector<RaggedShapeLayer> axes = src.Layers(); axes.back().row_ids = axes.back().row_ids[renumbering.New2Old()]; axes.back().row_splits = renumbering.Old2New()[axes.back().row_splits]; axes.back().cached_tot_size = axes.back().row_ids.Dim(); return RaggedShape(axes); } RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &r_before_last, Renumbering &r_last) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(r_before_last.NumOldElems(), src.TotSize(src.NumAxes() - 2)); K2_CHECK_EQ(r_last.NumOldElems(), src.NumElements()); // Make sure final and before-final row-ids are populated. src.RowIds(src.NumAxes() - 2); src.RowIds(src.NumAxes() - 1); std::vector<RaggedShapeLayer> axes = src.Layers(); // Suppose this shape has 3 axes (0,1,2). Its NumAxes()==3; // axes.size()==2. // r_before_last deals with the numbering on axis 1. // r_last deals with the numbering on axis 2. RaggedShapeLayer &before_last = axes[axes.size() - 2], &last = axes[axes.size() - 1]; int32_t new_tot_size1 = r_before_last.NumNewElems(), new_tot_size2 = r_last.NumNewElems(); ContextPtr c = src.Context(); Array1<int32_t> before_last_row_ids(c, new_tot_size1), last_row_splits(c, new_tot_size1 + 1), last_row_ids(c, new_tot_size2); // The variable names below use this 3-axis assumption but the // code will work for greater number of axes. int32_t *new_row_ids1_data = before_last_row_ids.Data(), *new_row_splits2_data = last_row_splits.Data(), *new_row_ids2_data = last_row_ids.Data(); const int32_t *old_row_ids1_data = before_last.row_ids.Data(), *old_row_splits2_data = last.row_splits.Data(), *old_row_ids2_data = last.row_ids.Data(); const int32_t *idx01_new2old_data = r_before_last.New2Old().Data(), *idx01_old2new_data = r_before_last.Old2New().Data(), *idx012_new2old_data = r_last.New2Old().Data(), *idx012_old2new_data = r_last.Old2New().Data(); ParallelRunner pr(c); { With w(pr.NewStream()); // before_last.row_splits maps from idx0 -> idx01 (contains idx01's). Map // the idx01's; the idx0s stay the same. before_last.row_splits = r_before_last.Old2New()[before_last.row_splits]; } { With w(pr.NewStream()); K2_EVAL( c, new_tot_size1 + 1, lambda_set_row_ids1_and_row_splits2, (int32_t new_idx01)->void { // row_ids1 maps from idx01 -> idx0. Select subset of // idx01's; the idx0 stays the same. 
int32_t old_idx01 = idx01_new2old_data[new_idx01]; if (new_idx01 < new_tot_size1) new_row_ids1_data[new_idx01] = old_row_ids1_data[old_idx01]; // row_splits2 maps from idx01 -> idx012. Map both indexes. // idx01's; the idx0 stays the same. new_row_splits2_data[new_idx01] = idx012_old2new_data[old_row_splits2_data[old_idx01]]; }); } { With w(pr.NewStream()); K2_EVAL( c, new_tot_size2, lambda_set_row_ids2, (int32_t new_idx012)->void { // row_ids2 maps from idx012 -> idx01. Both must be mapped. int32_t old_idx012 = idx012_new2old_data[new_idx012]; int32_t old_idx01 = old_row_ids2_data[old_idx012], new_idx01 = idx01_old2new_data[old_idx01]; new_row_ids2_data[new_idx012] = new_idx01; }); } before_last.row_ids = before_last_row_ids; before_last.cached_tot_size = new_tot_size1; last.row_splits = last_row_splits; last.row_ids = last_row_ids; last.cached_tot_size = new_tot_size2; return RaggedShape(axes); } RaggedShape EmptyRaggedShape(ContextPtr &c, int32_t num_axes) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(num_axes, 2); std::vector<RaggedShapeLayer> axes(num_axes - 1); axes[0].row_splits = Array1<int32_t>(c, 1, 0); // row_ids will be the empty vector, with context `c`. axes[0].row_ids = axes[0].row_splits.Range(0, 0); axes[0].cached_tot_size = 0; for (int32_t a = 1; a + 1 < num_axes; ++a) axes[a] = axes[0]; return RaggedShape(axes); } Array1<int32_t> GetDecreasingSizeOrder(RaggedShape &shape) { NVTX_RANGE(K2_FUNC); ContextPtr &c = shape.Context(); Array1<int32_t> sizes = RowSplitsToSizes(shape.RowSplits(1)); Array1<int32_t> index_map; Sort<int32_t, GreaterThan<int32_t>>(&sizes, &index_map); return index_map; } RaggedShape GetLayer(const RaggedShape &src, int32_t layer) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(layer, 0); K2_CHECK_LT(layer, src.NumAxes() - 1); std::vector<RaggedShapeLayer> layers; layers.push_back(src.Layers()[layer]); bool check = false; return RaggedShape(layers, check); } void DecomposeRaggedShape(const RaggedShape &src, int32_t axis, RaggedShape *top, RaggedShape *bottom) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(axis, 0); K2_CHECK_LT(axis, src.NumAxes() - 1); const std::vector<RaggedShapeLayer> &src_layers = src.Layers(); std::vector<RaggedShapeLayer> top_layers(axis), bottom_layers(src_layers.size() - axis); int32_t src_size = static_cast<int32_t>(src_layers.size()); for (int32_t i = 0; i < axis; ++i) top_layers[i] = src_layers[i]; for (int32_t i = axis; i < src_size; ++i) bottom_layers[i - axis] = src_layers[i]; *top = RaggedShape(top_layers); *bottom = RaggedShape(bottom_layers); } RaggedShape RemoveEmptyLists(RaggedShape &src_shape, int32_t axis, Renumbering *renumbering_out) { NVTX_RANGE(K2_FUNC); if (axis == 0) { return RemoveEmptyListsAxis0(src_shape, renumbering_out); } RaggedShape top_shape, bottom_shape; DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape); Renumbering r_temp; if (!renumbering_out) renumbering_out = &r_temp; bottom_shape = RemoveEmptyListsAxis0(bottom_shape, renumbering_out); top_shape = SubsampleRaggedShape(top_shape, *renumbering_out); return ComposeRaggedShapes(top_shape, bottom_shape); } RaggedShape RemoveSomeEmptyLists(RaggedShape &src_shape, int32_t axis, Renumbering &renumbering) { NVTX_RANGE(K2_FUNC); if (axis == 0) { return RenumberAxis0Simple(src_shape, renumbering); } RaggedShape top_shape, bottom_shape; DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape); bottom_shape = RenumberAxis0Simple(bottom_shape, renumbering); top_shape = SubsampleRaggedShape(top_shape, renumbering); return ComposeRaggedShapes(top_shape, bottom_shape); } 
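/* Illustrative usage sketch (not in the original source; `shape` is assumed to
   be some 3-axis RaggedShape), removing the empty sub-lists on axis 1 while
   keeping the renumbering:

     Renumbering r;
     RaggedShape cleaned = RemoveEmptyLists(shape, 1, &r);
     // e.g. [ [ [ x ] [ ] ] [ [ x x ] ] ]  ->  [ [ [ x ] ] [ [ x x ] ] ],
     // with r.New2Old() == [ 0 2 ], i.e. the kept positions on axis 1.
*/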
RaggedShape RemoveEmptyListsAxis0(RaggedShape &src_shape, Renumbering *renumbering_out) { NVTX_RANGE(K2_FUNC); Renumbering r_temp; if (!renumbering_out) renumbering_out = &r_temp; ContextPtr &c = src_shape.Context(); int32_t num_lists = src_shape.Dim0(); *renumbering_out = Renumbering(c, num_lists); const int32_t *row_splits_data = src_shape.RowSplits(1).Data(); char *keep_data = renumbering_out->Keep().Data(); K2_EVAL( c, num_lists, lambda_set_keep, (int32_t i)->void { keep_data[i] = (row_splits_data[i + 1] != row_splits_data[i]); }); return RenumberAxis0Simple(src_shape, *renumbering_out); } RaggedShape RenumberAxis0Simple(RaggedShape &src_shape, Renumbering &renumbering) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(renumbering.NumOldElems(), src_shape.Dim0()); ContextPtr c = src_shape.Context(); src_shape.RowIds(1); // make sure RowIds(1) is populated. std::vector<RaggedShapeLayer> layers = src_shape.Layers(); int32_t num_layers = layers.size(); int32_t new_num_lists = renumbering.NumNewElems(), num_elems = src_shape.TotSize(1); // unchanged old vs. new. Array1<int32_t> new_row_splits(c, new_num_lists + 1), new_row_ids = renumbering.Old2New()[src_shape.RowIds(1)]; int32_t *new_row_splits_data = new_row_splits.Data(); const int32_t *old_row_splits_data = src_shape.RowSplits(1).Data(), *new2old_data = renumbering.New2Old().Data(); // set `new_row_splits_data`. #ifndef NDEBUG { Array1<int32_t> is_ok(c, 1, 1); int32_t *is_ok_data = is_ok.Data(); int32_t old_num_lists = src_shape.Dim0(); const int32_t *old2new_data = renumbering.Old2New().Data(); K2_EVAL( c, old_num_lists, lambda_check_preconditions, (int32_t i)->void { if (old2new_data[i + 1] == old2new_data[i]) { // This list not kept if (old_row_splits_data[i + 1] != old_row_splits_data[i]) { // this list was nonempty... is_ok_data[0] = 0; } } }); K2_CHECK_NE(is_ok[0], 0) << "RenumberAxis0Simple(): preconditions not met; " "renumbering removes nonempty lists."; } #endif K2_EVAL( c, new_num_lists + 1, lambda_set_new_row_splits, (int32_t new_i)->void { int32_t j; if (new_i == new_num_lists) { j = num_elems; } else { int32_t old_i = new2old_data[new_i]; j = old_row_splits_data[old_i]; } new_row_splits_data[new_i] = j; }); layers[0].row_splits = new_row_splits; layers[0].row_ids = new_row_ids; // no need to set its cached_tot_size; that didn't change. return RaggedShape(layers); } RaggedShape CoveringShape(int32_t num_srcs, RaggedShape **srcs) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_srcs, 0); if (num_srcs == 1) return *srcs[0]; K2_CHECK_EQ(srcs[0]->NumAxes(), 2); int32_t dim0 = srcs[0]->Dim0(); ContextPtr &c = srcs[0]->Context(); for (int32_t i = 1; i != num_srcs; ++i) { K2_CHECK_EQ(srcs[i]->NumAxes(), 2); K2_CHECK_EQ(srcs[i]->Dim0(), dim0); K2_CHECK(c->IsCompatible(*srcs[i]->Context())); } // get row splits of srcs Array1<int32_t *> row_splits_ptrs(GetCpuContext(), num_srcs); int32_t **splits_ptr_data = row_splits_ptrs.Data(); for (int32_t i = 0; i != num_srcs; ++i) { splits_ptr_data[i] = srcs[i]->RowSplits(1).Data(); } row_splits_ptrs = row_splits_ptrs.To(c); int32_t **src_row_splits_ptr_data = row_splits_ptrs.Data(); RaggedShape shape = RegularRaggedShape(c, dim0, num_srcs); Array1<int32_t> values(c, dim0 * num_srcs); // elements in row i of `sublist_sizes` are the sizes of row i // of src[0], src[1]... 
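  // Illustrative example (not in the original source): with two sources
  // [ [ x x ] [ x ] ] and [ [ x ] [ x x x ] ], sublist_sizes is
  // [ [ 2 1 ] [ 1 3 ] ], and the shape returned below is the row-wise maximum,
  // [ [ x x ] [ x x x ] ].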
Ragged<int32_t> sublist_sizes(shape, values); int32_t *values_data = sublist_sizes.values.Data(); K2_EVAL2( c, dim0, num_srcs, lambda_set_sublist_sizes, (int32_t i, int32_t j)->void { values_data[i * num_srcs + j] = src_row_splits_ptr_data[j][i + 1] - src_row_splits_ptr_data[j][i]; }); Array1<int32_t> ans_row_splits(c, dim0 + 1); Array1<int32_t> ans_row_sizes = ans_row_splits.Arange(0, dim0); MaxPerSublist(sublist_sizes, 0, &ans_row_sizes); ExclusiveSum(ans_row_sizes, &ans_row_splits); return RaggedShape2(&ans_row_splits, nullptr, -1); } Array1<int32_t> CoveringShapeForwardMap(RaggedShape &src, RaggedShape &covering) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(src.NumAxes(), 2); K2_CHECK_EQ(covering.NumAxes(), 2); K2_CHECK_EQ(src.Dim0(), covering.Dim0()); int32_t num_elems = covering.NumElements(); K2_CHECK_GE(num_elems, src.NumElements()); ContextPtr c = GetContext(src, covering); Array1<int32_t> ans(c, num_elems); int32_t *ans_data = ans.Data(); const int32_t *covering_row_splits_data = covering.RowSplits(1).Data(), *covering_row_ids_data = covering.RowIds(1).Data(), *src_row_splits_data = src.RowSplits(1).Data(); K2_EVAL( c, num_elems, lambda_set_value, (int32_t covering_idx01)->void { int32_t covering_idx0 = covering_row_ids_data[covering_idx01], covering_idx0x = covering_row_splits_data[covering_idx0], covering_idx1 = covering_idx01 - covering_idx0x; // src and covering has the same dim0 int32_t src_idx0x = src_row_splits_data[covering_idx0], src_cur_row_size = src_row_splits_data[covering_idx0 + 1] - src_idx0x; K2_DCHECK_GE( covering_row_splits_data[covering_idx0 + 1] - covering_idx0x, src_cur_row_size); if (covering_idx1 >= src_cur_row_size) ans_data[covering_idx01] = -1; else ans_data[covering_idx01] = src_idx0x + covering_idx1; // src_idx01 }); return ans; } void RaggedShapeAxis0Splitter::Init(RaggedShape &src) { NVTX_RANGE(K2_FUNC); int32_t num_layers = src.NumLayers(), num_layers_out = num_layers - 1, dim0 = src.Dim0(); K2_CHECK_LE(num_layers_out, 4); // If this fails, add something to the 4s and // 5s here and in the header. K2_CHECK_GT(num_layers, 1); ContextPtr c = src.Context(); composite_row_splits_ = Array2<int32_t>(c, num_layers + 1, dim0 + 1); Array2Accessor<int32_t> composite_row_splits_acc = composite_row_splits_.Accessor(); RowSplitsAccessor<5> src_row_splits_acc(src); SmallVec<int32_t *, 5> row_splits_out_acc; K2_CHECK(num_layers_out <= 5); Array1<int32_t> garbage1(c, dim0 + dim0 + 1); // won't be read. row_splits_out_acc.data[0] = garbage1.Data(); for (int32_t l = 0; l < num_layers_out; l++) { row_splits_out_[l] = Array1<int32_t>(c, src.TotSize(l + 1) + dim0 + 1); row_splits_out_acc.data[l + 1] = row_splits_out_[l].Data(); } // set composite_row_splits_ and also those elements of // the output row_splits which are bound to be zero. K2_EVAL( c, dim0 + 1, lambda_set_composite_row_splits, (int32_t i)->void { int32_t cur_pos = i; composite_row_splits_acc(0, i) = cur_pos; for (int32_t l = 0; l < num_layers; l++) { // The following statement sets the zero at the beginning of each // row_splits, plus a final zero that we write to avoid an // if-statement. row_splits_out_acc.data[l][cur_pos + i] = 0; cur_pos = src_row_splits_acc.ptrs[l][cur_pos]; composite_row_splits_acc(l + 1, i) = cur_pos; } }); composite_row_splits_cpu_ = composite_row_splits_.To(GetCpuContext()); // Right now to_idx0 maps from an idx0 to an idx0 (identity map); next time it // will map from an idx01 to to an idx0, then idx012 to idx0 (all w.r.t. src). 
// It doesn't include the extra last element like a row_splits would; it's // like a composite row_ids vector: row_ids1, row_ids12 and so on. Array1<int32_t> to_idx0 = composite_row_splits_.Row(0).Arange(0, dim0); for (int32_t layer = 0; layer < num_layers_out; layer++) row_ids_out_[layer] = Array1<int32_t>(c, src.TotSize(layer + 2)); Array1<int32_t> garbage2(c, src.TotSize(1)); // corresponds to row_ids_out_[-1]. for (int32_t layer = 0; layer <= num_layers_out; layer++) { // num_elems is the number of elements we process in this kernel. int32_t num_elems = src.TotSize(layer + 1); // The names here are valid for layer == 1; this just happens to be useful // for exposition. const int32_t *src_row_ids2_data = src.RowIds(layer + 1).Data(), *idx01_to_idx0_data = to_idx0.Data(); int32_t *row_ids1_out_data = (layer == 0 ? garbage2.Data() : row_ids_out_[layer - 1].Data()); if (layer < num_layers_out) { Array1<int32_t> to_idx0_next(c, num_elems); int32_t *row_splits2_out_data = row_splits_out_[layer].Data(), *idx012_to_idx0_data = to_idx0_next.Data(); const int32_t *src_row_splits3_data = src.RowSplits(layer + 2).Data(); // row_splits3 maps from idx012 -> idx012x. // remember: the names are valid for layer == 1, just as an example. K2_EVAL( c, num_elems, lambda_set_row_splits_and_ids, (int32_t src_idx012)->void { int32_t src_idx01 = src_row_ids2_data[src_idx012], src_idx012x_next = src_row_splits3_data[src_idx012 + 1], src_idx0 = idx01_to_idx0_data[src_idx01]; idx012_to_idx0_data[src_idx012] = src_idx0; // <-- output here. int32_t src_idx0x = composite_row_splits_acc(layer, src_idx0), src_idx0xxx = composite_row_splits_acc(layer + 2, src_idx0), src_idx1 = src_idx01 - src_idx0x, src_idx12x_next = src_idx012x_next - src_idx0xxx, out_idx0 = src_idx1, out_idx01x_next = src_idx12x_next; row_ids1_out_data[src_idx012] = out_idx0; // below, the "+1" is because each element handles the next one // within this output row_splits array, with the zeros (1st elem of // each output row_splits array) handled by // lambda_set_composite_row_splits. The "+ idx0" is to make room // for the extra final element of all the previous row_splits // arrays. row_splits2_out_data[src_idx012 + 1 + src_idx0] = out_idx01x_next; }); to_idx0 = to_idx0_next; } else { // The next code is a subset of the other branch. K2_EVAL( c, num_elems, lambda_set_row_ids, (int32_t src_idx012)->void { int32_t src_idx01 = src_row_ids2_data[src_idx012], idx0 = idx01_to_idx0_data[src_idx01], src_idx0x = composite_row_splits_acc(layer, idx0), src_idx1 = src_idx01 - src_idx0x, out_idx0 = src_idx1; row_ids1_out_data[src_idx012] = out_idx0; }); } } } RaggedShape RaggedShapeAxis0Splitter::GetElement(int32_t i, int32_t *elem_offset) { NVTX_RANGE(K2_FUNC); int32_t num_layers_out = composite_row_splits_.Dim0() - 2; std::vector<RaggedShapeLayer> out; out.reserve(num_layers_out); auto composite_row_splits_cpu_acc = composite_row_splits_cpu_.Accessor(); for (int32_t layer = 0; layer < num_layers_out; layer++) { int32_t row_begin = composite_row_splits_cpu_acc(layer + 1, i), row_end = composite_row_splits_cpu_acc(layer + 1, i + 1), elem_begin = composite_row_splits_cpu_acc(layer + 2, i), elem_end = composite_row_splits_cpu_acc(layer + 2, i + 1), num_elems = elem_end - elem_begin; if (layer + 1 == num_layers_out && elem_offset != nullptr) *elem_offset = elem_begin; // the "+ i" is to account for the extra final elements of preceding // row_splits vectors; the + 1 is for the final element of this one. 
Array1<int32_t> splits = row_splits_out_[layer].Arange(row_begin + i, row_end + i + 1), ids = row_ids_out_[layer].Arange(elem_begin, elem_end); out.emplace_back(RaggedShapeLayer{splits, ids, num_elems}); } // TODO: when thoroughly debugged, maybe turn off validation? return RaggedShape(out); } namespace hash_internal { // Utilities for hashing strings (actually: sequences of int32_t). /* T can be int32_t or int64_t. The following code shows what we are computing: std::vector<int32_t> input; T hash1 = 13, hash2 = 787; for (size_t i = 0; i < input.size(); i++) { hash1 = 31 * hash1 + input[i]; hash2 = 167 * hash2 + input[i]; } hash = hash1 + 104729 * hash2; I'm not sure that these constants are very optimal, but they are primes. The actual calculation is a little different from the above because of the need to do it via a reduction. */ template <typename T> struct Hash { T hash1; T hash2; T product1; T product2; // Would like this to be a POD type so not adding the following constructor: // Hash(int32_t i): hash1(i), hash2(i), product1(31), product2(167) { } // .. but implementing it in HashInputIterator. }; template <typename T> struct HashInputIterator { explicit __host__ __device__ __forceinline__ HashInputIterator(const int32_t *i) // NOLINT : i_(i) {} __device__ __forceinline__ Hash<T> operator[](int32_t idx) const { return Hash<T>{i_[idx], i_[idx], 31, 167}; } __device__ __forceinline__ HashInputIterator operator+(int32_t offset) const { return HashInputIterator(i_ + offset); } const int32_t *i_; }; template <typename T> struct HashOutputIteratorDeref { // this is what you get when you dereference // HashOutputIterator, it pretends to be a // Hash<T> but really only stores the `idx` // member. explicit __device__ __forceinline__ HashOutputIteratorDeref(T *t) : t_(t) {} __device__ __forceinline__ HashOutputIteratorDeref &operator=( const Hash<T> &h) { *t_ = h.hash1 + 13 * h.product1 + 104729 * h.hash2 + (104729 * 787) * h.product2; return *this; } T *t_; }; template <typename T> struct HashOutputIterator { // outputs just the index of the pair. 
explicit HashOutputIterator(T *t) : t_(t) {} __device__ __forceinline__ HashOutputIteratorDeref<T> operator[]( int32_t idx) const { return HashOutputIteratorDeref<T>(t_ + idx); } __device__ __forceinline__ HashOutputIterator operator+(size_t offset) { return HashOutputIterator{t_ + offset}; } T *t_; }; template <typename T> struct HashCombineOp { __device__ __forceinline__ Hash<T> operator()(const Hash<T> &a, const Hash<T> &b) const { return Hash<T>{a.hash1 * b.product1 + b.hash1, a.hash2 * b.product2 + b.hash2, a.product1 * b.product1, a.product2 * b.product2}; } }; } // namespace hash_internal } // namespace k2 namespace std { // those below typedefs are required by cub::DeviceSegmentedReduce:Reduce template <typename T> struct iterator_traits<k2::hash_internal::HashInputIterator<T>> { typedef k2::hash_internal::Hash<T> value_type; }; template <typename T> struct iterator_traits<k2::hash_internal::HashOutputIterator<T>> { typedef k2::hash_internal::Hash<T> value_type; typedef k2::hash_internal::HashOutputIteratorDeref<T> reference; }; } // namespace std namespace k2 { template <typename T> Array1<T> ComputeHash(Ragged<int32_t> &src) { NVTX_RANGE(K2_FUNC); int32_t last_axis = src.NumAxes() - 1; const Array1<int32_t> &row_splits_array = src.RowSplits(last_axis); int32_t num_rows = row_splits_array.Dim() - 1; ContextPtr &c = src.Context(); Array1<T> ans(c, num_rows); const int32_t *row_splits = row_splits_array.Data(); const int32_t *values_data = src.values.Data(); T *output_data = ans.Data(); if (c->GetDeviceType() == kCpu) { int32_t j = row_splits[0]; for (int32_t i = 0; i < num_rows; ++i) { T hash1 = 13, hash2 = 787; int32_t row_end = row_splits[i + 1]; for (; j < row_end; ++j) { T elem = values_data[j]; hash1 = 31 * hash1 + elem; hash2 = 167 * hash2 + elem; } T hash = hash1 + 104729 * hash2; output_data[i] = hash; } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); hash_internal::HashInputIterator<T> input_iter(values_data); hash_internal::HashOutputIterator<T> output_iter(output_data); hash_internal::HashCombineOp<T> op; hash_internal::Hash<T> initial_hash{ 0, 0, 1, 1 }; // This code is based on the example here: // https://nvlabs.github.io/cub/structcub_1_1_device_segmented_reduce.html std::size_t temp_storage_bytes = 0; // the first time is to determine temporary device storage requirements K2_CUDA_SAFE_CALL(cub::DeviceSegmentedReduce::Reduce( nullptr, temp_storage_bytes, input_iter, output_iter, num_rows, row_splits, row_splits + 1, op, initial_hash, c->GetCudaStream())); Array1<int8_t> d_temp_storage(c, temp_storage_bytes); K2_CUDA_SAFE_CALL(cub::DeviceSegmentedReduce::Reduce( d_temp_storage.Data(), temp_storage_bytes, input_iter, output_iter, num_rows, row_splits, row_splits + 1, op, initial_hash, c->GetCudaStream())); } return ans; } Ragged<int32_t> UniqueSequences(Ragged<int32_t> &src, Ragged<int32_t> *num_repeats /*=nullptr*/, Array1<int32_t> *new2old_indexes /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); if (src.NumAxes() == 2) { // Put 'fake' layer at front, process, then remove. 
Ragged<int32_t> temp = Unsqueeze(src, 0); return UniqueSequences(temp, num_repeats, new2old_indexes).RemoveAxis(0); } Array1<int64_t> hashes = ComputeHash<int64_t>(src); int32_t hashes_dim = hashes.Dim(); Array1<int32_t> order(c, hashes_dim); // Using the layer before the last layer of `src` for the shape of // `ragged_hashes` Ragged<int64_t> ragged_hashes(GetLayer(src.shape, src.shape.NumLayers() - 2), hashes); SortSublists<int64_t, LessThan<int64_t> >(&ragged_hashes, &order); Renumbering renumber_lists(c, hashes.Dim()); const int32_t *ragged_hashes_row_ids_data = ragged_hashes.RowIds(1).Data(), *ragged_hashes_row_splits_data = ragged_hashes.RowSplits(1).Data(); const int64_t *ragged_hashes_data = ragged_hashes.values.Data(); char *keep_list_data = renumber_lists.Keep().Data(); K2_EVAL( c, hashes_dim, lambda_set_keep, (int32_t i)->void { char keep; if (i == ragged_hashes_row_splits_data[ragged_hashes_row_ids_data[i]]) { // this is the first element of its sub-list in `ragged_hashes`. keep = 1; } else { keep = (ragged_hashes_data[i] != ragged_hashes_data[i - 1]); } keep_list_data[i] = keep; }); Array1<int32_t> new2old = renumber_lists.New2Old(), new2unsorted = order[new2old]; Ragged<int32_t> ans = Index(src, src.NumAxes() - 2, new2unsorted); if (num_repeats != nullptr) { int32_t new2old_dim = new2old.Dim(); Array1<int32_t> num_repeats_array(c, new2old_dim); const int32_t *new2old_data = new2old.Data(); int32_t *num_repeats_data = num_repeats_array.Data(); K2_EVAL( c, new2old_dim, set_num_repeats, (int32_t i)->void { if (i < new2old_dim - 1) { num_repeats_data[i] = new2old_data[i + 1] - new2old_data[i]; } else { num_repeats_data[i] = hashes_dim - new2old_data[i]; } }); *num_repeats = Ragged<int32_t>(GetLayer(ans.shape, ans.NumAxes() - 3), num_repeats_array); } if (new2old_indexes != nullptr) { *new2old_indexes = std::move(new2unsorted); } return ans; } // Instantiate template for int64 and int32. template Array1<int64_t> ComputeHash(Ragged<int32_t> &src); template Array1<int32_t> ComputeHash(Ragged<int32_t> &src); } // namespace k2
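// A minimal, standalone host-side sketch (plain C++, not taken from k2; type and function names
// are illustrative only) spelling out why the segmented reduction used in ComputeHash above
// reproduces the sequential hash described in the hash_internal comment: folding elements mapped
// to {x, x, 31, 167} with HashCombineOp from the initial value {0, 0, 1, 1} and then applying the
// output projection gives exactly hash1 + 104729 * hash2 with seeds 13 and 787. The test vector
// is kept short so the intermediate values stay within int64_t range.
#include <cassert>
#include <cstdint>
#include <vector>

struct H { int64_t h1, h2, p1, p2; };  // mirrors hash_internal::Hash<int64_t>

// mirrors hash_internal::HashCombineOp; the operation is associative, so the
// reduction may group segment elements in any order.
static H combine(const H &a, const H &b) {
  return {a.h1 * b.p1 + b.h1, a.h2 * b.p2 + b.h2, a.p1 * b.p1, a.p2 * b.p2};
}

// the per-row loop from the CPU branch of ComputeHash
static int64_t sequential_hash(const std::vector<int32_t> &v) {
  int64_t hash1 = 13, hash2 = 787;
  for (int32_t x : v) {
    hash1 = 31 * hash1 + x;
    hash2 = 167 * hash2 + x;
  }
  return hash1 + 104729 * hash2;
}

// the reduction-based formulation: each element enters as {x, x, 31, 167}
// (HashInputIterator) and the result is projected as in HashOutputIteratorDeref.
static int64_t reduced_hash(const std::vector<int32_t> &v) {
  H acc{0, 0, 1, 1};  // the initial_hash passed to cub::DeviceSegmentedReduce::Reduce
  for (int32_t x : v) acc = combine(acc, H{x, x, 31, 167});
  return acc.h1 + 13 * acc.p1 + 104729 * acc.h2 + (104729LL * 787) * acc.p2;
}

int main() {
  std::vector<int32_t> v{3, 1, 4, 1};
  assert(sequential_hash(v) == reduced_hash(v));  // the two formulations agree
  return 0;
}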
b73f902fc980c837f3ee509d44af8b1c6fcdee37.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <cuspatial/haversine.hpp> #include <utilities/legacy/error_utils.hpp> #include <tests/utilities/legacy/cudf_test_utils.cuh> #include <tests/utilities/legacy/column_wrapper.cuh> #include <tests/utilities/legacy/cudf_test_fixtures.h> struct HaversineToy : public GdfTest { }; TEST_F(HaversineToy, haversinetest) { const uint32_t num_point=3; double h_point_x[]={ -74.0060, 2.3522,151.2093}; double h_point_y[]={40.7128,48.8566,-33.8688}; const char *cities[]={"New York","Paris","Sydney"}; double *h_pair_x1=new double[num_point*num_point]; double *h_pair_y1=new double[num_point*num_point]; double *h_pair_x2=new double[num_point*num_point]; double *h_pair_y2=new double[num_point*num_point]; CUDF_EXPECTS(h_pair_x1!=nullptr&&h_pair_y1!=nullptr&&h_pair_x2!=nullptr&&h_pair_y2!=nullptr, "invalid point pair x/y arrays"); for(size_t i=0; i<num_point;i++) for(size_t j=0; j<num_point;j++) { h_pair_x1[i*num_point+j]=h_point_x[i]; h_pair_y1[i*num_point+j]=h_point_y[i]; h_pair_x2[i*num_point+j]=h_point_x[j]; h_pair_y2[i*num_point+j]=h_point_y[j]; } cudf::test::column_wrapper<double> point_x1_wrapp{std::vector<double>(h_pair_x1,h_pair_x1+num_point*num_point)}; cudf::test::column_wrapper<double> point_y1_wrapp{std::vector<double>(h_pair_y1,h_pair_y1+num_point*num_point)}; cudf::test::column_wrapper<double> point_x2_wrapp{std::vector<double>(h_pair_x2,h_pair_x2+num_point*num_point)}; cudf::test::column_wrapper<double> point_y2_wrapp{std::vector<double>(h_pair_y2,h_pair_y2+num_point*num_point)}; gdf_column dist=cuspatial::haversine_distance( *(point_x1_wrapp.get()), *(point_y1_wrapp.get()),*(point_x2_wrapp.get()),*(point_y2_wrapp.get())); double *h_dist=new double[dist.size]; CUDA_TRY(hipMemcpy(h_dist, dist.data, dist.size*sizeof(double), hipMemcpyDeviceToHost)); CUDF_EXPECTS(fabs(h_dist[0])<1e-10&&fabs(h_dist[4])<1e-10&&fabs(h_dist[8])<1e-10, "distance between the same points should be close to 0"); std::cout<<"dist("<<cities[0]<<","<<cities[1]<<")="<<h_dist[1]<<std::endl; std::cout<<"dist("<<cities[0]<<","<<cities[2]<<")="<<h_dist[2]<<std::endl; std::cout<<"dist("<<cities[1]<<","<<cities[0]<<")="<<h_dist[3]<<std::endl; std::cout<<"dist("<<cities[1]<<","<<cities[2]<<")="<<h_dist[5]<<std::endl; std::cout<<"dist("<<cities[2]<<","<<cities[0]<<")="<<h_dist[6]<<std::endl; std::cout<<"dist("<<cities[2]<<","<<cities[1]<<")="<<h_dist[7]<<std::endl; delete[] h_pair_x1; delete[] h_pair_y1; delete[] h_pair_x2; delete[] h_pair_y2; delete[] h_dist; }
b73f902fc980c837f3ee509d44af8b1c6fcdee37.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <cuspatial/haversine.hpp> #include <utilities/legacy/error_utils.hpp> #include <tests/utilities/legacy/cudf_test_utils.cuh> #include <tests/utilities/legacy/column_wrapper.cuh> #include <tests/utilities/legacy/cudf_test_fixtures.h> struct HaversineToy : public GdfTest { }; TEST_F(HaversineToy, haversinetest) { const uint32_t num_point=3; double h_point_x[]={ -74.0060, 2.3522,151.2093}; double h_point_y[]={40.7128,48.8566,-33.8688}; const char *cities[]={"New York","Paris","Sydney"}; double *h_pair_x1=new double[num_point*num_point]; double *h_pair_y1=new double[num_point*num_point]; double *h_pair_x2=new double[num_point*num_point]; double *h_pair_y2=new double[num_point*num_point]; CUDF_EXPECTS(h_pair_x1!=nullptr&&h_pair_y1!=nullptr&&h_pair_x2!=nullptr&&h_pair_y2!=nullptr, "invalid point pair x/y arrays"); for(size_t i=0; i<num_point;i++) for(size_t j=0; j<num_point;j++) { h_pair_x1[i*num_point+j]=h_point_x[i]; h_pair_y1[i*num_point+j]=h_point_y[i]; h_pair_x2[i*num_point+j]=h_point_x[j]; h_pair_y2[i*num_point+j]=h_point_y[j]; } cudf::test::column_wrapper<double> point_x1_wrapp{std::vector<double>(h_pair_x1,h_pair_x1+num_point*num_point)}; cudf::test::column_wrapper<double> point_y1_wrapp{std::vector<double>(h_pair_y1,h_pair_y1+num_point*num_point)}; cudf::test::column_wrapper<double> point_x2_wrapp{std::vector<double>(h_pair_x2,h_pair_x2+num_point*num_point)}; cudf::test::column_wrapper<double> point_y2_wrapp{std::vector<double>(h_pair_y2,h_pair_y2+num_point*num_point)}; gdf_column dist=cuspatial::haversine_distance( *(point_x1_wrapp.get()), *(point_y1_wrapp.get()),*(point_x2_wrapp.get()),*(point_y2_wrapp.get())); double *h_dist=new double[dist.size]; CUDA_TRY(cudaMemcpy(h_dist, dist.data, dist.size*sizeof(double), cudaMemcpyDeviceToHost)); CUDF_EXPECTS(fabs(h_dist[0])<1e-10&&fabs(h_dist[4])<1e-10&&fabs(h_dist[8])<1e-10, "distance between the same points should be close to 0"); std::cout<<"dist("<<cities[0]<<","<<cities[1]<<")="<<h_dist[1]<<std::endl; std::cout<<"dist("<<cities[0]<<","<<cities[2]<<")="<<h_dist[2]<<std::endl; std::cout<<"dist("<<cities[1]<<","<<cities[0]<<")="<<h_dist[3]<<std::endl; std::cout<<"dist("<<cities[1]<<","<<cities[2]<<")="<<h_dist[5]<<std::endl; std::cout<<"dist("<<cities[2]<<","<<cities[0]<<")="<<h_dist[6]<<std::endl; std::cout<<"dist("<<cities[2]<<","<<cities[1]<<")="<<h_dist[7]<<std::endl; delete[] h_pair_x1; delete[] h_pair_y1; delete[] h_pair_x2; delete[] h_pair_y2; delete[] h_dist; }
d01bad999b8b72fcd9cf18605e6239c39e5bbfff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_left_bndcon; int xdim0_left_bndcon_h = -1; #undef OPS_ACC0 #define OPS_ACC0(x,y) (x+xdim0_left_bndcon*(y)) //user function __device__ void left_bndcon_gpu(double *A, const int *idx) { A[OPS_ACC0(0,0)] = sin(pi * (idx[1]+1) / (jmax+1)); } #undef OPS_ACC0 __global__ void ops_left_bndcon( double* __restrict arg0, int arg_idx0, int arg_idx1, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int arg_idx[2]; arg_idx[0] = arg_idx0+idx_x; arg_idx[1] = arg_idx1+idx_y; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_left_bndcon; if (idx_x < size0 && idx_y < size1) { left_bndcon_gpu(arg0, arg_idx); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_left_bndcon(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_left_bndcon_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif //Timing double t1,t2,c1,c2; ops_arg args[2] = { arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,2,range,2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2,"left_bndcon"); OPS_kernels[2].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int arg_idx[2]; #ifdef OPS_MPI #ifdef OPS_LAZY ops_block block = desc->block; sub_block_list sb = OPS_sub_block_list[block->index]; #endif arg_idx[0] = sb->decomp_disp[0]+start[0]; arg_idx[1] = sb->decomp_disp[1]+start[1]; #else arg_idx[0] = start[0]; arg_idx[1] = start[1]; #endif int xdim0 = args[0].dat->size[0]; if (xdim0 != xdim0_left_bndcon_h) { hipMemcpyToSymbol( xdim0_left_bndcon, &xdim0, sizeof(int) ); xdim0_left_bndcon_h = xdim0; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = (OPS_soa ? 
args[0].dat->type_size : args[0].dat->elem_size); char *p_a[2]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_left_bndcon), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], arg_idx[0], arg_idx[1],x_size, y_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[2].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); } } #ifdef OPS_LAZY void ops_par_loop_left_bndcon(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 2; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 2; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg*)malloc(2*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->function = ops_par_loop_left_bndcon_execute; if (OPS_diags > 1) { ops_timing_realloc(2,"left_bndcon"); } ops_enqueue_kernel(desc); } #endif
d01bad999b8b72fcd9cf18605e6239c39e5bbfff.cu
// // auto-generated by ops.py // __constant__ int xdim0_left_bndcon; int xdim0_left_bndcon_h = -1; #undef OPS_ACC0 #define OPS_ACC0(x,y) (x+xdim0_left_bndcon*(y)) //user function __device__ void left_bndcon_gpu(double *A, const int *idx) { A[OPS_ACC0(0,0)] = sin(pi * (idx[1]+1) / (jmax+1)); } #undef OPS_ACC0 __global__ void ops_left_bndcon( double* __restrict arg0, int arg_idx0, int arg_idx1, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int arg_idx[2]; arg_idx[0] = arg_idx0+idx_x; arg_idx[1] = arg_idx1+idx_y; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_left_bndcon; if (idx_x < size0 && idx_y < size1) { left_bndcon_gpu(arg0, arg_idx); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_left_bndcon(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_left_bndcon_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif //Timing double t1,t2,c1,c2; ops_arg args[2] = { arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,2,range,2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2,"left_bndcon"); OPS_kernels[2].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int arg_idx[2]; #ifdef OPS_MPI #ifdef OPS_LAZY ops_block block = desc->block; sub_block_list sb = OPS_sub_block_list[block->index]; #endif arg_idx[0] = sb->decomp_disp[0]+start[0]; arg_idx[1] = sb->decomp_disp[1]+start[1]; #else arg_idx[0] = start[0]; arg_idx[1] = start[1]; #endif int xdim0 = args[0].dat->size[0]; if (xdim0 != xdim0_left_bndcon_h) { cudaMemcpyToSymbol( xdim0_left_bndcon, &xdim0, sizeof(int) ); xdim0_left_bndcon_h = xdim0; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = (OPS_soa ? 
args[0].dat->type_size : args[0].dat->elem_size); char *p_a[2]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data ops_left_bndcon<<<grid, tblock >>> ( (double *)p_a[0], arg_idx[0], arg_idx[1],x_size, y_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[2].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); } } #ifdef OPS_LAZY void ops_par_loop_left_bndcon(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 2; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 2; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg*)malloc(2*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->function = ops_par_loop_left_bndcon_execute; if (OPS_diags > 1) { ops_timing_realloc(2,"left_bndcon"); } ops_enqueue_kernel(desc); } #endif
96a0b6c8f64f5ef27296f1b8035da49c1a0eaf86.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"rbsspf_motion.cuh" //==================================================== //1: init control and particles __host__ void hostCalculateMotionControl(TrackerSampleControl & control) { double ratio=1,vratio=1,maxratio=1; CALRATIO(ratio,vratio,maxratio,control.motionoffset.a,control.motionprec.a); CALRATIO(ratio,vratio,maxratio,control.motionoffset.v,control.motionprec.v); CALRATIO(ratio,vratio,maxratio,control.motionoffset.omega,control.motionprec.omega); control.motioniteration=log(vratio)/log(2); control.motionanneal=maxratio*maxratio; control.motionannealratio=pow(control.motionanneal,-1/control.motioniteration); CALZOOM(control.motionzoom.a,control.motionoffset.a,control.motionprec.a,control.motioniteration); CALZOOM(control.motionzoom.v,control.motionoffset.v,control.motionprec.v,control.motioniteration); CALZOOM(control.motionzoom.omega,control.motionoffset.omega,control.motionprec.omega,control.motioniteration); } __host__ bool hostInitializeMotion(Tracker & tracker, TrackerSampleControl & control) { control.motionmin=MOTIONMIN; control.motionmax=MOTIONMAX; control.motionprec=MOTIONPREC; switch(tracker.status) { case StatusInitMotion: { control.pfflag=0; control.motionoffset=INITMOTIONOFFSET; control.pnum=1; hostCalculateMotionControl(control); } break; case StatusUpdateTracker_SSPF: { control.pfflag=0; control.motionoffset=UPDATEMOTIONOFFSET_SSPF; control.pnum=1; hostCalculateMotionControl(control); } break; case StatusUpdateTracker_PF: { control.pfflag=1; control.motionoffset=UPDATEMOTIONOFFSET_PF; control.pnum=MRQPN; control.motioniteration=0; } break; default: { control.motioniteration=-1; control.pnum=0; } break; } return control.motioniteration>=0; } __host__ double hostInitMotionEstimation(int trackernum, std::vector<Tracker> & trackers, std::vector<TrackerSampleControl> & controls, int & pnum, std::vector<TrackerParticle> & particles) { double maxmotioniteration=-1; pnum=0; for(int i=0;i<trackernum;i++) { controls[i].id=i; if(hostInitializeMotion(trackers[i],controls[i])) { if(maxmotioniteration<controls[i].motioniteration) maxmotioniteration=controls[i].motioniteration; for(int j=0;j<controls[i].pnum;j++) { particles[pnum].state=trackers[i].mean; particles[pnum].controlid=i; particles[pnum].weight=0; particles[pnum].beamcount=0; pnum++; } } } return maxmotioniteration; } //==================================================== //2: upsample __host__ __device__ void deviceAckermannModel(TrackerParticle & particle, EgoMotion & egomotion) { double c=cos(particle.state.theta); double s=sin(particle.state.theta); if(particle.state.k==0) { particle.state.x=particle.state.x+c*particle.state.v*egomotion.dt; particle.state.y=particle.state.y+s*particle.state.v*egomotion.dt; particle.state.a=0; } else { double c0=cos(particle.state.theta+particle.state.a); double s0=sin(particle.state.theta+particle.state.a); particle.state.omega=particle.state.v*particle.state.k; double dtheta=particle.state.omega*egomotion.dt; particle.state.theta+=dtheta; double c1=cos(particle.state.theta+particle.state.a); double s1=sin(particle.state.theta+particle.state.a); double R=1/particle.state.k; particle.state.x=particle.state.x+R*(-s0+s1); particle.state.y=particle.state.y+R*(c0-c1); } } __host__ __device__ void deviceEgoMotion(TrackerParticle & particle, EgoMotion & egomotion) { double c=cos(egomotion.dtheta); double s=sin(egomotion.dtheta); double tmpx=c*particle.state.x-s*particle.state.y+egomotion.dx; double 
tmpy=s*particle.state.x+c*particle.state.y+egomotion.dy; particle.state.x=tmpx; particle.state.y=tmpy; particle.state.theta+=egomotion.dtheta; } __global__ void kernelMotionUpSample(TrackerParticle *particles, TrackerSampleControl *controls, TrackerParticle *tmpparticles, TrackerParticle *tmpparticles_forward, int tmppnum, thrust::random::minstd_rand *rng, EgoMotion egomotion, int beamnum, int *beamcount) { GetThreadID_1D(tmppid); if(tmppid>=tmppnum) return; int pid=int(tmppid/SPN); int cid=particles[pid].controlid; int rid=tmppid%RNGNUM; TrackerSampleControl control=controls[cid]; TrackerParticle particle=particles[pid]; if(control.motioniteration<1) { tmpparticles[tmppid]=particle; tmpparticles_forward[tmppid]=particle; beamcount[tmppid]=0; return; } if(control.pfflag) { particle.state.v=thrust::random::normal_distribution<double>(particle.state.v,control.motionoffset.v)(rng[rid]); particle.state.v=particle.state.v>control.motionmin.v?particle.state.v:control.motionmin.v; particle.state.v=particle.state.v<control.motionmax.v?particle.state.v:control.motionmax.v; particle.state.omega=thrust::random::normal_distribution<double>(particle.state.omega,control.motionoffset.omega)(rng[rid]); particle.state.omega=particle.state.omega>control.motionmin.omega?particle.state.omega:control.motionmin.omega; particle.state.omega=particle.state.omega<control.motionmax.omega?particle.state.omega:control.motionmax.omega; } else { double vmin=particle.state.v-control.motionoffset.v; vmin=vmin>control.motionmin.v?vmin:control.motionmin.v; double vmax=particle.state.v+control.motionoffset.v; vmax=vmax<control.motionmax.v?vmax:control.motionmax.v; particle.state.v=thrust::random::uniform_real_distribution<double>(vmin,vmax)(rng[rid]); double omegamin=particle.state.omega-control.motionoffset.omega; omegamin=omegamin>control.motionmin.omega?omegamin:control.motionmin.omega; double omegamax=particle.state.omega+control.motionoffset.omega; omegamax=omegamax<control.motionmax.omega?omegamax:control.motionmax.omega; particle.state.omega=thrust::random::uniform_real_distribution<double>(omegamin,omegamax)(rng[rid]); } if(particle.state.v==0) { particle.state.k=0; } else { particle.state.k=particle.state.omega/particle.state.v; particle.state.k=particle.state.k>control.motionmin.k?particle.state.k:control.motionmin.k; particle.state.k=particle.state.k<control.motionmax.k?particle.state.k:control.motionmax.k; } particle.state.omega=particle.state.v*particle.state.k; if(control.pfflag) { particle.state.a=thrust::random::normal_distribution<double>(particle.state.a,control.motionoffset.a)(rng[rid]); particle.state.a=particle.state.a>control.motionmin.a?particle.state.a:control.motionmin.a; particle.state.a=particle.state.a<control.motionmax.a?particle.state.a:control.motionmax.a; } else { double R,phi; if(particle.state.k!=0) { R=1/fabs(particle.state.k); phi=atan2(6.0,R); } double amin,amax; if(particle.state.omega>0) { amin=DEG2RAD(-30); amax=phi; amax=amax>amin?amax:amin; } else if(particle.state.omega<0) { amax=DEG2RAD(30); amin=-phi; amin=amin<amax?amin:amax; } else { amin=0; amax=0; } particle.state.a=thrust::random::uniform_real_distribution<double>(amin,amax)(rng[rid]); } tmpparticles[tmppid]=particle; deviceAckermannModel(particle,egomotion); deviceEgoMotion(particle,egomotion); deviceBuildModel(particle,beamnum); tmpparticles_forward[tmppid]=particle; tmpparticles[tmppid].geometry.validflag=particle.geometry.validflag; beamcount[tmppid]=particle.geometry.beamcount; } 
//==================================================== //8: estimate tracker __host__ void hostEstimateMotionTracker(int pnum, std::vector<TrackerParticle> & particles, std::vector<Tracker> & trackers, int beamnum) { TrackerState minstate; TrackerState maxstate; double weightsum; int cid=-1; for(int i=0;i<=pnum;i++) { bool flag=(i>=pnum)||(cid!=particles[i].controlid); if(flag) { if(cid>=0) { trackers[cid].mean.x/=weightsum; trackers[cid].mean.y/=weightsum; trackers[cid].mean.theta/=weightsum; trackers[cid].mean.a/=weightsum; trackers[cid].mean.v/=weightsum; trackers[cid].mean.k/=weightsum; trackers[cid].mean.omega/=weightsum; trackers[cid].beamcount/=weightsum; trackers[cid].sigma.x=::max(trackers[cid].mean.x-minstate.x,maxstate.x-trackers[cid].mean.x); trackers[cid].sigma.y=::max(trackers[cid].mean.y-minstate.y,maxstate.y-trackers[cid].mean.y); trackers[cid].sigma.theta=::max(trackers[cid].mean.theta-minstate.theta,maxstate.theta-trackers[cid].mean.theta); trackers[cid].sigma.a=::max(trackers[cid].mean.a-minstate.a,maxstate.a-trackers[cid].mean.a); trackers[cid].sigma.v=::max(trackers[cid].mean.v-minstate.v,maxstate.v-trackers[cid].mean.v); trackers[cid].sigma.k=::max(trackers[cid].mean.k-minstate.k,maxstate.k-trackers[cid].mean.k); trackers[cid].sigma.omega=::max(trackers[cid].mean.omega-minstate.omega,maxstate.omega-trackers[cid].mean.omega); if(trackers[cid].sigma.x<SSPF_SIGMA_X&&trackers[cid].sigma.y<SSPF_SIGMA_Y&&trackers[cid].sigma.theta<SSPF_SIGMA_THETA) { if(trackers[cid].beamcount>SSPF_BEAMCOUNT) { trackers[cid].pfcount/=2; } else { trackers[cid].pfcount++; } trackers[cid].status=StatusUpdateTracker_SSPF; } else { trackers[cid].pfcount++; trackers[cid].status=StatusUpdateTracker_PF; } hostBuildModel(trackers[cid],beamnum); } if(i<pnum) { cid=particles[i].controlid; trackers[cid].mean.x=0; trackers[cid].mean.y=0; trackers[cid].mean.theta=0; trackers[cid].mean.a=0; trackers[cid].mean.v=0; trackers[cid].mean.k=0; trackers[cid].mean.omega=0; trackers[cid].beamcount=0; weightsum=0; minstate=particles[i].state; maxstate=particles[i].state; } else { break; } } weightsum+=particles[i].weight; trackers[cid].mean.x+=particles[i].state.x*particles[i].weight; trackers[cid].mean.y+=particles[i].state.y*particles[i].weight; trackers[cid].mean.theta+=particles[i].state.theta*particles[i].weight; trackers[cid].mean.a+=particles[i].state.a*particles[i].weight; trackers[cid].mean.v+=particles[i].state.v*particles[i].weight; trackers[cid].mean.k+=particles[i].state.k*particles[i].weight; trackers[cid].mean.omega+=particles[i].state.omega; trackers[cid].beamcount+=particles[i].beamcount*particles[i].weight; minstate.x=minstate.x<particles[i].state.x?minstate.x:particles[i].state.x; maxstate.x=maxstate.x>particles[i].state.x?maxstate.x:particles[i].state.x; minstate.y=minstate.y<particles[i].state.y?minstate.y:particles[i].state.y; maxstate.y=maxstate.y>particles[i].state.y?maxstate.y:particles[i].state.y; minstate.theta=minstate.theta<particles[i].state.theta?minstate.theta:particles[i].state.theta; maxstate.theta=maxstate.theta>particles[i].state.theta?maxstate.theta:particles[i].state.theta; minstate.a=minstate.a<particles[i].state.a?minstate.a:particles[i].state.a; maxstate.a=maxstate.a>particles[i].state.a?maxstate.a:particles[i].state.a; minstate.v=minstate.v<particles[i].state.v?minstate.v:particles[i].state.v; maxstate.v=maxstate.v>particles[i].state.v?maxstate.v:particles[i].state.v; minstate.k=minstate.k<particles[i].state.k?minstate.k:particles[i].state.k; 
maxstate.k=maxstate.k>particles[i].state.k?maxstate.k:particles[i].state.k; minstate.omega=minstate.omega<particles[i].state.omega?minstate.omega:particles[i].state.omega; maxstate.omega=maxstate.omega>particles[i].state.omega?maxstate.omega:particles[i].state.omega; } }
96a0b6c8f64f5ef27296f1b8035da49c1a0eaf86.cu
#include"rbsspf_motion.cuh" //==================================================== //1: init control and particles __host__ void hostCalculateMotionControl(TrackerSampleControl & control) { double ratio=1,vratio=1,maxratio=1; CALRATIO(ratio,vratio,maxratio,control.motionoffset.a,control.motionprec.a); CALRATIO(ratio,vratio,maxratio,control.motionoffset.v,control.motionprec.v); CALRATIO(ratio,vratio,maxratio,control.motionoffset.omega,control.motionprec.omega); control.motioniteration=log(vratio)/log(2); control.motionanneal=maxratio*maxratio; control.motionannealratio=pow(control.motionanneal,-1/control.motioniteration); CALZOOM(control.motionzoom.a,control.motionoffset.a,control.motionprec.a,control.motioniteration); CALZOOM(control.motionzoom.v,control.motionoffset.v,control.motionprec.v,control.motioniteration); CALZOOM(control.motionzoom.omega,control.motionoffset.omega,control.motionprec.omega,control.motioniteration); } __host__ bool hostInitializeMotion(Tracker & tracker, TrackerSampleControl & control) { control.motionmin=MOTIONMIN; control.motionmax=MOTIONMAX; control.motionprec=MOTIONPREC; switch(tracker.status) { case StatusInitMotion: { control.pfflag=0; control.motionoffset=INITMOTIONOFFSET; control.pnum=1; hostCalculateMotionControl(control); } break; case StatusUpdateTracker_SSPF: { control.pfflag=0; control.motionoffset=UPDATEMOTIONOFFSET_SSPF; control.pnum=1; hostCalculateMotionControl(control); } break; case StatusUpdateTracker_PF: { control.pfflag=1; control.motionoffset=UPDATEMOTIONOFFSET_PF; control.pnum=MRQPN; control.motioniteration=0; } break; default: { control.motioniteration=-1; control.pnum=0; } break; } return control.motioniteration>=0; } __host__ double hostInitMotionEstimation(int trackernum, std::vector<Tracker> & trackers, std::vector<TrackerSampleControl> & controls, int & pnum, std::vector<TrackerParticle> & particles) { double maxmotioniteration=-1; pnum=0; for(int i=0;i<trackernum;i++) { controls[i].id=i; if(hostInitializeMotion(trackers[i],controls[i])) { if(maxmotioniteration<controls[i].motioniteration) maxmotioniteration=controls[i].motioniteration; for(int j=0;j<controls[i].pnum;j++) { particles[pnum].state=trackers[i].mean; particles[pnum].controlid=i; particles[pnum].weight=0; particles[pnum].beamcount=0; pnum++; } } } return maxmotioniteration; } //==================================================== //2: upsample __host__ __device__ void deviceAckermannModel(TrackerParticle & particle, EgoMotion & egomotion) { double c=cos(particle.state.theta); double s=sin(particle.state.theta); if(particle.state.k==0) { particle.state.x=particle.state.x+c*particle.state.v*egomotion.dt; particle.state.y=particle.state.y+s*particle.state.v*egomotion.dt; particle.state.a=0; } else { double c0=cos(particle.state.theta+particle.state.a); double s0=sin(particle.state.theta+particle.state.a); particle.state.omega=particle.state.v*particle.state.k; double dtheta=particle.state.omega*egomotion.dt; particle.state.theta+=dtheta; double c1=cos(particle.state.theta+particle.state.a); double s1=sin(particle.state.theta+particle.state.a); double R=1/particle.state.k; particle.state.x=particle.state.x+R*(-s0+s1); particle.state.y=particle.state.y+R*(c0-c1); } } __host__ __device__ void deviceEgoMotion(TrackerParticle & particle, EgoMotion & egomotion) { double c=cos(egomotion.dtheta); double s=sin(egomotion.dtheta); double tmpx=c*particle.state.x-s*particle.state.y+egomotion.dx; double tmpy=s*particle.state.x+c*particle.state.y+egomotion.dy; particle.state.x=tmpx; 
particle.state.y=tmpy; particle.state.theta+=egomotion.dtheta; } __global__ void kernelMotionUpSample(TrackerParticle *particles, TrackerSampleControl *controls, TrackerParticle *tmpparticles, TrackerParticle *tmpparticles_forward, int tmppnum, thrust::random::minstd_rand *rng, EgoMotion egomotion, int beamnum, int *beamcount) { GetThreadID_1D(tmppid); if(tmppid>=tmppnum) return; int pid=int(tmppid/SPN); int cid=particles[pid].controlid; int rid=tmppid%RNGNUM; TrackerSampleControl control=controls[cid]; TrackerParticle particle=particles[pid]; if(control.motioniteration<1) { tmpparticles[tmppid]=particle; tmpparticles_forward[tmppid]=particle; beamcount[tmppid]=0; return; } if(control.pfflag) { particle.state.v=thrust::random::normal_distribution<double>(particle.state.v,control.motionoffset.v)(rng[rid]); particle.state.v=particle.state.v>control.motionmin.v?particle.state.v:control.motionmin.v; particle.state.v=particle.state.v<control.motionmax.v?particle.state.v:control.motionmax.v; particle.state.omega=thrust::random::normal_distribution<double>(particle.state.omega,control.motionoffset.omega)(rng[rid]); particle.state.omega=particle.state.omega>control.motionmin.omega?particle.state.omega:control.motionmin.omega; particle.state.omega=particle.state.omega<control.motionmax.omega?particle.state.omega:control.motionmax.omega; } else { double vmin=particle.state.v-control.motionoffset.v; vmin=vmin>control.motionmin.v?vmin:control.motionmin.v; double vmax=particle.state.v+control.motionoffset.v; vmax=vmax<control.motionmax.v?vmax:control.motionmax.v; particle.state.v=thrust::random::uniform_real_distribution<double>(vmin,vmax)(rng[rid]); double omegamin=particle.state.omega-control.motionoffset.omega; omegamin=omegamin>control.motionmin.omega?omegamin:control.motionmin.omega; double omegamax=particle.state.omega+control.motionoffset.omega; omegamax=omegamax<control.motionmax.omega?omegamax:control.motionmax.omega; particle.state.omega=thrust::random::uniform_real_distribution<double>(omegamin,omegamax)(rng[rid]); } if(particle.state.v==0) { particle.state.k=0; } else { particle.state.k=particle.state.omega/particle.state.v; particle.state.k=particle.state.k>control.motionmin.k?particle.state.k:control.motionmin.k; particle.state.k=particle.state.k<control.motionmax.k?particle.state.k:control.motionmax.k; } particle.state.omega=particle.state.v*particle.state.k; if(control.pfflag) { particle.state.a=thrust::random::normal_distribution<double>(particle.state.a,control.motionoffset.a)(rng[rid]); particle.state.a=particle.state.a>control.motionmin.a?particle.state.a:control.motionmin.a; particle.state.a=particle.state.a<control.motionmax.a?particle.state.a:control.motionmax.a; } else { double R,phi; if(particle.state.k!=0) { R=1/fabs(particle.state.k); phi=atan2(6.0,R); } double amin,amax; if(particle.state.omega>0) { amin=DEG2RAD(-30); amax=phi; amax=amax>amin?amax:amin; } else if(particle.state.omega<0) { amax=DEG2RAD(30); amin=-phi; amin=amin<amax?amin:amax; } else { amin=0; amax=0; } particle.state.a=thrust::random::uniform_real_distribution<double>(amin,amax)(rng[rid]); } tmpparticles[tmppid]=particle; deviceAckermannModel(particle,egomotion); deviceEgoMotion(particle,egomotion); deviceBuildModel(particle,beamnum); tmpparticles_forward[tmppid]=particle; tmpparticles[tmppid].geometry.validflag=particle.geometry.validflag; beamcount[tmppid]=particle.geometry.beamcount; } //==================================================== //8: estimate tracker __host__ void hostEstimateMotionTracker(int 
pnum, std::vector<TrackerParticle> & particles, std::vector<Tracker> & trackers, int beamnum) { TrackerState minstate; TrackerState maxstate; double weightsum; int cid=-1; for(int i=0;i<=pnum;i++) { bool flag=(i>=pnum)||(cid!=particles[i].controlid); if(flag) { if(cid>=0) { trackers[cid].mean.x/=weightsum; trackers[cid].mean.y/=weightsum; trackers[cid].mean.theta/=weightsum; trackers[cid].mean.a/=weightsum; trackers[cid].mean.v/=weightsum; trackers[cid].mean.k/=weightsum; trackers[cid].mean.omega/=weightsum; trackers[cid].beamcount/=weightsum; trackers[cid].sigma.x=std::max(trackers[cid].mean.x-minstate.x,maxstate.x-trackers[cid].mean.x); trackers[cid].sigma.y=std::max(trackers[cid].mean.y-minstate.y,maxstate.y-trackers[cid].mean.y); trackers[cid].sigma.theta=std::max(trackers[cid].mean.theta-minstate.theta,maxstate.theta-trackers[cid].mean.theta); trackers[cid].sigma.a=std::max(trackers[cid].mean.a-minstate.a,maxstate.a-trackers[cid].mean.a); trackers[cid].sigma.v=std::max(trackers[cid].mean.v-minstate.v,maxstate.v-trackers[cid].mean.v); trackers[cid].sigma.k=std::max(trackers[cid].mean.k-minstate.k,maxstate.k-trackers[cid].mean.k); trackers[cid].sigma.omega=std::max(trackers[cid].mean.omega-minstate.omega,maxstate.omega-trackers[cid].mean.omega); if(trackers[cid].sigma.x<SSPF_SIGMA_X&&trackers[cid].sigma.y<SSPF_SIGMA_Y&&trackers[cid].sigma.theta<SSPF_SIGMA_THETA) { if(trackers[cid].beamcount>SSPF_BEAMCOUNT) { trackers[cid].pfcount/=2; } else { trackers[cid].pfcount++; } trackers[cid].status=StatusUpdateTracker_SSPF; } else { trackers[cid].pfcount++; trackers[cid].status=StatusUpdateTracker_PF; } hostBuildModel(trackers[cid],beamnum); } if(i<pnum) { cid=particles[i].controlid; trackers[cid].mean.x=0; trackers[cid].mean.y=0; trackers[cid].mean.theta=0; trackers[cid].mean.a=0; trackers[cid].mean.v=0; trackers[cid].mean.k=0; trackers[cid].mean.omega=0; trackers[cid].beamcount=0; weightsum=0; minstate=particles[i].state; maxstate=particles[i].state; } else { break; } } weightsum+=particles[i].weight; trackers[cid].mean.x+=particles[i].state.x*particles[i].weight; trackers[cid].mean.y+=particles[i].state.y*particles[i].weight; trackers[cid].mean.theta+=particles[i].state.theta*particles[i].weight; trackers[cid].mean.a+=particles[i].state.a*particles[i].weight; trackers[cid].mean.v+=particles[i].state.v*particles[i].weight; trackers[cid].mean.k+=particles[i].state.k*particles[i].weight; trackers[cid].mean.omega+=particles[i].state.omega; trackers[cid].beamcount+=particles[i].beamcount*particles[i].weight; minstate.x=minstate.x<particles[i].state.x?minstate.x:particles[i].state.x; maxstate.x=maxstate.x>particles[i].state.x?maxstate.x:particles[i].state.x; minstate.y=minstate.y<particles[i].state.y?minstate.y:particles[i].state.y; maxstate.y=maxstate.y>particles[i].state.y?maxstate.y:particles[i].state.y; minstate.theta=minstate.theta<particles[i].state.theta?minstate.theta:particles[i].state.theta; maxstate.theta=maxstate.theta>particles[i].state.theta?maxstate.theta:particles[i].state.theta; minstate.a=minstate.a<particles[i].state.a?minstate.a:particles[i].state.a; maxstate.a=maxstate.a>particles[i].state.a?maxstate.a:particles[i].state.a; minstate.v=minstate.v<particles[i].state.v?minstate.v:particles[i].state.v; maxstate.v=maxstate.v>particles[i].state.v?maxstate.v:particles[i].state.v; minstate.k=minstate.k<particles[i].state.k?minstate.k:particles[i].state.k; maxstate.k=maxstate.k>particles[i].state.k?maxstate.k:particles[i].state.k; 
minstate.omega=minstate.omega<particles[i].state.omega?minstate.omega:particles[i].state.omega; maxstate.omega=maxstate.omega>particles[i].state.omega?maxstate.omega:particles[i].state.omega; } }
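// A minimal standalone sketch (plain C++, not part of the tracker sources; type and function
// names are illustrative) spelling out the closed-form step that deviceAckermannModel above
// implements: with constant speed v and curvature k (so omega = v*k and turning radius R = 1/k),
// and with the velocity direction offset from the heading by the angle a, integrating over a
// time step dt gives
//   theta' = theta + v*k*dt
//   x'     = x + R*( sin(theta' + a) - sin(theta + a) )
//   y'     = y + R*( cos(theta  + a) - cos(theta' + a) )
// which degenerates to straight-line motion as k -> 0.
#include <cmath>

struct Pose { double x, y, theta, a, v, k; };

static Pose ConstantCurvatureStep(Pose p, double dt) {
  if (p.k == 0.0) {  // straight-line limit
    p.x += std::cos(p.theta) * p.v * dt;
    p.y += std::sin(p.theta) * p.v * dt;
    return p;
  }
  const double R = 1.0 / p.k;
  const double dir0 = p.theta + p.a;             // velocity direction at the start of the step
  p.theta += p.v * p.k * dt;                     // omega = v * k
  const double dir1 = p.theta + p.a;             // velocity direction at the end of the step
  p.x += R * (std::sin(dir1) - std::sin(dir0));  // integral of v*cos(dir) along the arc
  p.y += R * (std::cos(dir0) - std::cos(dir1));  // integral of v*sin(dir) along the arc
  return p;
}

int main() {
  Pose p{0.0, 0.0, 0.0, 0.0, 1.0, 0.5};  // 1 m/s on a 2 m-radius arc
  p = ConstantCurvatureStep(p, 0.1);     // one 0.1 s step (the role played by egomotion.dt)
  (void)p;
  return 0;
}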
6dc304cffa21175d15a98a21ec9e32b7c6808086.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

struct __align__(16) Retina1{
    int input_number;
    int kernelsize;
    int postneuron_number;
};

//neurontype:retina
__global__ static void Retina_CNN(int *input,struct Retina1 *retina,int *THREAD_NUM,struct neuron_I *Ix)
{
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    int number=(bid * (*THREAD_NUM) + tid)*10;
    float I;
    int i=0;
    for(i=0;i<10;i++)
    {
        I=input[retina[number+i].input_number];
        Ix[retina[number+i].postneuron_number].I=(int)I*10;
        //if(I>100){printf("%d=%d\n",retina[number+i].postneuron_number,(int)I);}
    }
}

int Distribution_input_func(int *input_type,int *Postlayer,int *kernelsize,int *X_max,int *Y_max,int maxline,int *layer_X_max,int *layer_Y_max,int *THREAD,int *BLOCK)
{
    int i=0;
    int error=0;
    int num=0;
    for(i=0;i<maxline;i++)
    {
        if(X_max[i]!=X_max[0]||Y_max[i]!=Y_max[0])
        {
            printf("all X_max must same and all Y_max must same,please check input txt\n");
        }
        if(input_type[i]!=7)
        {
            printf("input neuron type error i=%d\n",i);
        }
        if(X_max[i]!=layer_X_max[Postlayer[i]] || Y_max[i]!=layer_Y_max[Postlayer[i]])
        {
            error=-1;printf("X_max or Y_max error i=%d\n", i);break;
        }
        if(kernelsize[i]>=X_max[i] || kernelsize[i]>=Y_max[i] || kernelsize[i]<=0)
        {
            error=-1;printf("kernelsize error i=%d\n", i);break;
        }
        num+=X_max[i]*Y_max[i];
    }
    printf("retina input number = %d\n",num);
    if(num==0)
    {
        *THREAD=0;
        *BLOCK=0;
        printf("no input\n");
        error=-1;
    }
    else if(num%1280==0) { *BLOCK=num/1280; *THREAD=128; }
    else if(num%960==0) { *BLOCK=num/960; *THREAD=96; }
    else if(num%640==0) { *BLOCK=num/640; *THREAD=64; }
    else if(num%320==0) { *BLOCK=num/320; *THREAD=32; }
    else
    {
        printf("error THREAD_NUM \n");
        printf("please confirm input is 32X and 10X\n");
        error=-1;
    }
    return error;
}

int connect_Retina_func(struct Retina1 *Retina_neuron,int *Postlayer,int *kernelsize,int *X_max,int *Y_max,int maxline,struct axon *neuron_copy)
{
    int i=0;
    int j=0;
    int N=0;
    int post_addr;
    int error=0;
    for(i=0;i<maxline;i++)
    {
        j=0;
        while(1)
        {
            if(neuron_copy[j].layer==Postlayer[i]) //find the starting address of the neuronbox corresponding to the postsynaptic neuron
            {post_addr=j;j=0;break;}
            j++;
        }
        printf("i=%d\n",i);
        printf("post_addr=%d\n",post_addr);
        printf("Postlayer=%d\n",Postlayer[i]);
        for(j=0;j<X_max[i]*Y_max[i];j++)
        {
            Retina_neuron[j+N].input_number=j;
            Retina_neuron[j+N].kernelsize=kernelsize[i];
            Retina_neuron[j+N].postneuron_number=post_addr+j;
            if(neuron_copy[post_addr+j].layer!=Postlayer[i])
            {
                printf("error: i=%d\n",i);
                printf("numbers=%d\n",j+N);
                printf("post_addr=%d\n",post_addr+j);
                printf("post_layer=%d\n",neuron_copy[post_addr+j].layer);
                error=-1;break;
            }
        }
        N+=X_max[i]*Y_max[i];
    }
    return error;
}
6dc304cffa21175d15a98a21ec9e32b7c6808086.cu
#include "cuda_runtime.h"
#include <stdio.h>

struct __align__(16) Retina1{
    int input_number;
    int kernelsize;
    int postneuron_number;
};

//neurontype:retina
__global__ static void Retina_CNN(int *input,struct Retina1 *retina,int *THREAD_NUM,struct neuron_I *Ix)
{
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    int number=(bid * (*THREAD_NUM) + tid)*10;
    float I;
    int i=0;
    for(i=0;i<10;i++)
    {
        I=input[retina[number+i].input_number];
        Ix[retina[number+i].postneuron_number].I=(int)I*10;
        //if(I>100){printf("%d=%d\n",retina[number+i].postneuron_number,(int)I);}
    }
}

int Distribution_input_func(int *input_type,int *Postlayer,int *kernelsize,int *X_max,int *Y_max,int maxline,int *layer_X_max,int *layer_Y_max,int *THREAD,int *BLOCK)
{
    int i=0;
    int error=0;
    int num=0;
    for(i=0;i<maxline;i++)
    {
        if(X_max[i]!=X_max[0]||Y_max[i]!=Y_max[0])
        {
            printf("all X_max must same and all Y_max must same,please check input txt\n");
        }
        if(input_type[i]!=7)
        {
            printf("input neuron type error i=%d\n",i);
        }
        if(X_max[i]!=layer_X_max[Postlayer[i]] || Y_max[i]!=layer_Y_max[Postlayer[i]])
        {
            error=-1;printf("X_max or Y_max error i=%d\n", i);break;
        }
        if(kernelsize[i]>=X_max[i] || kernelsize[i]>=Y_max[i] || kernelsize[i]<=0)
        {
            error=-1;printf("kernelsize error i=%d\n", i);break;
        }
        num+=X_max[i]*Y_max[i];
    }
    printf("retina input number = %d\n",num);
    if(num==0)
    {
        *THREAD=0;
        *BLOCK=0;
        printf("no input\n");
        error=-1;
    }
    else if(num%1280==0) { *BLOCK=num/1280; *THREAD=128; }
    else if(num%960==0) { *BLOCK=num/960; *THREAD=96; }
    else if(num%640==0) { *BLOCK=num/640; *THREAD=64; }
    else if(num%320==0) { *BLOCK=num/320; *THREAD=32; }
    else
    {
        printf("error THREAD_NUM \n");
        printf("please confirm input is 32X and 10X\n");
        error=-1;
    }
    return error;
}

int connect_Retina_func(struct Retina1 *Retina_neuron,int *Postlayer,int *kernelsize,int *X_max,int *Y_max,int maxline,struct axon *neuron_copy)
{
    int i=0;
    int j=0;
    int N=0;
    int post_addr;
    int error=0;
    for(i=0;i<maxline;i++)
    {
        j=0;
        while(1)
        {
            if(neuron_copy[j].layer==Postlayer[i]) //find the starting address of the neuronbox corresponding to the postsynaptic neuron
            {post_addr=j;j=0;break;}
            j++;
        }
        printf("i=%d\n",i);
        printf("post_addr=%d\n",post_addr);
        printf("Postlayer=%d\n",Postlayer[i]);
        for(j=0;j<X_max[i]*Y_max[i];j++)
        {
            Retina_neuron[j+N].input_number=j;
            Retina_neuron[j+N].kernelsize=kernelsize[i];
            Retina_neuron[j+N].postneuron_number=post_addr+j;
            if(neuron_copy[post_addr+j].layer!=Postlayer[i])
            {
                printf("error: i=%d\n",i);
                printf("numbers=%d\n",j+N);
                printf("post_addr=%d\n",post_addr+j);
                printf("post_layer=%d\n",neuron_copy[post_addr+j].layer);
                error=-1;break;
            }
        }
        N+=X_max[i]*Y_max[i];
    }
    return error;
}
185e86a6649138057d93cf7e21e61b9359c5f0a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * http://github.com/dusty-nv */ #include "cudaRGB.h" //------------------------------------------------------------------------------------------------------------------------- __global__ void RGBToRGBAf(uchar3* srcImage, float4* dstImage, uint32_t width, uint32_t height) { int x, y, pixel; x = (blockIdx.x * blockDim.x) + threadIdx.x; y = (blockIdx.y * blockDim.y) + threadIdx.y; pixel = y * width + x; if (x >= width) return; if (y >= height) return; // printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel); const float s = 1.0f; const uchar3 px = srcImage[pixel]; dstImage[pixel] = make_float4(px.x * s, px.y * s, px.z * s, 255.0f * s); } hipError_t cudaRGBToRGBAf( uchar3* srcDev, float4* destDev, size_t width, size_t height ) { if( !srcDev || !destDev ) return hipErrorInvalidDevicePointer; const dim3 blockDim(8,8,1); const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1); hipLaunchKernelGGL(( RGBToRGBAf), dim3(gridDim), dim3(blockDim), 0, 0, srcDev, destDev, width, height ); return CUDA(hipGetLastError()); }
185e86a6649138057d93cf7e21e61b9359c5f0a9.cu
/* * http://github.com/dusty-nv */ #include "cudaRGB.h" //------------------------------------------------------------------------------------------------------------------------- __global__ void RGBToRGBAf(uchar3* srcImage, float4* dstImage, uint32_t width, uint32_t height) { int x, y, pixel; x = (blockIdx.x * blockDim.x) + threadIdx.x; y = (blockIdx.y * blockDim.y) + threadIdx.y; pixel = y * width + x; if (x >= width) return; if (y >= height) return; // printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel); const float s = 1.0f; const uchar3 px = srcImage[pixel]; dstImage[pixel] = make_float4(px.x * s, px.y * s, px.z * s, 255.0f * s); } cudaError_t cudaRGBToRGBAf( uchar3* srcDev, float4* destDev, size_t width, size_t height ) { if( !srcDev || !destDev ) return cudaErrorInvalidDevicePointer; const dim3 blockDim(8,8,1); const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1); RGBToRGBAf<<<gridDim, blockDim>>>( srcDev, destDev, width, height ); return CUDA(cudaGetLastError()); }
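// A minimal self-contained sketch (assumes a HIP toolchain; the kernel and variable names are
// illustrative, not from cudaRGB.cu) of the launch-syntax mapping that hipify applies in the
// .hip/.cu pair above, alongside the mechanical cuda* -> hip* runtime-API renames:
//   kernel<<<grid, block, sharedMemBytes, stream>>>(args...)
// becomes
//   hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...)
#include <hip/hip_runtime.h>

__global__ void scale(float* data, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= s;
}

int main() {
  const int n = 256;
  float* d = nullptr;
  hipMalloc(&d, n * sizeof(float));
  const dim3 block(64, 1, 1);
  const dim3 grid((n + block.x - 1) / block.x, 1, 1);
  // CUDA form: scale<<<grid, block, 0, 0>>>(d, 2.0f, n);
  hipLaunchKernelGGL(scale, grid, block, 0, 0, d, 2.0f, n);  // HIP form emitted by hipify
  hipDeviceSynchronize();
  hipFree(d);
  return 0;
}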
3ee25c22a45cab1022c3bfd1b3c5251127006b19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or bpied warranties, including, but not limited to, the bpied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "thrust/device_ptr.h" #include "thrust/remove.h" #include "thrust/functional.h" #include "internal_shared.hpp" using namespace thrust; namespace cv { namespace gpu { namespace device { namespace globmotion { __constant__ float cml[9]; __constant__ float cmr[9]; int compactPoints(int N, float *points0, float *points1, const uchar *mask) { thrust::device_ptr<float2> dpoints0((float2*)points0); thrust::device_ptr<float2> dpoints1((float2*)points1); thrust::device_ptr<const uchar> dmask(mask); return thrust::remove_if(thrust::make_zip_iterator(thrust::make_tuple(dpoints0, dpoints1)), thrust::make_zip_iterator(thrust::make_tuple(dpoints0 + N, dpoints1 + N)), dmask, thrust::not1(thrust::identity<uchar>())) - make_zip_iterator(make_tuple(dpoints0, dpoints1)); } __global__ void calcWobbleSuppressionMapsKernel( const int left, const int idx, const int right, const int width, const int height, PtrStepf mapx, PtrStepf mapy) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if (x < width && y < height) { float xl = cml[0]*x + cml[1]*y + cml[2]; float yl = cml[3]*x + cml[4]*y + cml[5]; float izl = 1.f / (cml[6]*x + cml[7]*y + cml[8]); xl *= izl; yl *= izl; float xr = cmr[0]*x + cmr[1]*y + cmr[2]; float yr = cmr[3]*x + cmr[4]*y + cmr[5]; float izr = 1.f / (cmr[6]*x + cmr[7]*y + cmr[8]); xr *= izr; yr *= izr; float wl = idx - left; float wr = right - idx; mapx(y,x) = (wr * xl + wl * xr) / (wl + wr); mapy(y,x) = (wr * yl + wl * yr) / (wl + wr); } } void calcWobbleSuppressionMaps( int left, int idx, int right, int width, int height, const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy) { cudaSafeCall(hipMemcpyToSymbol(cml, ml, 9*sizeof(float))); cudaSafeCall(hipMemcpyToSymbol(cmr, mr, 9*sizeof(float))); dim3 threads(32, 8); dim3 grid(divUp(width, threads.x), divUp(height, threads.y)); hipLaunchKernelGGL(( calcWobbleSuppressionMapsKernel), dim3(grid), dim3(threads), 0, 0, left, idx, right, width, height, mapx, mapy); cudaSafeCall(hipGetLastError()); cudaSafeCall(hipDeviceSynchronize()); } }}}} #endif /* CUDA_DISABLER */
3ee25c22a45cab1022c3bfd1b3c5251127006b19.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or bpied warranties, including, but not limited to, the bpied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "thrust/device_ptr.h" #include "thrust/remove.h" #include "thrust/functional.h" #include "internal_shared.hpp" using namespace thrust; namespace cv { namespace gpu { namespace device { namespace globmotion { __constant__ float cml[9]; __constant__ float cmr[9]; int compactPoints(int N, float *points0, float *points1, const uchar *mask) { thrust::device_ptr<float2> dpoints0((float2*)points0); thrust::device_ptr<float2> dpoints1((float2*)points1); thrust::device_ptr<const uchar> dmask(mask); return thrust::remove_if(thrust::make_zip_iterator(thrust::make_tuple(dpoints0, dpoints1)), thrust::make_zip_iterator(thrust::make_tuple(dpoints0 + N, dpoints1 + N)), dmask, thrust::not1(thrust::identity<uchar>())) - make_zip_iterator(make_tuple(dpoints0, dpoints1)); } __global__ void calcWobbleSuppressionMapsKernel( const int left, const int idx, const int right, const int width, const int height, PtrStepf mapx, PtrStepf mapy) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if (x < width && y < height) { float xl = cml[0]*x + cml[1]*y + cml[2]; float yl = cml[3]*x + cml[4]*y + cml[5]; float izl = 1.f / (cml[6]*x + cml[7]*y + cml[8]); xl *= izl; yl *= izl; float xr = cmr[0]*x + cmr[1]*y + cmr[2]; float yr = cmr[3]*x + cmr[4]*y + cmr[5]; float izr = 1.f / (cmr[6]*x + cmr[7]*y + cmr[8]); xr *= izr; yr *= izr; float wl = idx - left; float wr = right - idx; mapx(y,x) = (wr * xl + wl * xr) / (wl + wr); mapy(y,x) = (wr * yl + wl * yr) / (wl + wr); } } void calcWobbleSuppressionMaps( int left, int idx, int right, int width, int height, const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy) { cudaSafeCall(cudaMemcpyToSymbol(cml, ml, 9*sizeof(float))); cudaSafeCall(cudaMemcpyToSymbol(cmr, mr, 9*sizeof(float))); dim3 threads(32, 8); dim3 grid(divUp(width, threads.x), divUp(height, threads.y)); calcWobbleSuppressionMapsKernel<<<grid, threads>>>( left, idx, right, width, height, mapx, mapy); cudaSafeCall(cudaGetLastError()); cudaSafeCall(cudaDeviceSynchronize()); } }}}} #endif /* CUDA_DISABLER */
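// compactPoints() above compacts two point arrays in place with thrust::remove_if,
// using the mask as a stencil. This is a smaller, self-contained sketch of the same
// stream-compaction idea on a single device_vector; the functor and variable names
// are illustrative only and not taken from the OpenCV sources.
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <cstdio>

struct is_masked_out
{
    __host__ __device__ bool operator()(unsigned char m) const { return m == 0; }
};

int main()
{
    thrust::device_vector<float2> points(4);
    points[0] = make_float2(0.f, 0.f);
    points[1] = make_float2(1.f, 1.f);
    points[2] = make_float2(2.f, 2.f);
    points[3] = make_float2(3.f, 3.f);

    // Keep only the entries whose mask byte is non-zero, as the tracker mask does above.
    thrust::device_vector<unsigned char> mask(4);
    mask[0] = 1; mask[1] = 0; mask[2] = 1; mask[3] = 0;

    auto new_end = thrust::remove_if(points.begin(), points.end(),
                                     mask.begin(), is_masked_out());
    int remaining = static_cast<int>(new_end - points.begin());
    printf("kept %d of 4 points\n", remaining);   // expected: 2
    return 0;
}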
af6452e49467f4074a5a268d46b57870245ff301.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void addtoall(int* a, int b)
{
    int i = threadIdx.x;
    atomicAdd(&(a[i]), b);
}
af6452e49467f4074a5a268d46b57870245ff301.cu
#include "includes.h" __global__ void addtoall(int* a, int b) { int i = threadIdx.x; atomicAdd(&(a[i]), b); }
2fd3a169778b481de6e9bf8cdc99c019ebea8964.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define NDEBUG #include <chrono> #include <stdio.h> #include <assert.h> #include <inttypes.h> #include "wa-tor/aos/wator.h" //#include "wa-tor/aos/halloc_allocator.h" //#include "wa-tor/aos/scatteralloc_allocator.h" #include "wa-tor/aos/aos_allocator.h" //#include "wa-tor/aos/cuda_allocator.h" //#include "wa-tor/aos/mallocmc_allocator.h" #define SPAWN_THRESHOLD 4 #define ENERGY_BOOST 4 #define ENERGY_START 2 #define GRID_SIZE_X 2048 #define GRID_SIZE_Y 1024 #define THREADS_PER_BLOCK 256 #define OPTION_SHARK_DIE true #define OPTION_SHARK_SPAWN true #define OPTION_FISH_SPAWN true namespace wa_tor { __device__ uint32_t random_number(uint32_t* state, uint32_t max) { // Advance and return random state. // Source: https://en.wikipedia.org/wiki/Lehmer_random_number_generator assert(*state != 0); *state = static_cast<uint32_t>( static_cast<uint64_t>(*state) * 1103515245u + 12345) % 2147483648u; return ((*state) >> 7) % max; } __device__ uint32_t random_number(uint32_t* state) { // Advance and return random state. // Source: https://en.wikipedia.org/wiki/Lehmer_random_number_generator assert(*state != 0); *state = static_cast<uint32_t>( static_cast<uint64_t>(*state) * 1103515245u + 12345) % 2147483648u; return ((*state) >> 7); } __device__ Cell::Cell(uint32_t random_state) : random_state_(random_state), agent_(nullptr) { assert(random_state != 0); prepare(); } __device__ Agent* Cell::agent() const { return agent_; } __device__ void Cell::decide() { if (neighbor_request_[4]) { // This cell has priority. agent_->set_new_position(this); } else { uint8_t candidates[4]; uint8_t num_candidates = 0; for (int i = 0; i < 4; ++i) { if (neighbor_request_[i]) { candidates[num_candidates++] = i; } } if (num_candidates > 0) { uint32_t selected_index = random_number(&random_state_, num_candidates); neighbors_[candidates[selected_index]]->agent()->set_new_position(this); } } } __device__ void Cell::enter(Agent* agent) { assert(agent_ == nullptr); #ifndef NDEBUG // Ensure that no two agents are trying to enter this cell at the same time. uint64_t old_val = atomicExch(reinterpret_cast<unsigned long long int*>(&agent_), reinterpret_cast<unsigned long long int>(agent)); assert(old_val == 0); #else agent_ = agent; #endif agent->set_position(this); } __device__ bool Cell::has_fish() const { return agent_ != nullptr && agent_->type_identifier() == Fish::kTypeId; } __device__ bool Cell::has_shark() const { return agent_ != nullptr && agent_->type_identifier() == Shark::kTypeId; } __device__ bool Cell::is_free() const { return agent_ == nullptr; } __device__ void Cell::leave() { assert(agent_ != nullptr); agent_ = nullptr; } __device__ void Cell::prepare() { for (int i = 0; i < 5; ++i) { neighbor_request_[i] = false; } } __device__ uint32_t* Cell::random_state() { return &random_state_; } __device__ void Cell::request_random_fish_neighbor() { if (!request_random_neighbor<&Cell::has_fish>(agent_->random_state())) { // No fish found. Look for free cell. 
if (!request_random_neighbor<&Cell::is_free>(agent_->random_state())) { neighbor_request_[4] = true; } } } __device__ void Cell::request_random_free_neighbor() { if (!request_random_neighbor<&Cell::is_free>(agent_->random_state())) { neighbor_request_[4] = true; } } template<bool(Cell::*predicate)() const> __device__ bool Cell::request_random_neighbor(uint32_t* random_state) { uint8_t candidates[4]; uint8_t num_candidates = 0; for (int i = 0; i < 4; ++i) { if ((neighbors_[i]->*predicate)()) { candidates[num_candidates++] = i; } } if (num_candidates == 0) { return false; } else { uint32_t selected_index = random_number(random_state, num_candidates); uint8_t selected = candidates[selected_index]; uint8_t neighbor_index = (selected + 2) % 4; neighbors_[selected]->neighbor_request_[neighbor_index] = true; // Check correctness of neighbor calculation. assert(neighbors_[selected]->neighbors_[neighbor_index] == this); return true; } } __device__ void Cell::set_neighbors(Cell* left, Cell* top, Cell* right, Cell* bottom) { neighbors_[0] = left; neighbors_[1] = top; neighbors_[2] = right; neighbors_[3] = bottom; } __device__ Agent::Agent(uint32_t random_state, uint8_t type_identifier) : random_state_(random_state), type_identifier_(type_identifier) { assert(random_state != 0); } __device__ uint32_t* Agent::random_state() { return &random_state_; } __device__ void Agent::set_new_position(Cell* new_pos) { // Check for race condition. (This is not bullet proof.) assert(new_position_ == position_); new_position_ = new_pos; } __device__ Cell* Agent::position() const { return position_; } __device__ void Agent::set_position(Cell* cell) { position_ = cell; } // TODO: Verify that RTTI (dynamic_cast) does not work in device code. __device__ uint8_t Agent::type_identifier() const { return type_identifier_; } __device__ Fish::Fish(uint32_t random_state) : Agent(random_state, kTypeId), egg_timer_(random_state % SPAWN_THRESHOLD) { assert(random_state != 0); } __device__ void Fish::prepare() { assert(type_identifier() == kTypeId); egg_timer_++; // Fallback: Stay on current cell. new_position_ = position_; assert(position_ != nullptr); position_->request_random_free_neighbor(); } __device__ void Fish::update() { assert(type_identifier() == kTypeId); Cell* old_position = position_; if (old_position != new_position_) { old_position->leave(); new_position_->enter(this); if (OPTION_FISH_SPAWN && egg_timer_ > SPAWN_THRESHOLD) { uint32_t new_random_state = random_number(&random_state_) + 401; new_random_state = new_random_state != 0 ? new_random_state : random_state_; auto* new_fish = allocate<Fish>(new_random_state); assert(new_fish != nullptr); old_position->enter(new_fish); egg_timer_ = 0; } } } __device__ Shark::Shark(uint32_t random_state) : Agent(random_state, kTypeId), energy_(ENERGY_START), egg_timer_(random_state % SPAWN_THRESHOLD) { assert(random_state_ != 0); } __device__ void Shark::prepare() { assert(type_identifier() == kTypeId); egg_timer_++; energy_--; assert(position_ != nullptr); if (OPTION_SHARK_DIE && energy_ == 0) { // Do nothing. Shark will die. } else { // Fallback: Stay on current cell. 
new_position_ = position_; position_->request_random_fish_neighbor(); } } __device__ void Shark::update() { assert(type_identifier() == kTypeId); if (OPTION_SHARK_DIE && energy_ == 0) { position_->kill(); } else { Cell* old_position = position_; if (old_position != new_position_) { if (new_position_->has_fish()) { energy_ += ENERGY_BOOST; new_position_->kill(); } old_position->leave(); new_position_->enter(this); if (OPTION_SHARK_SPAWN && egg_timer_ > SPAWN_THRESHOLD) { assert(random_state_ != 0); uint32_t new_random_state = random_number(&random_state_) + 601; new_random_state = new_random_state != 0 ? new_random_state : random_state_; auto* new_shark = allocate<Shark>(new_random_state); assert(new_shark != nullptr); old_position->enter(new_shark); egg_timer_ = 0; } } } } __device__ void Cell::kill() { assert(agent_ != nullptr); if (agent_->type_identifier() == 1) { deallocate_untyped<1>(agent_); } else if (agent_->type_identifier() == 2) { deallocate_untyped<2>(agent_); } else { // Unknown type. assert(false); } agent_ = nullptr; } // ----- KERNELS ----- __device__ Cell* cells[GRID_SIZE_X * GRID_SIZE_Y]; __global__ void create_cells() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < GRID_SIZE_Y*GRID_SIZE_X) { int x = tid % GRID_SIZE_X; int y = tid / GRID_SIZE_X; float init_state = __logf(tid + 401); uint32_t init_state_int = *reinterpret_cast<uint32_t*>(&init_state); // Cell* new_cell = new Cell(init_state_int); Cell* new_cell = allocate<Cell>(601*x*x*y + init_state_int); assert(new_cell != nullptr); cells[tid] = new_cell; } } __global__ void setup_cells() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < GRID_SIZE_Y*GRID_SIZE_X) { int x = tid % GRID_SIZE_X; int y = tid / GRID_SIZE_X; Cell* left = x > 0 ? cells[y*GRID_SIZE_X + x - 1] : cells[y*GRID_SIZE_X + GRID_SIZE_X - 1]; Cell* right = x < GRID_SIZE_X - 1 ? cells[y*GRID_SIZE_X + x + 1] : cells[y*GRID_SIZE_X]; Cell* top = y > 0 ? cells[(y - 1)*GRID_SIZE_X + x] : cells[(GRID_SIZE_Y - 1)*GRID_SIZE_X + x]; Cell* bottom = y < GRID_SIZE_Y - 1 ? cells[(y + 1)*GRID_SIZE_X + x] : cells[x]; // left, top, right, bottom cells[tid]->set_neighbors(left, top, right, bottom); // Initialize with random agent. uint32_t agent_type = random_number(cells[tid]->random_state(), 4); if (agent_type == 0) { auto* agent = allocate<Fish>(*(cells[tid]->random_state())); assert(agent != nullptr); cells[tid]->enter(agent); } else if (agent_type == 1) { auto* agent = allocate<Shark>(*(cells[tid]->random_state())); assert(agent != nullptr); cells[tid]->enter(agent); } else { // Free cell. } } } // Problem: It is not easy to keep track of all objects of a class if they are // dynamically allocated. But we want to benchmark the performance of new/ // delete in CUDA. // Solution: Fill these arrays in a separate kernel by iterating over all // cells, storing agents in the respective array slots, and compacting the // arrays. We do not measure the performance of these steps. __device__ uint32_t num_sharks = 0; __device__ Shark* sharks[GRID_SIZE_Y * GRID_SIZE_X]; __device__ uint32_t num_fish = 0; __device__ Fish* fish[GRID_SIZE_Y * GRID_SIZE_X]; __global__ void print_checksum() { uint64_t chksum = 0; // Sorting of the array does not matter in the calculation here. 
for (int i = 0; i < num_sharks; ++i) { chksum += *(sharks[i]->position()->random_state()) % 601; } for (int i = 0; i < num_fish; ++i) { chksum += *(fish[i]->position()->random_state()) % 601; } printf("%" PRIu64 "\n", chksum); } __global__ void reset_fish_array() { num_fish = 0; } __global__ void reset_shark_array() { num_sharks = 0; } // One thread per cell. __global__ void find_fish() { assert(gridDim.x * blockDim.x == 1); num_fish = 0; for (int i = 0; i < decltype(memory_allocator)::kN; ++i) { if (memory_allocator.is_allocated<Fish>(i)) { fish[num_fish++] = memory_allocator.get_obj<Fish>(i); } } } __global__ void find_sharks() { assert(gridDim.x * blockDim.x == 1); num_sharks = 0; for (int i = 0; i < decltype(memory_allocator)::kN; ++i) { if (memory_allocator.is_allocated<Shark>(i)) { sharks[num_sharks++] = memory_allocator.get_obj<Shark>(i); } } } __global__ void find_cells() { assert(gridDim.x * blockDim.x == 1); int num_cells = 0; for (int i = 0; i < decltype(memory_allocator)::kN; ++i) { if (memory_allocator.is_allocated<Cell>(i)) { cells[num_cells++] = memory_allocator.get_obj<Cell>(i); } } } void generate_fish_array() { hipLaunchKernelGGL(( find_fish), dim3(1),dim3(1), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); } void generate_shark_array() { hipLaunchKernelGGL(( find_sharks), dim3(1),dim3(1), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); } __global__ void cell_prepare() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < GRID_SIZE_Y*GRID_SIZE_X) { cells[tid]->prepare(); } } __global__ void cell_decide() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < GRID_SIZE_Y*GRID_SIZE_X) { cells[tid]->decide(); } } __global__ void fish_prepare() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < num_fish) { assert(fish[tid] != nullptr); fish[tid]->prepare(); } } __global__ void fish_update() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < num_fish) { assert(fish[tid] != nullptr); fish[tid]->update(); } } __global__ void shark_prepare() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < num_sharks) { assert(sharks[tid] != nullptr); sharks[tid]->prepare(); } } __global__ void shark_update() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < num_sharks) { assert(sharks[tid] != nullptr); sharks[tid]->update(); } } void generate_shark_fish_arrays() { generate_fish_array(); generate_shark_array(); } void step() { hipLaunchKernelGGL(( cell_prepare), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( fish_prepare), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( cell_decide), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( fish_update), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( cell_prepare), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( shark_prepare), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( cell_decide), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( shark_update), 
dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); } __global__ void init_memory_system() { initialize_allocator(); } void initialize() { //init the heap initHeap(512*1024U*1024U); hipLaunchKernelGGL(( init_memory_system), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( create_cells), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( setup_cells), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( find_cells), dim3(1),dim3(1), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); } __device__ uint32_t d_gui_map[GRID_SIZE_Y * GRID_SIZE_X]; uint32_t gui_map[GRID_SIZE_Y * GRID_SIZE_X]; __global__ void fill_gui_map() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < GRID_SIZE_Y*GRID_SIZE_X) { if (cells[tid]->agent() != nullptr) { d_gui_map[tid] = cells[tid]->agent()->type_identifier(); } else { d_gui_map[tid] = 0; } } } void update_gui_map() { hipLaunchKernelGGL(( fill_gui_map), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipMemcpyFromSymbol(gui_map, d_gui_map, sizeof(uint32_t)*GRID_SIZE_X*GRID_SIZE_Y, 0, hipMemcpyDeviceToHost); gpuErrchk(hipDeviceSynchronize()); } int h_num_fish = 0; int h_num_sharks = 0; void print_stats() { generate_fish_array(); generate_shark_array(); //printf("\n Fish: %i, Sharks: %i CHKSUM: ", h_num_fish, h_num_sharks); hipLaunchKernelGGL(( print_checksum), dim3(1), dim3(1), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); } int main(int argc, char* arvg[]) { //hipDeviceSetLimit(hipLimitMallocHeapSize, 256*1024*1024); initialize(); size_t heap_size; hipDeviceGetLimit(&heap_size, hipLimitMallocHeapSize); printf("CUDA heap size: %lu\n", heap_size); //printf("Computing...\n"); //int time_running = 0; for (int i = 0; i<1000; ++i) { if (i%50==0) { //print_stats(); //render(); //printf(" Time: %i usec", time_running); //time_running = 0; } generate_shark_fish_arrays(); // Printing: RUNNING TIME, NUM_FISH, NUM_SHARKS, CHKSUM, FISH_USE, FISH_ALLOC, SHARK_USE, SHARK_ALLOC auto time_before = std::chrono::system_clock::now(); step(); auto time_after = std::chrono::system_clock::now(); int time_running = std::chrono::duration_cast<std::chrono::microseconds>( time_after - time_before).count(); printf("%i,", time_running); print_stats(); //printf("\n"); } return 0; } } // namespace wa_tor int main(int argc, char* arvg[]) { return wa_tor::main(0, nullptr); }
2fd3a169778b481de6e9bf8cdc99c019ebea8964.cu
#define NDEBUG #include <chrono> #include <stdio.h> #include <assert.h> #include <inttypes.h> #include "wa-tor/aos/wator.h" //#include "wa-tor/aos/halloc_allocator.h" //#include "wa-tor/aos/scatteralloc_allocator.h" #include "wa-tor/aos/aos_allocator.h" //#include "wa-tor/aos/cuda_allocator.h" //#include "wa-tor/aos/mallocmc_allocator.h" #define SPAWN_THRESHOLD 4 #define ENERGY_BOOST 4 #define ENERGY_START 2 #define GRID_SIZE_X 2048 #define GRID_SIZE_Y 1024 #define THREADS_PER_BLOCK 256 #define OPTION_SHARK_DIE true #define OPTION_SHARK_SPAWN true #define OPTION_FISH_SPAWN true namespace wa_tor { __device__ uint32_t random_number(uint32_t* state, uint32_t max) { // Advance and return random state. // Source: https://en.wikipedia.org/wiki/Lehmer_random_number_generator assert(*state != 0); *state = static_cast<uint32_t>( static_cast<uint64_t>(*state) * 1103515245u + 12345) % 2147483648u; return ((*state) >> 7) % max; } __device__ uint32_t random_number(uint32_t* state) { // Advance and return random state. // Source: https://en.wikipedia.org/wiki/Lehmer_random_number_generator assert(*state != 0); *state = static_cast<uint32_t>( static_cast<uint64_t>(*state) * 1103515245u + 12345) % 2147483648u; return ((*state) >> 7); } __device__ Cell::Cell(uint32_t random_state) : random_state_(random_state), agent_(nullptr) { assert(random_state != 0); prepare(); } __device__ Agent* Cell::agent() const { return agent_; } __device__ void Cell::decide() { if (neighbor_request_[4]) { // This cell has priority. agent_->set_new_position(this); } else { uint8_t candidates[4]; uint8_t num_candidates = 0; for (int i = 0; i < 4; ++i) { if (neighbor_request_[i]) { candidates[num_candidates++] = i; } } if (num_candidates > 0) { uint32_t selected_index = random_number(&random_state_, num_candidates); neighbors_[candidates[selected_index]]->agent()->set_new_position(this); } } } __device__ void Cell::enter(Agent* agent) { assert(agent_ == nullptr); #ifndef NDEBUG // Ensure that no two agents are trying to enter this cell at the same time. uint64_t old_val = atomicExch(reinterpret_cast<unsigned long long int*>(&agent_), reinterpret_cast<unsigned long long int>(agent)); assert(old_val == 0); #else agent_ = agent; #endif agent->set_position(this); } __device__ bool Cell::has_fish() const { return agent_ != nullptr && agent_->type_identifier() == Fish::kTypeId; } __device__ bool Cell::has_shark() const { return agent_ != nullptr && agent_->type_identifier() == Shark::kTypeId; } __device__ bool Cell::is_free() const { return agent_ == nullptr; } __device__ void Cell::leave() { assert(agent_ != nullptr); agent_ = nullptr; } __device__ void Cell::prepare() { for (int i = 0; i < 5; ++i) { neighbor_request_[i] = false; } } __device__ uint32_t* Cell::random_state() { return &random_state_; } __device__ void Cell::request_random_fish_neighbor() { if (!request_random_neighbor<&Cell::has_fish>(agent_->random_state())) { // No fish found. Look for free cell. 
if (!request_random_neighbor<&Cell::is_free>(agent_->random_state())) { neighbor_request_[4] = true; } } } __device__ void Cell::request_random_free_neighbor() { if (!request_random_neighbor<&Cell::is_free>(agent_->random_state())) { neighbor_request_[4] = true; } } template<bool(Cell::*predicate)() const> __device__ bool Cell::request_random_neighbor(uint32_t* random_state) { uint8_t candidates[4]; uint8_t num_candidates = 0; for (int i = 0; i < 4; ++i) { if ((neighbors_[i]->*predicate)()) { candidates[num_candidates++] = i; } } if (num_candidates == 0) { return false; } else { uint32_t selected_index = random_number(random_state, num_candidates); uint8_t selected = candidates[selected_index]; uint8_t neighbor_index = (selected + 2) % 4; neighbors_[selected]->neighbor_request_[neighbor_index] = true; // Check correctness of neighbor calculation. assert(neighbors_[selected]->neighbors_[neighbor_index] == this); return true; } } __device__ void Cell::set_neighbors(Cell* left, Cell* top, Cell* right, Cell* bottom) { neighbors_[0] = left; neighbors_[1] = top; neighbors_[2] = right; neighbors_[3] = bottom; } __device__ Agent::Agent(uint32_t random_state, uint8_t type_identifier) : random_state_(random_state), type_identifier_(type_identifier) { assert(random_state != 0); } __device__ uint32_t* Agent::random_state() { return &random_state_; } __device__ void Agent::set_new_position(Cell* new_pos) { // Check for race condition. (This is not bullet proof.) assert(new_position_ == position_); new_position_ = new_pos; } __device__ Cell* Agent::position() const { return position_; } __device__ void Agent::set_position(Cell* cell) { position_ = cell; } // TODO: Verify that RTTI (dynamic_cast) does not work in device code. __device__ uint8_t Agent::type_identifier() const { return type_identifier_; } __device__ Fish::Fish(uint32_t random_state) : Agent(random_state, kTypeId), egg_timer_(random_state % SPAWN_THRESHOLD) { assert(random_state != 0); } __device__ void Fish::prepare() { assert(type_identifier() == kTypeId); egg_timer_++; // Fallback: Stay on current cell. new_position_ = position_; assert(position_ != nullptr); position_->request_random_free_neighbor(); } __device__ void Fish::update() { assert(type_identifier() == kTypeId); Cell* old_position = position_; if (old_position != new_position_) { old_position->leave(); new_position_->enter(this); if (OPTION_FISH_SPAWN && egg_timer_ > SPAWN_THRESHOLD) { uint32_t new_random_state = random_number(&random_state_) + 401; new_random_state = new_random_state != 0 ? new_random_state : random_state_; auto* new_fish = allocate<Fish>(new_random_state); assert(new_fish != nullptr); old_position->enter(new_fish); egg_timer_ = 0; } } } __device__ Shark::Shark(uint32_t random_state) : Agent(random_state, kTypeId), energy_(ENERGY_START), egg_timer_(random_state % SPAWN_THRESHOLD) { assert(random_state_ != 0); } __device__ void Shark::prepare() { assert(type_identifier() == kTypeId); egg_timer_++; energy_--; assert(position_ != nullptr); if (OPTION_SHARK_DIE && energy_ == 0) { // Do nothing. Shark will die. } else { // Fallback: Stay on current cell. 
new_position_ = position_; position_->request_random_fish_neighbor(); } } __device__ void Shark::update() { assert(type_identifier() == kTypeId); if (OPTION_SHARK_DIE && energy_ == 0) { position_->kill(); } else { Cell* old_position = position_; if (old_position != new_position_) { if (new_position_->has_fish()) { energy_ += ENERGY_BOOST; new_position_->kill(); } old_position->leave(); new_position_->enter(this); if (OPTION_SHARK_SPAWN && egg_timer_ > SPAWN_THRESHOLD) { assert(random_state_ != 0); uint32_t new_random_state = random_number(&random_state_) + 601; new_random_state = new_random_state != 0 ? new_random_state : random_state_; auto* new_shark = allocate<Shark>(new_random_state); assert(new_shark != nullptr); old_position->enter(new_shark); egg_timer_ = 0; } } } } __device__ void Cell::kill() { assert(agent_ != nullptr); if (agent_->type_identifier() == 1) { deallocate_untyped<1>(agent_); } else if (agent_->type_identifier() == 2) { deallocate_untyped<2>(agent_); } else { // Unknown type. assert(false); } agent_ = nullptr; } // ----- KERNELS ----- __device__ Cell* cells[GRID_SIZE_X * GRID_SIZE_Y]; __global__ void create_cells() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < GRID_SIZE_Y*GRID_SIZE_X) { int x = tid % GRID_SIZE_X; int y = tid / GRID_SIZE_X; float init_state = __logf(tid + 401); uint32_t init_state_int = *reinterpret_cast<uint32_t*>(&init_state); // Cell* new_cell = new Cell(init_state_int); Cell* new_cell = allocate<Cell>(601*x*x*y + init_state_int); assert(new_cell != nullptr); cells[tid] = new_cell; } } __global__ void setup_cells() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < GRID_SIZE_Y*GRID_SIZE_X) { int x = tid % GRID_SIZE_X; int y = tid / GRID_SIZE_X; Cell* left = x > 0 ? cells[y*GRID_SIZE_X + x - 1] : cells[y*GRID_SIZE_X + GRID_SIZE_X - 1]; Cell* right = x < GRID_SIZE_X - 1 ? cells[y*GRID_SIZE_X + x + 1] : cells[y*GRID_SIZE_X]; Cell* top = y > 0 ? cells[(y - 1)*GRID_SIZE_X + x] : cells[(GRID_SIZE_Y - 1)*GRID_SIZE_X + x]; Cell* bottom = y < GRID_SIZE_Y - 1 ? cells[(y + 1)*GRID_SIZE_X + x] : cells[x]; // left, top, right, bottom cells[tid]->set_neighbors(left, top, right, bottom); // Initialize with random agent. uint32_t agent_type = random_number(cells[tid]->random_state(), 4); if (agent_type == 0) { auto* agent = allocate<Fish>(*(cells[tid]->random_state())); assert(agent != nullptr); cells[tid]->enter(agent); } else if (agent_type == 1) { auto* agent = allocate<Shark>(*(cells[tid]->random_state())); assert(agent != nullptr); cells[tid]->enter(agent); } else { // Free cell. } } } // Problem: It is not easy to keep track of all objects of a class if they are // dynamically allocated. But we want to benchmark the performance of new/ // delete in CUDA. // Solution: Fill these arrays in a separate kernel by iterating over all // cells, storing agents in the respective array slots, and compacting the // arrays. We do not measure the performance of these steps. __device__ uint32_t num_sharks = 0; __device__ Shark* sharks[GRID_SIZE_Y * GRID_SIZE_X]; __device__ uint32_t num_fish = 0; __device__ Fish* fish[GRID_SIZE_Y * GRID_SIZE_X]; __global__ void print_checksum() { uint64_t chksum = 0; // Sorting of the array does not matter in the calculation here. 
for (int i = 0; i < num_sharks; ++i) { chksum += *(sharks[i]->position()->random_state()) % 601; } for (int i = 0; i < num_fish; ++i) { chksum += *(fish[i]->position()->random_state()) % 601; } printf("%" PRIu64 "\n", chksum); } __global__ void reset_fish_array() { num_fish = 0; } __global__ void reset_shark_array() { num_sharks = 0; } // One thread per cell. __global__ void find_fish() { assert(gridDim.x * blockDim.x == 1); num_fish = 0; for (int i = 0; i < decltype(memory_allocator)::kN; ++i) { if (memory_allocator.is_allocated<Fish>(i)) { fish[num_fish++] = memory_allocator.get_obj<Fish>(i); } } } __global__ void find_sharks() { assert(gridDim.x * blockDim.x == 1); num_sharks = 0; for (int i = 0; i < decltype(memory_allocator)::kN; ++i) { if (memory_allocator.is_allocated<Shark>(i)) { sharks[num_sharks++] = memory_allocator.get_obj<Shark>(i); } } } __global__ void find_cells() { assert(gridDim.x * blockDim.x == 1); int num_cells = 0; for (int i = 0; i < decltype(memory_allocator)::kN; ++i) { if (memory_allocator.is_allocated<Cell>(i)) { cells[num_cells++] = memory_allocator.get_obj<Cell>(i); } } } void generate_fish_array() { find_fish<<<1,1>>>(); gpuErrchk(cudaDeviceSynchronize()); } void generate_shark_array() { find_sharks<<<1,1>>>(); gpuErrchk(cudaDeviceSynchronize()); } __global__ void cell_prepare() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < GRID_SIZE_Y*GRID_SIZE_X) { cells[tid]->prepare(); } } __global__ void cell_decide() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < GRID_SIZE_Y*GRID_SIZE_X) { cells[tid]->decide(); } } __global__ void fish_prepare() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < num_fish) { assert(fish[tid] != nullptr); fish[tid]->prepare(); } } __global__ void fish_update() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < num_fish) { assert(fish[tid] != nullptr); fish[tid]->update(); } } __global__ void shark_prepare() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < num_sharks) { assert(sharks[tid] != nullptr); sharks[tid]->prepare(); } } __global__ void shark_update() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < num_sharks) { assert(sharks[tid] != nullptr); sharks[tid]->update(); } } void generate_shark_fish_arrays() { generate_fish_array(); generate_shark_array(); } void step() { cell_prepare<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); gpuErrchk(cudaDeviceSynchronize()); fish_prepare<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); gpuErrchk(cudaDeviceSynchronize()); cell_decide<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); gpuErrchk(cudaDeviceSynchronize()); fish_update<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); gpuErrchk(cudaDeviceSynchronize()); cell_prepare<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); gpuErrchk(cudaDeviceSynchronize()); shark_prepare<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); gpuErrchk(cudaDeviceSynchronize()); cell_decide<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); gpuErrchk(cudaDeviceSynchronize()); shark_update<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); gpuErrchk(cudaDeviceSynchronize()); } __global__ void init_memory_system() { initialize_allocator(); } void initialize() { //init the heap initHeap(512*1024U*1024U); init_memory_system<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); 
gpuErrchk(cudaDeviceSynchronize()); create_cells<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); gpuErrchk(cudaDeviceSynchronize()); setup_cells<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); gpuErrchk(cudaDeviceSynchronize()); find_cells<<<1,1>>>(); gpuErrchk(cudaDeviceSynchronize()); } __device__ uint32_t d_gui_map[GRID_SIZE_Y * GRID_SIZE_X]; uint32_t gui_map[GRID_SIZE_Y * GRID_SIZE_X]; __global__ void fill_gui_map() { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < GRID_SIZE_Y*GRID_SIZE_X) { if (cells[tid]->agent() != nullptr) { d_gui_map[tid] = cells[tid]->agent()->type_identifier(); } else { d_gui_map[tid] = 0; } } } void update_gui_map() { fill_gui_map<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(); gpuErrchk(cudaDeviceSynchronize()); cudaMemcpyFromSymbol(gui_map, d_gui_map, sizeof(uint32_t)*GRID_SIZE_X*GRID_SIZE_Y, 0, cudaMemcpyDeviceToHost); gpuErrchk(cudaDeviceSynchronize()); } int h_num_fish = 0; int h_num_sharks = 0; void print_stats() { generate_fish_array(); generate_shark_array(); //printf("\n Fish: %i, Sharks: %i CHKSUM: ", h_num_fish, h_num_sharks); print_checksum<<<1, 1>>>(); gpuErrchk(cudaDeviceSynchronize()); } int main(int argc, char* arvg[]) { //cudaDeviceSetLimit(cudaLimitMallocHeapSize, 256*1024*1024); initialize(); size_t heap_size; cudaDeviceGetLimit(&heap_size, cudaLimitMallocHeapSize); printf("CUDA heap size: %lu\n", heap_size); //printf("Computing...\n"); //int time_running = 0; for (int i = 0; i<1000; ++i) { if (i%50==0) { //print_stats(); //render(); //printf(" Time: %i usec", time_running); //time_running = 0; } generate_shark_fish_arrays(); // Printing: RUNNING TIME, NUM_FISH, NUM_SHARKS, CHKSUM, FISH_USE, FISH_ALLOC, SHARK_USE, SHARK_ALLOC auto time_before = std::chrono::system_clock::now(); step(); auto time_after = std::chrono::system_clock::now(); int time_running = std::chrono::duration_cast<std::chrono::microseconds>( time_after - time_before).count(); printf("%i,", time_running); print_stats(); //printf("\n"); } return 0; } } // namespace wa_tor int main(int argc, char* arvg[]) { return wa_tor::main(0, nullptr); }
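// setup_cells() in the pair above wires the grid as a torus: x wraps left/right and
// y wraps top/bottom. This host-only sketch reproduces that index arithmetic so it
// can be checked without a kernel launch; GRID_W and GRID_H are illustrative
// stand-ins for GRID_SIZE_X and GRID_SIZE_Y.
#include <cstdio>

static const int GRID_W = 8;
static const int GRID_H = 4;

static int left_of(int x, int y)   { return y * GRID_W + (x > 0 ? x - 1 : GRID_W - 1); }
static int right_of(int x, int y)  { return y * GRID_W + (x < GRID_W - 1 ? x + 1 : 0); }
static int top_of(int x, int y)    { return (y > 0 ? y - 1 : GRID_H - 1) * GRID_W + x; }
static int bottom_of(int x, int y) { return (y < GRID_H - 1 ? y + 1 : 0) * GRID_W + x; }

int main()
{
    // Corner cell (0, 0): its left neighbor is in the last column and its top
    // neighbor in the last row, matching the ternaries in setup_cells().
    printf("left(0,0)=%d right(0,0)=%d top(0,0)=%d bottom(0,0)=%d\n",
           left_of(0, 0), right_of(0, 0), top_of(0, 0), bottom_of(0, 0));
    return 0;
}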
6dff3d044528600f0021963c56dc488fd471ab93.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"

__global__ void phase1(KEY_PTR keys, unsigned int * offset, unsigned int length,
                       unsigned int* count, unsigned int bucketCount)
{
    unsigned int tid = (blockDim.x*blockDim.y * gridDim.x*blockIdx.y) +
                       (blockDim.x*blockDim.y*blockIdx.x) + (blockDim.x*threadIdx.y) + threadIdx.x;
    if (tid < length)
    {
        KEY_T key = keys[tid];
        unsigned int bucket = hash_h(key, bucketCount);
        offset[tid] = atomicInc(count + bucket, MAX_INT);
    }
    __syncthreads();
}

__global__ void copyToBucket(KEY_PTR keys, VALUE_PTR values, unsigned int * offset,
                             unsigned int length, unsigned int* start, unsigned int bucketCount,
                             KEY_PTR bufferK, VALUE_PTR bufferV)
{
    unsigned tid = (blockDim.x*blockDim.y * gridDim.x*blockIdx.y) +
                   (blockDim.x*blockDim.y*blockIdx.x) + (blockDim.x*threadIdx.y) + threadIdx.x;
    if (tid < length)
    {
        KEY_T key = keys[tid];
        unsigned int bucket = hash_h(key, bucketCount);
        VALUE_T value = values[tid];
        unsigned int index = start[bucket] + offset[tid];
        //index=(index * BUCKET_ITEM_SIZE);
        bufferK[index] = key;
        bufferV[index] = value;
        //*(BUFFER_ITEM_KEY_PTR(buffer,index))=key;
        //*(BUFFER_ITEM_VALUE_PTR(buffer,index))=value;
    }
}

__global__ void bucketSort(KEY_PTR bufferK, VALUE_PTR bufferV, unsigned int * start,
                           unsigned int * bucketSize, unsigned int bucketCount,
                           KEY_PTR TK, VALUE_PTR TV)
{
    __shared__ KEY_T keys[MAX_BUCKET_ITEM];
    unsigned int keyCount[MAX_BUCKET_ITEM/32];
    //unsigned int keyCount=0;
    unsigned int blockOffset = start[blockIdx.x];
    unsigned int size = bucketSize[blockIdx.x];
    unsigned int chunks = size >> 5;
    chunks = (chunks << 5 == size) ? chunks : chunks + 1;

    for (unsigned int j = 0; j < chunks; j++) {
        if ((j << 5) + threadIdx.x < size)
            keys[(j << 5) + threadIdx.x] = bufferK[blockOffset + (j << 5) + threadIdx.x];
    }
    __syncthreads();

    for (unsigned int j = 0; j < chunks; j++) {
        if ((j << 5) + threadIdx.x < size) {
            keyCount[j] = 0;
            for (int i = 0; i < size; i++) {
                //if( keys[(i<<5)+threadIdx.x]> keys[i] ) keyCount++;
                keyCount[j] = (keys[(j << 5) + threadIdx.x] > keys[i]) ? keyCount[j] + 1 : keyCount[j];
            }
        }
    }
    __syncthreads();

    for (unsigned int j = 0; j < chunks; j++) {
        if ((j << 5) + threadIdx.x < size) {
            TK[GET_KEY_INDEX(blockIdx.x, keyCount[j])] = keys[(j << 5) + threadIdx.x];
            TV[GET_VALUE_INDEX(blockIdx.x, keyCount[j])] = bufferV[blockOffset + (j << 5) + threadIdx.x];
        }
    }
}
6dff3d044528600f0021963c56dc488fd471ab93.cu
#include "common.h" __global__ void phase1( KEY_PTR keys, unsigned int * offset, unsigned int length, unsigned int* count, unsigned int bucketCount) { unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid<length) { KEY_T key=keys[tid]; unsigned int bucket=hash_h(key,bucketCount); offset[tid]=atomicInc (count+bucket,MAX_INT); } __syncthreads(); } __global__ void copyToBucket( KEY_PTR keys, VALUE_PTR values, unsigned int * offset, unsigned int length, unsigned int* start, unsigned int bucketCount, KEY_PTR bufferK, VALUE_PTR bufferV) { unsigned tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid<length) { KEY_T key =keys[tid]; unsigned int bucket=hash_h(key,bucketCount); VALUE_T value=values[tid]; unsigned int index=start[bucket]+offset[tid]; //index=(index * BUCKET_ITEM_SIZE); bufferK[index]=key; bufferV[index]=value; //*(BUFFER_ITEM_KEY_PTR(buffer,index))=key; //*(BUFFER_ITEM_VALUE_PTR(buffer,index))=value; } } __global__ void bucketSort(KEY_PTR bufferK,VALUE_PTR bufferV, unsigned int * start,unsigned int * bucketSize,unsigned int bucketCount,KEY_PTR TK,VALUE_PTR TV){ __shared__ KEY_T keys[MAX_BUCKET_ITEM]; unsigned int keyCount[MAX_BUCKET_ITEM/32]; //unsigned int keyCount=0; unsigned int blockOffset=start[blockIdx.x]; unsigned int size=bucketSize[blockIdx.x]; unsigned int chunks=size>>5; chunks= (chunks<<5==size)?chunks:chunks+1; for(unsigned int j=0;j<chunks;j++){ if((j<<5)+threadIdx.x<size) keys[(j<<5)+threadIdx.x]=bufferK[blockOffset+(j<<5)+threadIdx.x];// } __syncthreads(); for(unsigned int j=0;j<chunks;j++){ if((j<<5)+threadIdx.x<size){ keyCount[j]=0; for(int i=0; i<size; i++){ //if( keys[(i<<5)+threadIdx.x]> keys[i] ) keyCount++; keyCount[j]=( keys[(j<<5)+threadIdx.x]> keys[i] )?keyCount[j]+1:keyCount[j]; } } } __syncthreads(); for(unsigned int j=0;j<chunks;j++){ if((j<<5)+threadIdx.x<size){ TK[GET_KEY_INDEX(blockIdx.x,keyCount[j])]=keys[(j<<5)+threadIdx.x]; TV[GET_VALUE_INDEX(blockIdx.x,keyCount[j])]=bufferV[blockOffset+(j<<5)+threadIdx.x]; } } }
fa897f968703491614dca28bc6367a3df1473a63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_CHANNELWISE_SCALE_BIAS_LAYER_INSTANTIATE #include "lbann/layers/learning/channelwise_scale_bias.hpp" #include "lbann/utils/gpu/helpers.hpp" #ifdef HYDROGEN_HAVE_CUB #ifdef LBANN_HAS_CUDA #include "hipcub/hipcub.hpp" #elif defined LBANN_HAS_ROCM #include "hipcub/block/block_reduce.hpp" namespace cub = hipcub; #endif // LBANN_HAS_CUDA #endif // HYDROGEN_HAVE_CUB namespace lbann { namespace { /** * Block dimensions: bsizex x bsizey x 1 * * Grid dimensions: (channel_size / bsizex) x (width / bsizey) x num_channels */ template <typename TensorDataType> __global__ void fp_kernel(size_t num_channels, size_t channel_size, size_t width, const TensorDataType* __restrict__ input, size_t input_ldim, TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ scale, const TensorDataType* __restrict__ bias) { // Indices const size_t bidz = blockIdx.z; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nblocksz = gridDim.z; // Apply channel-wise scale/bias for (size_t channel = bidz; channel < num_channels; channel += nblocksz) { const auto a = scale[channel]; const auto b = bias[channel]; const size_t row_start = channel * channel_size; const size_t row_end = (channel + 1) * channel_size; const size_t col_start = 0; const size_t col_end = width; for (size_t col = col_start+gidy; col < col_end; col += nthreadsy) { for (size_t row = row_start+gidx; row < row_end; row += nthreadsx) { const auto& x = input[row + col*input_ldim]; auto& y = output[row + col*output_ldim]; y = a * x + b; } } } } /** * Block dimensions: bsizex x bsizey x 1 * * Grid dimensions: (channel_size / bsizex) x (width / bsizey) x num_channels */ template <size_t bsizex, size_t bsizey, typename TensorDataType> __global__ void bp_kernel(size_t num_channels, size_t channel_size, size_t width, const TensorDataType* __restrict__ input, size_t input_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, TensorDataType* __restrict__ gradient_wrt_input, size_t gradient_wrt_input_ldim, const TensorDataType* __restrict__ scale, 
TensorDataType* __restrict__ gradient_wrt_scale, TensorDataType* __restrict__ gradient_wrt_bias) { // Indices const size_t tid = threadIdx.x + threadIdx.y * blockDim.x; const size_t bidz = blockIdx.z; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nblocksz = gridDim.z; for (size_t channel = bidz; channel < num_channels; channel += nblocksz) { // Accumulate gradient contributions for thread in private memory TensorDataType private_da{0}, private_db{0}; const auto a = scale[channel]; const size_t row_start = channel * channel_size; const size_t row_end = (channel + 1) * channel_size; const size_t col_start = 0; const size_t col_end = width; for (size_t col = col_start+gidy; col < col_end; col += nthreadsy) { for (size_t row = row_start+gidx; row < row_end; row += nthreadsx) { const auto& x = input[row + col*input_ldim]; const auto& dy = gradient_wrt_output[row + col*gradient_wrt_output_ldim]; auto& dx = gradient_wrt_input[row + col*gradient_wrt_input_ldim]; private_da += x * dy; private_db += dy; dx = a * dy; } } // Accumulate gradient contributions for block and add to result #ifdef HYDROGEN_HAVE_CUB constexpr auto reduce_algo = hipcub::BLOCK_REDUCE_WARP_REDUCTIONS; using BlockReduce = hipcub::BlockReduce<TensorDataType, bsizex, reduce_algo, bsizey>; __shared__ typename BlockReduce::TempStorage workspace; __syncthreads(); const auto da = BlockReduce(workspace).Sum(private_da); if (tid == 0) { gpu_lib::atomic_add(&gradient_wrt_scale[channel], da); } __syncthreads(); const auto db = BlockReduce(workspace).Sum(private_db); if (tid == 0) { gpu_lib::atomic_add(&gradient_wrt_bias[channel], db); } #else __shared__ TensorDataType workspace[bsizex*bsizey]; workspace[tid] = private_da; for (size_t stride = bsizex*bsizey/2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { workspace[tid] += workspace[tid + stride]; } } if (tid == 0) { gpu_lib::atomic_add(&gradient_wrt_scale[channel], workspace[0]); } workspace[tid] = private_db; for (size_t stride = bsizex*bsizey/2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { workspace[tid] += workspace[tid + stride]; } } if (tid == 0) { gpu_lib::atomic_add(&gradient_wrt_bias[channel], workspace[0]); } #endif // HYDROGEN_HAVE_CUB } } } // namespace template <typename TensorDataType, data_layout T_layout, El::Device Dev> void channelwise_scale_bias_layer<TensorDataType, T_layout, Dev>::fp_compute() { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; // Local matrices const auto& local_input = dynamic_cast<const GPUMatType&>(this->get_local_prev_activations()); auto& local_output = dynamic_cast<GPUMatType&>(this->get_local_activations()); const auto& local_weights = dynamic_cast<const GPUMatType&>(this->weights_values(0).LockedMatrix()); const auto local_scale = El::LockedView(local_weights, El::ALL, El::IR(0)); const auto local_bias = El::LockedView(local_weights, El::ALL, El::IR(1)); // Dimensions // Note: channel_size is the number of input entries per channel and // local_width is the number of local mini-batch samples. 
const auto dims = this->get_output_dims(); const El::Int num_channels = dims[0]; const El::Int channel_size = std::accumulate(dims.begin() + 1, dims.end(), 1, std::multiplies<int>()); const El::Int local_width = local_input.Width(); // Apply channel-wise scale and bias if (!local_input.IsEmpty()) { constexpr size_t block_size_x = 256; constexpr size_t block_size_y = 1; dim3 block_dims, grid_dims; block_dims.x = block_size_x; block_dims.y = block_size_y; grid_dims.x = (channel_size + block_size_x - 1) / block_size_x; grid_dims.y = (local_width + block_size_y - 1) / block_size_y; grid_dims.z = num_channels; gpu_lib::clip_grid_dims(grid_dims); auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input), gpu::get_sync_info(local_output), gpu::get_sync_info(local_weights)); hydrogen::gpu::LaunchKernel( fp_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, num_channels, channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim(), local_scale.LockedBuffer(), local_bias.LockedBuffer()); } } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void channelwise_scale_bias_layer<TensorDataType, T_layout, Dev>::bp_compute() { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; // Local matrices const auto& local_input = dynamic_cast<const GPUMatType&>(this->get_local_prev_activations()); const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(this->get_local_prev_error_signals()); auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(this->get_local_error_signals()); const auto& local_weights = dynamic_cast<const GPUMatType&>(this->weights_values(0).LockedMatrix()); auto& local_gradient_wrt_weights = dynamic_cast<GPUMatType&>(this->m_weights_gradient->Matrix()); const auto local_scale = El::LockedView(local_weights, El::ALL, El::IR(0)); auto local_gradient_wrt_scale = El::View(local_gradient_wrt_weights, El::ALL, El::IR(0)); auto local_gradient_wrt_bias = El::View(local_gradient_wrt_weights, El::ALL, El::IR(1)); // Dimensions // Note: channel_size is the number of input entries per channel and // local_width is the number of local mini-batch samples. 
const auto dims = this->get_output_dims(); const El::Int num_channels = dims[0]; const El::Int channel_size = std::accumulate(dims.begin() + 1, dims.end(), 1, std::multiplies<int>()); const El::Int local_width = local_input.Width(); // Compute gradients El::Zero(local_gradient_wrt_weights); if (!local_input.IsEmpty()) { constexpr size_t block_size_x = 256; constexpr size_t block_size_y = 1; dim3 block_dims, grid_dims; block_dims.x = block_size_x; block_dims.y = block_size_y; grid_dims.x = (channel_size + block_size_x - 1) / block_size_x; grid_dims.y = (local_width + block_size_y - 1) / block_size_y; grid_dims.z = num_channels; gpu_lib::clip_grid_dims(grid_dims); auto multisync = El::MakeMultiSync( gpu::get_sync_info(local_input), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_gradient_wrt_weights), gpu::get_sync_info(local_weights)); hydrogen::gpu::LaunchKernel( bp_kernel<block_size_x, block_size_y, TensorDataType>, grid_dims, block_dims, 0, multisync, num_channels, channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim(), local_scale.LockedBuffer(), local_gradient_wrt_scale.Buffer(), local_gradient_wrt_bias.Buffer()); } // Update optimizer with gradient auto* opt = this->get_weights(0).get_optimizer(); if (opt != nullptr) { opt->add_to_gradient(*this->m_weights_gradient, El::TypeTraits<TensorDataType>::One(), true); } } #define PROTO(T) \ template class channelwise_scale_bias_layer< \ T, data_layout::DATA_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
fa897f968703491614dca28bc6367a3df1473a63.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_CHANNELWISE_SCALE_BIAS_LAYER_INSTANTIATE #include "lbann/layers/learning/channelwise_scale_bias.hpp" #include "lbann/utils/gpu/helpers.hpp" #ifdef HYDROGEN_HAVE_CUB #ifdef LBANN_HAS_CUDA #include "cub/block/block_reduce.cuh" #elif defined LBANN_HAS_ROCM #include "hipcub/block/block_reduce.hpp" namespace cub = hipcub; #endif // LBANN_HAS_CUDA #endif // HYDROGEN_HAVE_CUB namespace lbann { namespace { /** * Block dimensions: bsizex x bsizey x 1 * * Grid dimensions: (channel_size / bsizex) x (width / bsizey) x num_channels */ template <typename TensorDataType> __global__ void fp_kernel(size_t num_channels, size_t channel_size, size_t width, const TensorDataType* __restrict__ input, size_t input_ldim, TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ scale, const TensorDataType* __restrict__ bias) { // Indices const size_t bidz = blockIdx.z; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nblocksz = gridDim.z; // Apply channel-wise scale/bias for (size_t channel = bidz; channel < num_channels; channel += nblocksz) { const auto a = scale[channel]; const auto b = bias[channel]; const size_t row_start = channel * channel_size; const size_t row_end = (channel + 1) * channel_size; const size_t col_start = 0; const size_t col_end = width; for (size_t col = col_start+gidy; col < col_end; col += nthreadsy) { for (size_t row = row_start+gidx; row < row_end; row += nthreadsx) { const auto& x = input[row + col*input_ldim]; auto& y = output[row + col*output_ldim]; y = a * x + b; } } } } /** * Block dimensions: bsizex x bsizey x 1 * * Grid dimensions: (channel_size / bsizex) x (width / bsizey) x num_channels */ template <size_t bsizex, size_t bsizey, typename TensorDataType> __global__ void bp_kernel(size_t num_channels, size_t channel_size, size_t width, const TensorDataType* __restrict__ input, size_t input_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, TensorDataType* __restrict__ gradient_wrt_input, size_t gradient_wrt_input_ldim, const TensorDataType* __restrict__ scale, TensorDataType* __restrict__ gradient_wrt_scale, TensorDataType* __restrict__ 
gradient_wrt_bias) { // Indices const size_t tid = threadIdx.x + threadIdx.y * blockDim.x; const size_t bidz = blockIdx.z; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nblocksz = gridDim.z; for (size_t channel = bidz; channel < num_channels; channel += nblocksz) { // Accumulate gradient contributions for thread in private memory TensorDataType private_da{0}, private_db{0}; const auto a = scale[channel]; const size_t row_start = channel * channel_size; const size_t row_end = (channel + 1) * channel_size; const size_t col_start = 0; const size_t col_end = width; for (size_t col = col_start+gidy; col < col_end; col += nthreadsy) { for (size_t row = row_start+gidx; row < row_end; row += nthreadsx) { const auto& x = input[row + col*input_ldim]; const auto& dy = gradient_wrt_output[row + col*gradient_wrt_output_ldim]; auto& dx = gradient_wrt_input[row + col*gradient_wrt_input_ldim]; private_da += x * dy; private_db += dy; dx = a * dy; } } // Accumulate gradient contributions for block and add to result #ifdef HYDROGEN_HAVE_CUB constexpr auto reduce_algo = cub::BLOCK_REDUCE_WARP_REDUCTIONS; using BlockReduce = cub::BlockReduce<TensorDataType, bsizex, reduce_algo, bsizey>; __shared__ typename BlockReduce::TempStorage workspace; __syncthreads(); const auto da = BlockReduce(workspace).Sum(private_da); if (tid == 0) { gpu_lib::atomic_add(&gradient_wrt_scale[channel], da); } __syncthreads(); const auto db = BlockReduce(workspace).Sum(private_db); if (tid == 0) { gpu_lib::atomic_add(&gradient_wrt_bias[channel], db); } #else __shared__ TensorDataType workspace[bsizex*bsizey]; workspace[tid] = private_da; for (size_t stride = bsizex*bsizey/2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { workspace[tid] += workspace[tid + stride]; } } if (tid == 0) { gpu_lib::atomic_add(&gradient_wrt_scale[channel], workspace[0]); } workspace[tid] = private_db; for (size_t stride = bsizex*bsizey/2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { workspace[tid] += workspace[tid + stride]; } } if (tid == 0) { gpu_lib::atomic_add(&gradient_wrt_bias[channel], workspace[0]); } #endif // HYDROGEN_HAVE_CUB } } } // namespace template <typename TensorDataType, data_layout T_layout, El::Device Dev> void channelwise_scale_bias_layer<TensorDataType, T_layout, Dev>::fp_compute() { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; // Local matrices const auto& local_input = dynamic_cast<const GPUMatType&>(this->get_local_prev_activations()); auto& local_output = dynamic_cast<GPUMatType&>(this->get_local_activations()); const auto& local_weights = dynamic_cast<const GPUMatType&>(this->weights_values(0).LockedMatrix()); const auto local_scale = El::LockedView(local_weights, El::ALL, El::IR(0)); const auto local_bias = El::LockedView(local_weights, El::ALL, El::IR(1)); // Dimensions // Note: channel_size is the number of input entries per channel and // local_width is the number of local mini-batch samples. 
const auto dims = this->get_output_dims(); const El::Int num_channels = dims[0]; const El::Int channel_size = std::accumulate(dims.begin() + 1, dims.end(), 1, std::multiplies<int>()); const El::Int local_width = local_input.Width(); // Apply channel-wise scale and bias if (!local_input.IsEmpty()) { constexpr size_t block_size_x = 256; constexpr size_t block_size_y = 1; dim3 block_dims, grid_dims; block_dims.x = block_size_x; block_dims.y = block_size_y; grid_dims.x = (channel_size + block_size_x - 1) / block_size_x; grid_dims.y = (local_width + block_size_y - 1) / block_size_y; grid_dims.z = num_channels; gpu_lib::clip_grid_dims(grid_dims); auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input), gpu::get_sync_info(local_output), gpu::get_sync_info(local_weights)); hydrogen::gpu::LaunchKernel( fp_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, num_channels, channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim(), local_scale.LockedBuffer(), local_bias.LockedBuffer()); } } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void channelwise_scale_bias_layer<TensorDataType, T_layout, Dev>::bp_compute() { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; // Local matrices const auto& local_input = dynamic_cast<const GPUMatType&>(this->get_local_prev_activations()); const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(this->get_local_prev_error_signals()); auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(this->get_local_error_signals()); const auto& local_weights = dynamic_cast<const GPUMatType&>(this->weights_values(0).LockedMatrix()); auto& local_gradient_wrt_weights = dynamic_cast<GPUMatType&>(this->m_weights_gradient->Matrix()); const auto local_scale = El::LockedView(local_weights, El::ALL, El::IR(0)); auto local_gradient_wrt_scale = El::View(local_gradient_wrt_weights, El::ALL, El::IR(0)); auto local_gradient_wrt_bias = El::View(local_gradient_wrt_weights, El::ALL, El::IR(1)); // Dimensions // Note: channel_size is the number of input entries per channel and // local_width is the number of local mini-batch samples. 
const auto dims = this->get_output_dims(); const El::Int num_channels = dims[0]; const El::Int channel_size = std::accumulate(dims.begin() + 1, dims.end(), 1, std::multiplies<int>()); const El::Int local_width = local_input.Width(); // Compute gradients El::Zero(local_gradient_wrt_weights); if (!local_input.IsEmpty()) { constexpr size_t block_size_x = 256; constexpr size_t block_size_y = 1; dim3 block_dims, grid_dims; block_dims.x = block_size_x; block_dims.y = block_size_y; grid_dims.x = (channel_size + block_size_x - 1) / block_size_x; grid_dims.y = (local_width + block_size_y - 1) / block_size_y; grid_dims.z = num_channels; gpu_lib::clip_grid_dims(grid_dims); auto multisync = El::MakeMultiSync( gpu::get_sync_info(local_input), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_gradient_wrt_weights), gpu::get_sync_info(local_weights)); hydrogen::gpu::LaunchKernel( bp_kernel<block_size_x, block_size_y, TensorDataType>, grid_dims, block_dims, 0, multisync, num_channels, channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim(), local_scale.LockedBuffer(), local_gradient_wrt_scale.Buffer(), local_gradient_wrt_bias.Buffer()); } // Update optimizer with gradient auto* opt = this->get_weights(0).get_optimizer(); if (opt != nullptr) { opt->add_to_gradient(*this->m_weights_gradient, El::TypeTraits<TensorDataType>::One(), true); } } #define PROTO(T) \ template class channelwise_scale_bias_layer< \ T, data_layout::DATA_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
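//----------------------------------------------------------------------------
// bp_kernel above reduces the per-thread gradient contributions across the
// thread block, either with cub::BlockReduce (when HYDROGEN_HAVE_CUB is
// defined) or with a shared-memory tree reduction fallback.  The
// self-contained CUDA kernel below is a minimal sketch of that fallback
// pattern only; the kernel name, block size and the single global accumulator
// are illustrative and not LBANN code.
#include <cuda_runtime.h>

constexpr int BSIZE = 256;  // launch with blockDim.x == BSIZE (power of two)

__global__ void block_sum_kernel(const float* __restrict__ in,
                                 float* __restrict__ out,
                                 size_t n)
{
  __shared__ float workspace[BSIZE];
  const size_t tid = threadIdx.x;
  const size_t gid = blockIdx.x * blockDim.x + threadIdx.x;

  // Each thread first accumulates its own partial sum in a register,
  // mirroring private_da / private_db in bp_kernel.
  float private_sum = 0.f;
  for (size_t i = gid; i < n; i += blockDim.x * gridDim.x) {
    private_sum += in[i];
  }

  // Shared-memory tree reduction across the block (the #else branch above).
  workspace[tid] = private_sum;
  for (size_t stride = BSIZE / 2; stride > 0; stride /= 2) {
    __syncthreads();
    if (tid < stride) { workspace[tid] += workspace[tid + stride]; }
  }

  // One atomic per block adds the block total to the global result, analogous
  // to gpu_lib::atomic_add on gradient_wrt_scale / gradient_wrt_bias.
  if (tid == 0) { atomicAdd(out, workspace[0]); }
}
//----------------------------------------------------------------------------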
cb60456e545f1d12e548ef45387daef21dc463f8.hip
// !!! This is a file automatically generated by hipify!!! #include "transmit.h" __host__ Code *create_code(){ Code *code = (Code *)malloc(sizeof(Code)); code->next = NULL; return code; } __host__ Source *create_source(){ Source *source = (Source *)malloc(sizeof(Source)); source->codes = create_code(); source->codes_len = 0; source->next = NULL; return source; } __host__ void deleteSource(Source *source){ Source *next; Code *next_code; while(source){ next = source->next; while(source->codes){ next_code = source->codes->next; free(source->codes); source->codes = next_code; } free(source); source = next; } } __host__ Source *get_strings(FILE *in, char delimiter){ Source *source = create_source(); Source *cur_source = source; Source *prev_source = NULL; Code *code = cur_source->codes; int i = 0; char c; for(c = fgetc(in); c != EOF; c = fgetc(in)){ if(c == delimiter){ if(cur_source->codes_len > 0){ code->code[i] = '\0'; cur_source->next = create_source(); prev_source = cur_source; cur_source = cur_source->next; code = cur_source->codes; i = 0; } } else{ if(i == CODE_LENGTH){ code->next = create_code(); code = code->next; i = 0; } code->code[i++] = c; cur_source->codes_len++; } } if(cur_source->codes_len == 0){ if(prev_source == NULL){ return NULL; } prev_source->next = NULL; deleteSource(cur_source); } return source; } __host__ int pack_strings(int **data, Source *source){ Source *cur_source; int source_len = 0, data_len = 0; int i; int *strhead, *lenhead; cur_source = source; while(cur_source){ source_len++; data_len += cur_source->codes_len; cur_source = cur_source->next; } data_len += source_len * 2 + 1; *data = (int *)malloc(sizeof(int) * data_len); **data = source_len; lenhead = *data + 1; strhead = lenhead + source_len; while(source){ *lenhead++ = source->codes_len + 1; while(source->codes){ for(i = 0; i < CODE_LENGTH && source->codes->code[i]; i++){ *strhead++ = source->codes->code[i]; } source->codes = source->codes->next; } *strhead++ = '\0'; source = source->next; } return data_len; } __host__ void transmit_data(int **data_d, int *data, int len){ hipMalloc(data_d, sizeof(int) * len); hipMemcpy(*data_d, data, sizeof(int) * len, hipMemcpyHostToDevice); }
cb60456e545f1d12e548ef45387daef21dc463f8.cu
#include "transmit.h" __host__ Code *create_code(){ Code *code = (Code *)malloc(sizeof(Code)); code->next = NULL; return code; } __host__ Source *create_source(){ Source *source = (Source *)malloc(sizeof(Source)); source->codes = create_code(); source->codes_len = 0; source->next = NULL; return source; } __host__ void deleteSource(Source *source){ Source *next; Code *next_code; while(source){ next = source->next; while(source->codes){ next_code = source->codes->next; free(source->codes); source->codes = next_code; } free(source); source = next; } } __host__ Source *get_strings(FILE *in, char delimiter){ Source *source = create_source(); Source *cur_source = source; Source *prev_source = NULL; Code *code = cur_source->codes; int i = 0; char c; for(c = fgetc(in); c != EOF; c = fgetc(in)){ if(c == delimiter){ if(cur_source->codes_len > 0){ code->code[i] = '\0'; cur_source->next = create_source(); prev_source = cur_source; cur_source = cur_source->next; code = cur_source->codes; i = 0; } } else{ if(i == CODE_LENGTH){ code->next = create_code(); code = code->next; i = 0; } code->code[i++] = c; cur_source->codes_len++; } } if(cur_source->codes_len == 0){ if(prev_source == NULL){ return NULL; } prev_source->next = NULL; deleteSource(cur_source); } return source; } __host__ int pack_strings(int **data, Source *source){ Source *cur_source; int source_len = 0, data_len = 0; int i; int *strhead, *lenhead; cur_source = source; while(cur_source){ source_len++; data_len += cur_source->codes_len; cur_source = cur_source->next; } data_len += source_len * 2 + 1; *data = (int *)malloc(sizeof(int) * data_len); **data = source_len; lenhead = *data + 1; strhead = lenhead + source_len; while(source){ *lenhead++ = source->codes_len + 1; while(source->codes){ for(i = 0; i < CODE_LENGTH && source->codes->code[i]; i++){ *strhead++ = source->codes->code[i]; } source->codes = source->codes->next; } *strhead++ = '\0'; source = source->next; } return data_len; } __host__ void transmit_data(int **data_d, int *data, int len){ cudaMalloc(data_d, sizeof(int) * len); cudaMemcpy(*data_d, data, sizeof(int) * len, cudaMemcpyHostToDevice); }
d8628ffdd752e88550e6c5c1c34d86cca5761311.hip
// !!! This is a file automatically generated by hipify!!! // autolykos.cu /******************************************************************************* AUTOLYKOS -- Autolykos puzzle cycle *******************************************************************************/ #ifdef _WIN32 #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #endif #include "../include/cryptography.h" #include "../include/definitions.h" #include "../include/easylogging++.h" #include "../include/jsmn.h" #include "../include/mining.h" #include "../include/prehash.h" #include "../include/processing.h" #include "../include/reduction.h" #include "../include/request.h" #include "../include/httpapi.h" #include "../include/queue.h" #include "../include/cpuAutolykos.h" #include <ctype.h> #include <hip/hip_runtime.h> #include <curl/curl.h> #include <inttypes.h> #include <iostream> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <sys/types.h> #include <atomic> #include <chrono> #include <mutex> #include <thread> #include <vector> #include <random> #ifdef _WIN32 #include <io.h> #define R_OK 4 #define W_OK 2 #define F_OK 0 #define access _access #else #include <unistd.h> #endif INITIALIZE_EASYLOGGINGPP using namespace std::chrono; std::atomic<int> end_jobs(0); void SenderThread(info_t *info, BlockQueue<MinerShare> *shQueue) { el::Helpers::setThreadName("sender thread"); while (true) { MinerShare share = shQueue->get(); LOG(INFO) << "Some GPU found and trying to POST a share: "; PostPuzzleSolution(info->to, (uint8_t *)&share.nonce); } } //////////////////////////////////////////////////////////////////////////////// // Miner thread cycle //////////////////////////////////////////////////////////////////////////////// void MinerThread(const int totalGPUCards, int deviceId, info_t *info, std::vector<double> *hashrates, std::vector<int> *tstamps, BlockQueue<MinerShare> *shQueue) { AutolykosAlg solVerifier; CUDA_CALL(hipSetDevice(deviceId)); hipSetDeviceFlags(hipDeviceScheduleBlockingSync); char threadName[20]; sprintf(threadName, "GPU %i miner", deviceId); el::Helpers::setThreadName(threadName); state_t state = STATE_KEYGEN; //========================================================================// // Host memory allocation //========================================================================// // CURL http request json_t request(0, REQ_LEN); // hash context // (212 + 4) bytes //ctx_t ctx_h; // autolykos variables uint8_t bound_h[NUM_SIZE_8]; uint8_t mes_h[NUM_SIZE_8]; uint8_t nonce[NONCE_SIZE_8]; char to[MAX_URL_SIZE]; // thread info variables uint_t blockId = 0; milliseconds start; //========================================================================// // Copy from global to thread local data //========================================================================// info->info_mutex.lock(); memcpy(mes_h, info->mes, NUM_SIZE_8); memcpy(bound_h, info->bound, NUM_SIZE_8); memcpy(to, info->to, MAX_URL_SIZE * sizeof(char)); // blockId = info->blockId.load(); //keepPrehash = info->keepPrehash; info->info_mutex.unlock(); //========================================================================// // Check GPU memory //========================================================================// size_t freeMem; size_t totalMem; CUDA_CALL(hipMemGetInfo(&freeMem, &totalMem)); if (freeMem < MIN_FREE_MEMORY) { LOG(ERROR) << "Not enough GPU memory for mining," << " minimum 2.8 GiB needed"; return; } 
//========================================================================// // Device memory allocation //========================================================================// // LOG(INFO) << "GPU " << deviceId << " allocating memory"; // height for puzzle uint32_t *height_d; CUDA_CALL(hipMalloc(&height_d, HEIGHT_SIZE)); uint32_t *data_d; CUDA_CALL(hipMalloc(&data_d, NUM_SIZE_8 + sizeof(ctx_t))); uint32_t *BHashes; CUDA_CALL(hipMalloc(&BHashes, (NUM_SIZE_8)*THREADS_PER_ITER)); // precalculated hashes // N_LEN * NUM_SIZE_8 bytes // 2 GiB uint32_t *hashes_d; CUDA_CALL(hipMalloc(&hashes_d, (uint32_t)N_LEN * NUM_SIZE_8)); //LOG(INFO) << "g" << LocalgpuId << " hashes_d: " << hashes_d << " ghashes_d[gpuId]: " << ghashes_d[gpuId]; // place to handle result of the puzzle uint32_t *indices_d; CUDA_CALL(hipMalloc(&indices_d, MAX_SOLS * sizeof(uint32_t))); // place to handle nonce if solution is found uint32_t indices_h[MAX_SOLS]; uint32_t *count_d; CUDA_CALL(hipMalloc(&count_d, sizeof(uint32_t))); CUDA_CALL(hipMemset(count_d, 0, sizeof(uint32_t))); CUDA_CALL(hipMemset(indices_d, 0, sizeof(uint32_t) * MAX_SOLS)); //========================================================================// // Autolykos puzzle cycle //========================================================================// uint64_t base = 0; uint64_t EndNonce = 0; uint32_t height = 0; int cntCycles = 0; int NCycles = 50; // wait for the very first block to come before starting while (info->blockId.load() == 0) { } start = duration_cast<milliseconds>(system_clock::now().time_since_epoch()); do { ++cntCycles; if (!(cntCycles % NCycles)) { milliseconds timediff = duration_cast<milliseconds>( system_clock::now().time_since_epoch()) - start; // change avg hashrate in global memory (*hashrates)[deviceId] = (double)NONCES_PER_ITER * (double)NCycles / ((double)1000 * timediff.count()); start = duration_cast<milliseconds>( system_clock::now().time_since_epoch()); (*tstamps)[deviceId] = start.count(); } // if solution was found by this thread wait for new block to come if (state == STATE_KEYGEN) { while (info->blockId.load() == blockId) { std::this_thread::sleep_for(std::chrono::milliseconds(10)); } state = STATE_CONTINUE; } // if proxy is disconnected wait for connection while (!info->doJob) { std::this_thread::sleep_for(std::chrono::milliseconds(10)); // LOG(INFO) << "GPU " << deviceId << " problem in proxy "; } uint_t controlId = info->blockId.load(); if (blockId != controlId) { // if info->blockId changed // read new message and bound to thread-local mem info->info_mutex.lock(); memcpy(mes_h, info->mes, NUM_SIZE_8); memcpy(bound_h, info->bound, NUM_SIZE_8); //divide nonces between gpus memcpy(&EndNonce, info->extraNonceEnd, NONCE_SIZE_8); memcpy(&base, info->extraNonceStart, NONCE_SIZE_8); uint64_t nonceChunk = 1 + (EndNonce - base) / totalGPUCards; base = *((uint64_t *)info->extraNonceStart) + deviceId * nonceChunk; EndNonce = base + nonceChunk; //LOG(INFO) << "gpu: " << deviceId << " base: " << base << " end: " << EndNonce; memcpy(&height, info->Hblock, HEIGHT_SIZE); info->info_mutex.unlock(); //LOG(INFO) << "GPU " << deviceId << " read new block data"; blockId = controlId; VLOG(1) << "Generated new keypair," << " copying new data in device memory now"; // copy message CUDA_CALL(hipMemcpy( ((uint8_t *)data_d), mes_h, NUM_SIZE_8, hipMemcpyHostToDevice)); VLOG(1) << "Starting prehashing with new block data"; Prehash(hashes_d, height); // calculate unfinalized hash of message VLOG(1) << "Starting InitMining"; //InitMining(&ctx_h, 
(uint32_t *)mes_h, NUM_SIZE_8); LOG(INFO) << "GPU " << deviceId << " started"; //cpyCtxSymbol((ctx_t*)(&ctx_h)); cpyBSymbol(bound_h); CUDA_CALL(hipDeviceSynchronize()); state = STATE_CONTINUE; } //LOG(INFO) << "Starting main BlockMining procedure"; // calculate solution candidates VLOG(1) << "Starting main BlockMining procedure"; hipLaunchKernelGGL(( BlockMiningStep1), dim3(1 + (THREADS_PER_ITER - 1) / (BLOCK_DIM * 4)), dim3(BLOCK_DIM), 0, 0, data_d, base, hashes_d, BHashes); hipLaunchKernelGGL(( BlockMiningStep2), dim3(1 + (THREADS_PER_ITER - 1) / BLOCK_DIM), dim3(BLOCK_DIM), 0, 0, data_d, base, height, hashes_d, indices_d, count_d, BHashes); VLOG(1) << "Trying to find solution"; // restart iteration if new block was found if (blockId != info->blockId.load()) { continue; } CUDA_CALL(hipMemcpy( indices_h, indices_d, MAX_SOLS * sizeof(uint32_t), hipMemcpyDeviceToHost)); // solution found if (indices_h[0]) { int i = 0; while (indices_h[i] && (i < 16 /*MAX_SOLS*/)) { if (!info->stratumMode && i != 0) { break; } *((uint64_t *)nonce) = base + indices_h[i] - 1; uint64_t endNonceT; memcpy(&endNonceT, info->extraNonceEnd, sizeof(uint64_t)); if ((*((uint64_t *)nonce)) <= endNonceT) { //LOG(INFO) << "sol check i: " << i << " sol index: "<< indices_h[i]; bool checksol = solVerifier.RunAlg(info->mes, nonce, info->bound, info->Hblock); if (checksol) { MinerShare share(*((uint64_t *)nonce)); shQueue->put(share); if (!info->stratumMode) { state = STATE_KEYGEN; //end_jobs.fetch_add(1, std::memory_order_relaxed); break; } } else { LOG(INFO) << " problem in verify solution, nonce: " << *((uint64_t *)nonce); //exit(0); } } else { LOG(INFO) << "nonce greater than end nonce, nonce: " << *((uint64_t *)nonce) << " endNonce: " << endNonceT; } i++; } memset(indices_h, 0, MAX_SOLS * sizeof(uint32_t)); CUDA_CALL(hipMemset( indices_d, 0, MAX_SOLS * sizeof(uint32_t))); CUDA_CALL(hipMemset(count_d, 0, sizeof(uint32_t))); } base += NONCES_PER_ITER; if (base > EndNonce) //end work { state = STATE_KEYGEN; end_jobs.fetch_add(1, std::memory_order_relaxed); } } while (1); } //////////////////////////////////////////////////////////////////////////////// // Main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { /************************************************************************** * Get conf files **************************************************************************/ char confName[14] = "config.json"; char *fileName = (argc > 0) ? 
confName : argv[1]; /** TODO: add argument flag ex: -logconf log.conf*/ bool haslog = (argc > 1); //========================================================================// // Setup log //========================================================================// if (haslog) { char *logFile = argv[2]; // Load configuration from file el::Configurations conf(logFile); // Reconfigure single logger el::Loggers::reconfigureLogger("default", conf); // Actually reconfigure all loggers instead el::Loggers::reconfigureAllLoggers(conf); // Now all the loggers will use configuration from file LOG(INFO) << "got file: " << logFile; } else { START_EASYLOGGINGPP(argc, argv); el::Loggers::reconfigureAllLoggers(el::ConfigurationType::Format, "%datetime %level [%thread] %msg"); LOG(INFO) << "using default logging conf: "; } el::Helpers::setThreadName("main thread"); //========================================================================// // Check GPU availability //========================================================================// int deviceCount; int status = EXIT_SUCCESS; if (hipGetDeviceCount(&deviceCount) != hipSuccess) { LOG(ERROR) << "Error checking GPU"; return EXIT_FAILURE; } LOG(INFO) << "Using " << deviceCount << " GPU devices"; //========================================================================// // Read configuration file //========================================================================// char from[MAX_URL_SIZE]; info_t info; info.blockId = 0; info.keepPrehash = 0; BlockQueue<MinerShare> solQueue; LOG(INFO) << "Using configuration file " << fileName; // check access to config file if (access(fileName, F_OK) == -1) { LOG(ERROR) << "Configuration file " << fileName << " is not found"; return EXIT_FAILURE; } // read configuration from file status = ReadConfig( fileName, from, info.to, info.endJob); if (status == EXIT_FAILURE) { return EXIT_FAILURE; } LOG(INFO) << "Block getting URL:\n " << from; LOG(INFO) << "Solution posting URL:\n " << info.to; //========================================================================// // Setup CURL //========================================================================// // CURL http request json_t request(0, REQ_LEN); // CURL init PERSISTENT_CALL_STATUS(curl_global_init(CURL_GLOBAL_ALL), CURLE_OK); //========================================================================// // Fork miner threads //========================================================================// std::vector<std::thread> miners(deviceCount); std::vector<double> hashrates(deviceCount); std::vector<int> lastTimestamps(deviceCount); std::vector<int> timestamps(deviceCount); // PCI bus and device IDs std::vector<std::pair<int, int>> devinfos(deviceCount); for (int i = 0; i < deviceCount; ++i) { hipDeviceProp_t props; if (hipGetDeviceProperties(&props, i) == hipSuccess) { devinfos[i] = std::make_pair(props.pciBusID, props.pciDeviceID); } miners[i] = std::thread(MinerThread, deviceCount, i, &info, &hashrates, &timestamps, &solQueue); hashrates[i] = 0; lastTimestamps[i] = 1; timestamps[i] = 0; } // get first block status = EXIT_FAILURE; while (status != EXIT_SUCCESS) { status = GetLatestBlock(from, &request, &info, 0); std::this_thread::sleep_for(std::chrono::milliseconds(800)); if (status != EXIT_SUCCESS) { LOG(INFO) << "Waiting for block data to be published by node..."; } } std::thread solSender(SenderThread, &info, &solQueue); std::thread httpApi = std::thread(HttpApiThread, &hashrates, &devinfos); 
//========================================================================// // Main thread get-block cycle //========================================================================// uint_t curlcnt = 0; const uint_t curltimes = 1000; milliseconds ms = milliseconds::zero(); // bomb node with HTTP with 10ms intervals, if new block came // signal miners with blockId while (1) { milliseconds start = duration_cast<milliseconds>( system_clock::now().time_since_epoch()); // get latest block status = GetLatestBlock(from, &request, &info, 0); if (status != EXIT_SUCCESS) { LOG(INFO) << "Getting block error"; } ms += duration_cast<milliseconds>( system_clock::now().time_since_epoch()) - start; ++curlcnt; if (!(curlcnt % curltimes)) { LOG(INFO) << "Average curling time " << ms.count() / (double)curltimes << " ms"; LOG(INFO) << "Current block candidate: " << request.ptr; ms = milliseconds::zero(); std::stringstream hrBuffer; hrBuffer << "Average hashrates: "; double totalHr = 0; for (int i = 0; i < deviceCount; ++i) { // check if miner thread is updating hashrate, e.g. alive if (!(curlcnt % (5 * curltimes))) { if (lastTimestamps[i] == timestamps[i]) { hashrates[i] = 0; } lastTimestamps[i] = timestamps[i]; } hrBuffer << "GPU" << i << " " << hashrates[i] << " MH/s "; totalHr += hashrates[i]; } hrBuffer << "Total " << totalHr << " MH/s "; LOG(INFO) << hrBuffer.str(); } std::this_thread::sleep_for(std::chrono::milliseconds(100)); int completeMiners = end_jobs.load(); if (completeMiners >= deviceCount) { end_jobs.store(0); JobCompleted(info.endJob); } } return EXIT_SUCCESS; } // autolykos.cu
d8628ffdd752e88550e6c5c1c34d86cca5761311.cu
// autolykos.cu /******************************************************************************* AUTOLYKOS -- Autolykos puzzle cycle *******************************************************************************/ #ifdef _WIN32 #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #endif #include "../include/cryptography.h" #include "../include/definitions.h" #include "../include/easylogging++.h" #include "../include/jsmn.h" #include "../include/mining.h" #include "../include/prehash.h" #include "../include/processing.h" #include "../include/reduction.h" #include "../include/request.h" #include "../include/httpapi.h" #include "../include/queue.h" #include "../include/cpuAutolykos.h" #include <ctype.h> #include <cuda.h> #include <curl/curl.h> #include <inttypes.h> #include <iostream> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <sys/types.h> #include <atomic> #include <chrono> #include <mutex> #include <thread> #include <vector> #include <random> #ifdef _WIN32 #include <io.h> #define R_OK 4 #define W_OK 2 #define F_OK 0 #define access _access #else #include <unistd.h> #endif INITIALIZE_EASYLOGGINGPP using namespace std::chrono; std::atomic<int> end_jobs(0); void SenderThread(info_t *info, BlockQueue<MinerShare> *shQueue) { el::Helpers::setThreadName("sender thread"); while (true) { MinerShare share = shQueue->get(); LOG(INFO) << "Some GPU found and trying to POST a share: "; PostPuzzleSolution(info->to, (uint8_t *)&share.nonce); } } //////////////////////////////////////////////////////////////////////////////// // Miner thread cycle //////////////////////////////////////////////////////////////////////////////// void MinerThread(const int totalGPUCards, int deviceId, info_t *info, std::vector<double> *hashrates, std::vector<int> *tstamps, BlockQueue<MinerShare> *shQueue) { AutolykosAlg solVerifier; CUDA_CALL(cudaSetDevice(deviceId)); cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); char threadName[20]; sprintf(threadName, "GPU %i miner", deviceId); el::Helpers::setThreadName(threadName); state_t state = STATE_KEYGEN; //========================================================================// // Host memory allocation //========================================================================// // CURL http request json_t request(0, REQ_LEN); // hash context // (212 + 4) bytes //ctx_t ctx_h; // autolykos variables uint8_t bound_h[NUM_SIZE_8]; uint8_t mes_h[NUM_SIZE_8]; uint8_t nonce[NONCE_SIZE_8]; char to[MAX_URL_SIZE]; // thread info variables uint_t blockId = 0; milliseconds start; //========================================================================// // Copy from global to thread local data //========================================================================// info->info_mutex.lock(); memcpy(mes_h, info->mes, NUM_SIZE_8); memcpy(bound_h, info->bound, NUM_SIZE_8); memcpy(to, info->to, MAX_URL_SIZE * sizeof(char)); // blockId = info->blockId.load(); //keepPrehash = info->keepPrehash; info->info_mutex.unlock(); //========================================================================// // Check GPU memory //========================================================================// size_t freeMem; size_t totalMem; CUDA_CALL(cudaMemGetInfo(&freeMem, &totalMem)); if (freeMem < MIN_FREE_MEMORY) { LOG(ERROR) << "Not enough GPU memory for mining," << " minimum 2.8 GiB needed"; return; } //========================================================================// // Device memory allocation 
//========================================================================// // LOG(INFO) << "GPU " << deviceId << " allocating memory"; // height for puzzle uint32_t *height_d; CUDA_CALL(cudaMalloc(&height_d, HEIGHT_SIZE)); uint32_t *data_d; CUDA_CALL(cudaMalloc(&data_d, NUM_SIZE_8 + sizeof(ctx_t))); uint32_t *BHashes; CUDA_CALL(cudaMalloc(&BHashes, (NUM_SIZE_8)*THREADS_PER_ITER)); // precalculated hashes // N_LEN * NUM_SIZE_8 bytes // 2 GiB uint32_t *hashes_d; CUDA_CALL(cudaMalloc(&hashes_d, (uint32_t)N_LEN * NUM_SIZE_8)); //LOG(INFO) << "g" << LocalgpuId << " hashes_d: " << hashes_d << " ghashes_d[gpuId]: " << ghashes_d[gpuId]; // place to handle result of the puzzle uint32_t *indices_d; CUDA_CALL(cudaMalloc(&indices_d, MAX_SOLS * sizeof(uint32_t))); // place to handle nonce if solution is found uint32_t indices_h[MAX_SOLS]; uint32_t *count_d; CUDA_CALL(cudaMalloc(&count_d, sizeof(uint32_t))); CUDA_CALL(cudaMemset(count_d, 0, sizeof(uint32_t))); CUDA_CALL(cudaMemset(indices_d, 0, sizeof(uint32_t) * MAX_SOLS)); //========================================================================// // Autolykos puzzle cycle //========================================================================// uint64_t base = 0; uint64_t EndNonce = 0; uint32_t height = 0; int cntCycles = 0; int NCycles = 50; // wait for the very first block to come before starting while (info->blockId.load() == 0) { } start = duration_cast<milliseconds>(system_clock::now().time_since_epoch()); do { ++cntCycles; if (!(cntCycles % NCycles)) { milliseconds timediff = duration_cast<milliseconds>( system_clock::now().time_since_epoch()) - start; // change avg hashrate in global memory (*hashrates)[deviceId] = (double)NONCES_PER_ITER * (double)NCycles / ((double)1000 * timediff.count()); start = duration_cast<milliseconds>( system_clock::now().time_since_epoch()); (*tstamps)[deviceId] = start.count(); } // if solution was found by this thread wait for new block to come if (state == STATE_KEYGEN) { while (info->blockId.load() == blockId) { std::this_thread::sleep_for(std::chrono::milliseconds(10)); } state = STATE_CONTINUE; } // if proxy is disconnected wait for connection while (!info->doJob) { std::this_thread::sleep_for(std::chrono::milliseconds(10)); // LOG(INFO) << "GPU " << deviceId << " problem in proxy "; } uint_t controlId = info->blockId.load(); if (blockId != controlId) { // if info->blockId changed // read new message and bound to thread-local mem info->info_mutex.lock(); memcpy(mes_h, info->mes, NUM_SIZE_8); memcpy(bound_h, info->bound, NUM_SIZE_8); //divide nonces between gpus memcpy(&EndNonce, info->extraNonceEnd, NONCE_SIZE_8); memcpy(&base, info->extraNonceStart, NONCE_SIZE_8); uint64_t nonceChunk = 1 + (EndNonce - base) / totalGPUCards; base = *((uint64_t *)info->extraNonceStart) + deviceId * nonceChunk; EndNonce = base + nonceChunk; //LOG(INFO) << "gpu: " << deviceId << " base: " << base << " end: " << EndNonce; memcpy(&height, info->Hblock, HEIGHT_SIZE); info->info_mutex.unlock(); //LOG(INFO) << "GPU " << deviceId << " read new block data"; blockId = controlId; VLOG(1) << "Generated new keypair," << " copying new data in device memory now"; // copy message CUDA_CALL(cudaMemcpy( ((uint8_t *)data_d), mes_h, NUM_SIZE_8, cudaMemcpyHostToDevice)); VLOG(1) << "Starting prehashing with new block data"; Prehash(hashes_d, height); // calculate unfinalized hash of message VLOG(1) << "Starting InitMining"; //InitMining(&ctx_h, (uint32_t *)mes_h, NUM_SIZE_8); LOG(INFO) << "GPU " << deviceId << " started"; 
//cpyCtxSymbol((ctx_t*)(&ctx_h)); cpyBSymbol(bound_h); CUDA_CALL(cudaDeviceSynchronize()); state = STATE_CONTINUE; } //LOG(INFO) << "Starting main BlockMining procedure"; // calculate solution candidates VLOG(1) << "Starting main BlockMining procedure"; BlockMiningStep1<<<1 + (THREADS_PER_ITER - 1) / (BLOCK_DIM * 4), BLOCK_DIM>>>(data_d, base, hashes_d, BHashes); BlockMiningStep2<<<1 + (THREADS_PER_ITER - 1) / BLOCK_DIM, BLOCK_DIM>>>(data_d, base, height, hashes_d, indices_d, count_d, BHashes); VLOG(1) << "Trying to find solution"; // restart iteration if new block was found if (blockId != info->blockId.load()) { continue; } CUDA_CALL(cudaMemcpy( indices_h, indices_d, MAX_SOLS * sizeof(uint32_t), cudaMemcpyDeviceToHost)); // solution found if (indices_h[0]) { int i = 0; while (indices_h[i] && (i < 16 /*MAX_SOLS*/)) { if (!info->stratumMode && i != 0) { break; } *((uint64_t *)nonce) = base + indices_h[i] - 1; uint64_t endNonceT; memcpy(&endNonceT, info->extraNonceEnd, sizeof(uint64_t)); if ((*((uint64_t *)nonce)) <= endNonceT) { //LOG(INFO) << "sol check i: " << i << " sol index: "<< indices_h[i]; bool checksol = solVerifier.RunAlg(info->mes, nonce, info->bound, info->Hblock); if (checksol) { MinerShare share(*((uint64_t *)nonce)); shQueue->put(share); if (!info->stratumMode) { state = STATE_KEYGEN; //end_jobs.fetch_add(1, std::memory_order_relaxed); break; } } else { LOG(INFO) << " problem in verify solution, nonce: " << *((uint64_t *)nonce); //exit(0); } } else { LOG(INFO) << "nonce greater than end nonce, nonce: " << *((uint64_t *)nonce) << " endNonce: " << endNonceT; } i++; } memset(indices_h, 0, MAX_SOLS * sizeof(uint32_t)); CUDA_CALL(cudaMemset( indices_d, 0, MAX_SOLS * sizeof(uint32_t))); CUDA_CALL(cudaMemset(count_d, 0, sizeof(uint32_t))); } base += NONCES_PER_ITER; if (base > EndNonce) //end work { state = STATE_KEYGEN; end_jobs.fetch_add(1, std::memory_order_relaxed); } } while (1); } //////////////////////////////////////////////////////////////////////////////// // Main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { /************************************************************************** * Get conf files **************************************************************************/ char confName[14] = "config.json"; char *fileName = (argc > 0) ? 
confName : argv[1]; /** TODO: add argument flag ex: -logconf log.conf*/ bool haslog = (argc > 1); //========================================================================// // Setup log //========================================================================// if (haslog) { char *logFile = argv[2]; // Load configuration from file el::Configurations conf(logFile); // Reconfigure single logger el::Loggers::reconfigureLogger("default", conf); // Actually reconfigure all loggers instead el::Loggers::reconfigureAllLoggers(conf); // Now all the loggers will use configuration from file LOG(INFO) << "got file: " << logFile; } else { START_EASYLOGGINGPP(argc, argv); el::Loggers::reconfigureAllLoggers(el::ConfigurationType::Format, "%datetime %level [%thread] %msg"); LOG(INFO) << "using default logging conf: "; } el::Helpers::setThreadName("main thread"); //========================================================================// // Check GPU availability //========================================================================// int deviceCount; int status = EXIT_SUCCESS; if (cudaGetDeviceCount(&deviceCount) != cudaSuccess) { LOG(ERROR) << "Error checking GPU"; return EXIT_FAILURE; } LOG(INFO) << "Using " << deviceCount << " GPU devices"; //========================================================================// // Read configuration file //========================================================================// char from[MAX_URL_SIZE]; info_t info; info.blockId = 0; info.keepPrehash = 0; BlockQueue<MinerShare> solQueue; LOG(INFO) << "Using configuration file " << fileName; // check access to config file if (access(fileName, F_OK) == -1) { LOG(ERROR) << "Configuration file " << fileName << " is not found"; return EXIT_FAILURE; } // read configuration from file status = ReadConfig( fileName, from, info.to, info.endJob); if (status == EXIT_FAILURE) { return EXIT_FAILURE; } LOG(INFO) << "Block getting URL:\n " << from; LOG(INFO) << "Solution posting URL:\n " << info.to; //========================================================================// // Setup CURL //========================================================================// // CURL http request json_t request(0, REQ_LEN); // CURL init PERSISTENT_CALL_STATUS(curl_global_init(CURL_GLOBAL_ALL), CURLE_OK); //========================================================================// // Fork miner threads //========================================================================// std::vector<std::thread> miners(deviceCount); std::vector<double> hashrates(deviceCount); std::vector<int> lastTimestamps(deviceCount); std::vector<int> timestamps(deviceCount); // PCI bus and device IDs std::vector<std::pair<int, int>> devinfos(deviceCount); for (int i = 0; i < deviceCount; ++i) { cudaDeviceProp props; if (cudaGetDeviceProperties(&props, i) == cudaSuccess) { devinfos[i] = std::make_pair(props.pciBusID, props.pciDeviceID); } miners[i] = std::thread(MinerThread, deviceCount, i, &info, &hashrates, &timestamps, &solQueue); hashrates[i] = 0; lastTimestamps[i] = 1; timestamps[i] = 0; } // get first block status = EXIT_FAILURE; while (status != EXIT_SUCCESS) { status = GetLatestBlock(from, &request, &info, 0); std::this_thread::sleep_for(std::chrono::milliseconds(800)); if (status != EXIT_SUCCESS) { LOG(INFO) << "Waiting for block data to be published by node..."; } } std::thread solSender(SenderThread, &info, &solQueue); std::thread httpApi = std::thread(HttpApiThread, &hashrates, &devinfos); 
//========================================================================// // Main thread get-block cycle //========================================================================// uint_t curlcnt = 0; const uint_t curltimes = 1000; milliseconds ms = milliseconds::zero(); // bomb node with HTTP with 10ms intervals, if new block came // signal miners with blockId while (1) { milliseconds start = duration_cast<milliseconds>( system_clock::now().time_since_epoch()); // get latest block status = GetLatestBlock(from, &request, &info, 0); if (status != EXIT_SUCCESS) { LOG(INFO) << "Getting block error"; } ms += duration_cast<milliseconds>( system_clock::now().time_since_epoch()) - start; ++curlcnt; if (!(curlcnt % curltimes)) { LOG(INFO) << "Average curling time " << ms.count() / (double)curltimes << " ms"; LOG(INFO) << "Current block candidate: " << request.ptr; ms = milliseconds::zero(); std::stringstream hrBuffer; hrBuffer << "Average hashrates: "; double totalHr = 0; for (int i = 0; i < deviceCount; ++i) { // check if miner thread is updating hashrate, e.g. alive if (!(curlcnt % (5 * curltimes))) { if (lastTimestamps[i] == timestamps[i]) { hashrates[i] = 0; } lastTimestamps[i] = timestamps[i]; } hrBuffer << "GPU" << i << " " << hashrates[i] << " MH/s "; totalHr += hashrates[i]; } hrBuffer << "Total " << totalHr << " MH/s "; LOG(INFO) << hrBuffer.str(); } std::this_thread::sleep_for(std::chrono::milliseconds(100)); int completeMiners = end_jobs.load(); if (completeMiners >= deviceCount) { end_jobs.store(0); JobCompleted(info.endJob); } } return EXIT_SUCCESS; } // autolykos.cu
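//----------------------------------------------------------------------------
// MinerThread above splits the global nonce range between GPUs with
// nonceChunk = 1 + (end - start) / totalGPUCards and then scans roughly
// [base, base + nonceChunk] per device.  The host-only sketch below
// reproduces just that arithmetic with made-up example values; it is not
// part of the miner.
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  const uint64_t start = 0;          // stands in for info->extraNonceStart
  const uint64_t end   = 1000000;    // stands in for info->extraNonceEnd
  const int totalGPUCards = 3;

  const uint64_t nonceChunk = 1 + (end - start) / totalGPUCards;
  for (int deviceId = 0; deviceId < totalGPUCards; ++deviceId) {
    const uint64_t base     = start + deviceId * nonceChunk;
    const uint64_t endNonce = base + nonceChunk;
    printf("GPU %d scans nonces %llu .. %llu\n", deviceId,
           (unsigned long long)base, (unsigned long long)endNonce);
  }
  return 0;
}
//----------------------------------------------------------------------------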
3576b4d51f3ad3678e8d2d6dbe6e0b49209ce5f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31) { if (comp == (+1.6324E-35f / +1.7344E-22f + +1.5046E34f)) { comp = var_2 * -1.2741E-35f; comp += (-1.2420E-37f / -1.5003E-44f); comp = (-1.2703E23f - -1.4485E35f - (-1.0363E-25f / -0.0f)); for (int i=0; i < var_1; ++i) { float tmp_1 = (-1.8322E36f - +0.0f + var_3 - +1.4168E35f); comp = tmp_1 * ceilf((var_4 / var_5 + var_6 / var_7)); } if (comp < (var_8 - -1.5338E-37f * tanhf(var_9 - -1.7752E-41f + var_10))) { comp = -1.6042E-44f + (var_11 + +1.4285E34f); comp += -1.1709E-44f / atanf((-1.9398E34f * (-1.1727E-43f + (var_12 + fabsf(powf((var_13 * (var_14 + fabsf((var_15 / var_16 - (var_17 + var_18))))), +1.3976E36f)))))); comp += (-1.2307E-37f - (+0.0f / var_19 - coshf((-1.5122E-19f * atan2f(+1.9070E-36f, -0.0f))))); } if (comp > -1.3863E6f / -1.8961E34f - cosf(var_20 / ceilf((var_21 * var_22 * (var_23 * (var_24 / -0.0f)))))) { float tmp_2 = -1.8267E36f; float tmp_3 = fabsf(-1.2176E22f * tanhf((var_25 / var_26))); comp += tmp_3 / tmp_2 + -1.3408E-37f + floorf((-1.4715E-42f + logf(-1.9427E13f - powf(var_27 - var_28 - ceilf(-1.1814E16f * (+1.9660E7f * -1.6151E-1f / cosf((var_29 - (var_30 + var_31))))), coshf(-1.9162E36f))))); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); float tmp_29 = atof(argv[29]); float tmp_30 = atof(argv[30]); float tmp_31 = atof(argv[31]); float tmp_32 = atof(argv[32]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32); hipDeviceSynchronize(); return 0; }
3576b4d51f3ad3678e8d2d6dbe6e0b49209ce5f1.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31) { if (comp == (+1.6324E-35f / +1.7344E-22f + +1.5046E34f)) { comp = var_2 * -1.2741E-35f; comp += (-1.2420E-37f / -1.5003E-44f); comp = (-1.2703E23f - -1.4485E35f - (-1.0363E-25f / -0.0f)); for (int i=0; i < var_1; ++i) { float tmp_1 = (-1.8322E36f - +0.0f + var_3 - +1.4168E35f); comp = tmp_1 * ceilf((var_4 / var_5 + var_6 / var_7)); } if (comp < (var_8 - -1.5338E-37f * tanhf(var_9 - -1.7752E-41f + var_10))) { comp = -1.6042E-44f + (var_11 + +1.4285E34f); comp += -1.1709E-44f / atanf((-1.9398E34f * (-1.1727E-43f + (var_12 + fabsf(powf((var_13 * (var_14 + fabsf((var_15 / var_16 - (var_17 + var_18))))), +1.3976E36f)))))); comp += (-1.2307E-37f - (+0.0f / var_19 - coshf((-1.5122E-19f * atan2f(+1.9070E-36f, -0.0f))))); } if (comp > -1.3863E6f / -1.8961E34f - cosf(var_20 / ceilf((var_21 * var_22 * (var_23 * (var_24 / -0.0f)))))) { float tmp_2 = -1.8267E36f; float tmp_3 = fabsf(-1.2176E22f * tanhf((var_25 / var_26))); comp += tmp_3 / tmp_2 + -1.3408E-37f + floorf((-1.4715E-42f + logf(-1.9427E13f - powf(var_27 - var_28 - ceilf(-1.1814E16f * (+1.9660E7f * -1.6151E-1f / cosf((var_29 - (var_30 + var_31))))), coshf(-1.9162E36f))))); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); float tmp_29 = atof(argv[29]); float tmp_30 = atof(argv[30]); float tmp_31 = atof(argv[31]); float tmp_32 = atof(argv[32]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32); cudaDeviceSynchronize(); return 0; }
bc64ac5d2c1b2c51c17bc457234a4167b62b7235.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kWriteRowsMult.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *data = NULL; hipMalloc(&data, XSIZE*YSIZE); float *target = NULL; hipMalloc(&target, XSIZE*YSIZE); int num_images = 1; int num_modules = 1; int num_modules_batch = 2; int module_id_offset = 1; float alpha = 2; float beta = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kWriteRowsMult), dim3(gridBlock),dim3(threadBlock), 0, 0, data,target,num_images,num_modules,num_modules_batch,module_id_offset,alpha,beta); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kWriteRowsMult), dim3(gridBlock),dim3(threadBlock), 0, 0, data,target,num_images,num_modules,num_modules_batch,module_id_offset,alpha,beta); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kWriteRowsMult), dim3(gridBlock),dim3(threadBlock), 0, 0, data,target,num_images,num_modules,num_modules_batch,module_id_offset,alpha,beta); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
bc64ac5d2c1b2c51c17bc457234a4167b62b7235.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kWriteRowsMult.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *data = NULL; cudaMalloc(&data, XSIZE*YSIZE); float *target = NULL; cudaMalloc(&target, XSIZE*YSIZE); int num_images = 1; int num_modules = 1; int num_modules_batch = 2; int module_id_offset = 1; float alpha = 2; float beta = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kWriteRowsMult<<<gridBlock,threadBlock>>>(data,target,num_images,num_modules,num_modules_batch,module_id_offset,alpha,beta); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kWriteRowsMult<<<gridBlock,threadBlock>>>(data,target,num_images,num_modules,num_modules_batch,module_id_offset,alpha,beta); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kWriteRowsMult<<<gridBlock,threadBlock>>>(data,target,num_images,num_modules,num_modules_batch,module_id_offset,alpha,beta); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
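//----------------------------------------------------------------------------
// The benchmark above rounds iXSIZE and iYSIZE up to multiples of
// BLOCKX / BLOCKY with while loops before dividing; the same grid dimensions
// can be computed directly with integer ceiling division.  div_up() and
// round_up_loop() below are illustrative helpers that only check the two
// forms agree; they are not part of the benchmark.
#include <cstdio>

static unsigned int div_up(unsigned int n, unsigned int d)
{
  return (n + d - 1) / d;            // integer ceiling division
}

static unsigned int round_up_loop(unsigned int n, unsigned int d)
{
  while (n % d != 0) { ++n; }        // the while-loop rounding used above
  return n / d;
}

int main(void)
{
  const unsigned int sizes[]  = {240, 496, 784, 1016, 1232, 1680, 2024};
  const unsigned int blocks[] = {8, 16, 24, 32, 64, 256, 1024};
  for (unsigned int n : sizes) {
    for (unsigned int b : blocks) {
      if (div_up(n, b) != round_up_loop(n, b)) {
        printf("mismatch at n=%u, block=%u\n", n, b);
        return 1;
      }
    }
  }
  printf("ceil-division grid size matches the while-loop rounding\n");
  return 0;
}
//----------------------------------------------------------------------------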
4268979712652ac4468c28bb2b28f7f5e0a8d2f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* % Function: deinterleaver % Description: Deinterleaves ULSCH data from RI and ACK control information % Inputs: input_h Input bits % N_ri_bits Number of RI control bits to deinterleave % N_ack_bits Number of ACK control bits to deinterleave % N_l Number of layers % Qm Number of bits per modulation symbol % ri_h RI control bits to interleave % ack_h ACK control bits to interleave % Outputs: *output_h Output bits % *ri_h Deinterleaved RI control bits % *ack_h Deinterleaved ACK control bits By: Ahmad Nour */ #include "deinterleaver_hip.cuh" __global__ void initializeMatricies(Byte* y_idx_d, Byte* y_mat_d, int N_idx, int N_mat) { int idx = blockIdx.x * blockDim.x + threadIdx.x; //initialize Matricies //Not to run more threads than available data if (idx >= N_mat) return; if (idx < N_idx) { y_idx_d[idx] = 100; y_mat_d[idx] = 0; } else { y_mat_d[idx] = 0; } } __global__ void deinterleaveRI(Byte* y_idx_d, Byte* y_mat_d, Byte* ri_d, int R_prime_mux, int N_ri_bits) { int col = threadIdx.x; int row = blockIdx.y; int idx = row * blockDim.x + col; int C_mux = 12; int Ncol = blockDim.x; //Not to run more threads than available data if (row >= N_ri_bits) return; Byte ri_column_set[4] = { 1, 10, 7, 4 }; //Byte ack_column_set[4] = { 2, 9, 8, 3 }; int r = R_prime_mux - 1 - (row / 4); int C_ri = ri_column_set[(row % 4)]; y_idx_d[r*C_mux + C_ri] = 1; ri_d[row * Ncol + col] = y_mat_d[C_mux*r*Ncol + C_ri*Ncol + col]; } __global__ void deinterleaveData(Byte* y_idx_d, Byte* y_mat_d, Byte* output_d, int numThreads, int H_prime_total, int N_ri_bits, int Qm, int N_l) { const int Ncol = blockDim.x; //Total number of columns int col = threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; int idx = row * Ncol + col; const int C_mux = 12; //printf("if %d > %d", row, numThreads); //Not to run more threads than available data if (row >= numThreads) return; int firstRI_row = H_prime_total - (N_ri_bits * 3); // The original eqn: // firstRI_row = ((H_prime_total/12) - (N_N_ri_bits / 4))*12 if (row < firstRI_row) //No RI bits in this range { y_idx_d[row] = 1; output_d[row * (Qm*N_l) + col] = y_mat_d[row*(Qm*N_l) + col]; } else { /* Now, we reshape the matrix to be of (12 cols): idx 0 1 2 3 4 5 6 7 8 9 10 11701b4032c Data can be put? 
(No RI) yes no yes yes no yes yes no yes yes no yes So, to map the data to indices where no RI bits exist, this equation is applied: col = col + (col / 2) + (col % 2); */ int old_mapping = (row - firstRI_row); int new_mapping = old_mapping + (old_mapping / 2) + (old_mapping % 2); int new_row = row + (new_mapping - old_mapping); y_idx_d[new_row] = 1; output_d[row * (Qm*N_l) + col] = y_mat_d[new_row*(Qm*N_l) + col]; } //printf("output_d[%d] = %d\n", row * (Qm*N_l) + col, output_d[row * (Qm*N_l) + col]); } __global__ void serialOut(Byte* input_d, Byte* input_d2, Byte* input_d3, Byte* input_d4, Byte* y_mat_d, const int N, int Nrows, int Qm, int N_l) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int z = blockDim.z * blockIdx.z + threadIdx.z; int idx = y * blockDim.x + x + z * (Nrows * blockDim.x); const int C_mux = 12; //Not to run more threads than available data if (y >= Nrows) return; if (idx <( N / N_l)) y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d[idx]; else if (idx <(2*N / N_l)) y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d2[idx - (N / N_l)]; else if (idx <(3 * N / N_l)) y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d3[idx - (2 * N / N_l)]; else y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d4[idx - (3 * N / N_l)]; //printf("y_mat_d[%d] = %d\n", y*C_mux*Qm*N_l + z*Qm*N_l + x, y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x]); } void deinterleaver(Byte* input_d, Byte* input_d2, Byte* input_d3, Byte* input_d4, Byte** ri_d, Byte** output_d, const int N, const int N_ri, const int Qm, const int N_l, Byte* y_idx_d, Byte* y_mat_d) { // Step 1: Define C_mux int C_mux = N_pusch_symbs; // Step 2: Define R_mux and R_prime_mux int H_prime_total = N / (Qm*N_l); int H_prime = H_prime_total - N_ri; int R_mux = N / C_mux; int R_prime_mux = R_mux / (Qm*N_l); // Initialize the matricies //Calc. number of needed threads for calling kernel(s) int numThreads = (C_mux*R_mux); int blockDim = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 thread) int gridDim = numThreads / (blockDim)+(numThreads % blockDim == 0 ? 0 : 1); //grid size in bloack (min 1) //Calling the kernel(s) initializeMatricies << <gridDim, blockDim >> > (y_idx_d, y_mat_d, (C_mux*R_prime_mux), (C_mux*R_mux)); // Step 6: Construct matrix //Calc. number of needed threads for calling kernel(s) numThreads = C_mux * R_prime_mux * (Qm*N_l); int rows = (numThreads < (1024)) ? numThreads : (1024 / (C_mux*(Qm*N_l))); int gridY = numThreads / (rows*(C_mux*(Qm*N_l))) + (numThreads % (rows*(C_mux*(Qm*N_l))) == 0 ? 0 : 1); //grid size in bloack (min 1) dim3 blockDim_3((Qm*N_l), rows, C_mux); dim3 gridDim_3(1, gridY); serialOut << <gridDim_3, blockDim_3 >> >(input_d, input_d2, input_d3, input_d4, y_mat_d, N, R_prime_mux, Qm, N_l); // Step 3: Deinterleave the RI control bits if (N_ri != 0) { //Calc. number of needed threads for calling kernel(s) numThreads = N_ri; rows = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 thread) dim3 blockDim( Qm*N_l,1 ); dim3 gridDim( 1,rows); deinterleaveRI << <gridDim, blockDim >> > (y_idx_d, y_mat_d, *ri_d, R_prime_mux, numThreads); } // Step 4: Deinterleave the data bits //Calc. number of needed threads for calling kernel(s) numThreads = H_prime; //Actually, it's number of required rows or it's total_threads / (Qm*N_l) rows = (numThreads < (1024/ (Qm*N_l))) ? numThreads : (1024/ (Qm*N_l)); gridY = numThreads / (rows)+(numThreads % rows == 0 ? 
0 : 1); //grid size in bloack (min 1) dim3 blockDim_2(Qm*N_l, rows); dim3 gridDim_2(1, gridY); //printf("kh\n"); deinterleaveData << <gridDim_2, blockDim_2 >> >(y_idx_d, y_mat_d, *output_d, numThreads, H_prime_total, N_ri, Qm, N_l); //printf("kh\n"); }
4268979712652ac4468c28bb2b28f7f5e0a8d2f2.cu
/* % Function: deinterleaver % Description: Deinterleaves ULSCH data from RI and ACK control information % Inputs: input_h Input bits % N_ri_bits Number of RI control bits to deinterleave % N_ack_bits Number of ACK control bits to deinterleave % N_l Number of layers % Qm Number of bits per modulation symbol % ri_h RI control bits to interleave % ack_h ACK control bits to interleave % Outputs: *output_h Output bits % *ri_h Deinterleaved RI control bits % *ack_h Deinterleaved ACK control bits By: Ahmad Nour */ #include "deinterleaver.cuh" __global__ void initializeMatricies(Byte* y_idx_d, Byte* y_mat_d, int N_idx, int N_mat) { int idx = blockIdx.x * blockDim.x + threadIdx.x; //initialize Matricies //Not to run more threads than available data if (idx >= N_mat) return; if (idx < N_idx) { y_idx_d[idx] = 100; y_mat_d[idx] = 0; } else { y_mat_d[idx] = 0; } } __global__ void deinterleaveRI(Byte* y_idx_d, Byte* y_mat_d, Byte* ri_d, int R_prime_mux, int N_ri_bits) { int col = threadIdx.x; int row = blockIdx.y; int idx = row * blockDim.x + col; int C_mux = 12; int Ncol = blockDim.x; //Not to run more threads than available data if (row >= N_ri_bits) return; Byte ri_column_set[4] = { 1, 10, 7, 4 }; //Byte ack_column_set[4] = { 2, 9, 8, 3 }; int r = R_prime_mux - 1 - (row / 4); int C_ri = ri_column_set[(row % 4)]; y_idx_d[r*C_mux + C_ri] = 1; ri_d[row * Ncol + col] = y_mat_d[C_mux*r*Ncol + C_ri*Ncol + col]; } __global__ void deinterleaveData(Byte* y_idx_d, Byte* y_mat_d, Byte* output_d, int numThreads, int H_prime_total, int N_ri_bits, int Qm, int N_l) { const int Ncol = blockDim.x; //Total number of columns int col = threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; int idx = row * Ncol + col; const int C_mux = 12; //printf("if %d > %d", row, numThreads); //Not to run more threads than available data if (row >= numThreads) return; int firstRI_row = H_prime_total - (N_ri_bits * 3); // The original eqn: // firstRI_row = ((H_prime_total/12) - (N_N_ri_bits / 4))*12 if (row < firstRI_row) //No RI bits in this range { y_idx_d[row] = 1; output_d[row * (Qm*N_l) + col] = y_mat_d[row*(Qm*N_l) + col]; } else { /* Now, we reshape the matrix to be of (12 cols): idx 0 1 2 3 4 5 6 7 8 9 10 11701b4032c Data can be put? 
(No RI) yes no yes yes no yes yes no yes yes no yes So, to map the data to indices where no RI bits exist, this equation is applied: col = col + (col / 2) + (col % 2); */ int old_mapping = (row - firstRI_row); int new_mapping = old_mapping + (old_mapping / 2) + (old_mapping % 2); int new_row = row + (new_mapping - old_mapping); y_idx_d[new_row] = 1; output_d[row * (Qm*N_l) + col] = y_mat_d[new_row*(Qm*N_l) + col]; } //printf("output_d[%d] = %d\n", row * (Qm*N_l) + col, output_d[row * (Qm*N_l) + col]); } __global__ void serialOut(Byte* input_d, Byte* input_d2, Byte* input_d3, Byte* input_d4, Byte* y_mat_d, const int N, int Nrows, int Qm, int N_l) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int z = blockDim.z * blockIdx.z + threadIdx.z; int idx = y * blockDim.x + x + z * (Nrows * blockDim.x); const int C_mux = 12; //Not to run more threads than available data if (y >= Nrows) return; if (idx <( N / N_l)) y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d[idx]; else if (idx <(2*N / N_l)) y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d2[idx - (N / N_l)]; else if (idx <(3 * N / N_l)) y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d3[idx - (2 * N / N_l)]; else y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x] = input_d4[idx - (3 * N / N_l)]; //printf("y_mat_d[%d] = %d\n", y*C_mux*Qm*N_l + z*Qm*N_l + x, y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x]); } void deinterleaver(Byte* input_d, Byte* input_d2, Byte* input_d3, Byte* input_d4, Byte** ri_d, Byte** output_d, const int N, const int N_ri, const int Qm, const int N_l, Byte* y_idx_d, Byte* y_mat_d) { // Step 1: Define C_mux int C_mux = N_pusch_symbs; // Step 2: Define R_mux and R_prime_mux int H_prime_total = N / (Qm*N_l); int H_prime = H_prime_total - N_ri; int R_mux = N / C_mux; int R_prime_mux = R_mux / (Qm*N_l); // Initialize the matricies //Calc. number of needed threads for calling kernel(s) int numThreads = (C_mux*R_mux); int blockDim = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 thread) int gridDim = numThreads / (blockDim)+(numThreads % blockDim == 0 ? 0 : 1); //grid size in bloack (min 1) //Calling the kernel(s) initializeMatricies << <gridDim, blockDim >> > (y_idx_d, y_mat_d, (C_mux*R_prime_mux), (C_mux*R_mux)); // Step 6: Construct matrix //Calc. number of needed threads for calling kernel(s) numThreads = C_mux * R_prime_mux * (Qm*N_l); int rows = (numThreads < (1024)) ? numThreads : (1024 / (C_mux*(Qm*N_l))); int gridY = numThreads / (rows*(C_mux*(Qm*N_l))) + (numThreads % (rows*(C_mux*(Qm*N_l))) == 0 ? 0 : 1); //grid size in bloack (min 1) dim3 blockDim_3((Qm*N_l), rows, C_mux); dim3 gridDim_3(1, gridY); serialOut << <gridDim_3, blockDim_3 >> >(input_d, input_d2, input_d3, input_d4, y_mat_d, N, R_prime_mux, Qm, N_l); // Step 3: Deinterleave the RI control bits if (N_ri != 0) { //Calc. number of needed threads for calling kernel(s) numThreads = N_ri; rows = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 thread) dim3 blockDim( Qm*N_l,1 ); dim3 gridDim( 1,rows); deinterleaveRI << <gridDim, blockDim >> > (y_idx_d, y_mat_d, *ri_d, R_prime_mux, numThreads); } // Step 4: Deinterleave the data bits //Calc. number of needed threads for calling kernel(s) numThreads = H_prime; //Actually, it's number of required rows or it's total_threads / (Qm*N_l) rows = (numThreads < (1024/ (Qm*N_l))) ? numThreads : (1024/ (Qm*N_l)); gridY = numThreads / (rows)+(numThreads % rows == 0 ? 
0 : 1); //grid size in bloack (min 1) dim3 blockDim_2(Qm*N_l, rows); dim3 gridDim_2(1, gridY); //printf("kh\n"); deinterleaveData << <gridDim_2, blockDim_2 >> >(y_idx_d, y_mat_d, *output_d, numThreads, H_prime_total, N_ri, Qm, N_l); //printf("kh\n"); }
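
// A quick host-side check of the index remapping used in deinterleaveData above.
// The kernel's comment says that, once the tail of the matrix is viewed as 12
// columns, data may only land on columns not reserved for RI bits (ri_column_set
// occupies columns 1, 4, 7 and 10) and that new = old + old/2 + old%2 performs that
// remapping. The sketch below is a standalone illustration (plain C++; it is not
// part of either source file above, and the names are made up) verifying that the
// formula never lands on an RI column.
#include <cassert>
#include <cstdio>
#include <set>

int main() {
    const std::set<int> ri_cols = {1, 4, 7, 10};          // columns taken by ri_column_set {1,10,7,4}
    for (int old_idx = 0; old_idx < 24; ++old_idx) {      // 24 data slots = three 12-column groups
        int new_idx = old_idx + old_idx / 2 + old_idx % 2;
        assert(ri_cols.count(new_idx % 12) == 0);         // remapped index avoids every RI column
        std::printf("old %2d -> new %2d (column %2d)\n", old_idx, new_idx, new_idx % 12);
    }
    return 0;
}
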
86eb1fbf69963d9b62f4076e64e03403f2d2a7bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <accelerate_cuda.h> static TexInt64 arrIn0_0; static TexInt64 arrIn1_0; static TexInt64 arrIn2_0; static TexInt32 arrIn3_0; extern "C" __global__ void permute(const Int64 shIn0_0, const Int64 shIn1_0, const Int64 shIn2_0, const Int64 shIn3_0, const Int64 shOut_0, Int32* __restrict__ arrOut_0) { const Int64 shIn0 = shIn3_0; const int shapeSize = shIn0; const int gridSize = __umul24(blockDim.x, gridDim.x); int ix; for (ix = __umul24(blockDim.x, blockIdx.x) + threadIdx.x; ix < shapeSize; ix += gridSize) { const Int64 sh0 = ({ assert(ix >= 0 && ix < shIn0); ix; }); const Int64 v0 = indexArray(arrIn1_0, sh0); const Int64 v1 = (Int64) -1 + shIn3_0 - indexArray(arrIn0_0, sh0); const Word8 v2 = (Int64) 0 == indexArray(arrIn2_0, sh0); const Int64 sh_0 = v2 ? v0 : v1; if (!(sh_0 == -1)) { Int32 y0; Int32 _y0; const Int64 jx0 = sh_0; const Int32 x0 = indexArray(arrIn3_0, ix); arrOut_0[jx0] = x0; } } }
86eb1fbf69963d9b62f4076e64e03403f2d2a7bd.cu
#include <accelerate_cuda.h> static TexInt64 arrIn0_0; static TexInt64 arrIn1_0; static TexInt64 arrIn2_0; static TexInt32 arrIn3_0; extern "C" __global__ void permute(const Int64 shIn0_0, const Int64 shIn1_0, const Int64 shIn2_0, const Int64 shIn3_0, const Int64 shOut_0, Int32* __restrict__ arrOut_0) { const Int64 shIn0 = shIn3_0; const int shapeSize = shIn0; const int gridSize = __umul24(blockDim.x, gridDim.x); int ix; for (ix = __umul24(blockDim.x, blockIdx.x) + threadIdx.x; ix < shapeSize; ix += gridSize) { const Int64 sh0 = ({ assert(ix >= 0 && ix < shIn0); ix; }); const Int64 v0 = indexArray(arrIn1_0, sh0); const Int64 v1 = (Int64) -1 + shIn3_0 - indexArray(arrIn0_0, sh0); const Word8 v2 = (Int64) 0 == indexArray(arrIn2_0, sh0); const Int64 sh_0 = v2 ? v0 : v1; if (!(sh_0 == -1)) { Int32 y0; Int32 _y0; const Int64 jx0 = sh_0; const Int32 x0 = indexArray(arrIn3_0, ix); arrOut_0[jx0] = x0; } } }
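
// The permute kernel above walks its whole index space with a grid-stride loop:
// ix starts at the global thread id and advances by blockDim.x * gridDim.x, so a
// fixed-size launch covers any shapeSize. A minimal standalone sketch of the same
// pattern (illustrative kernel/array names only, not taken from the generated code):
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float* data, int n, float factor) {
    int stride = blockDim.x * gridDim.x;                       // total threads launched
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
        data[i] *= factor;                                     // each thread handles i, i+stride, ...
}

int main() {
    const int n = 1 << 20;
    float* d = nullptr;
    cudaMalloc((void**)&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));
    scale<<<128, 256>>>(d, n, 2.0f);                           // far fewer threads than elements is fine
    cudaDeviceSynchronize();
    cudaFree(d);
    return 0;
}
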
396698dd5d581e52f15f4f8718873bd3352e3ba4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/sparse/convert/coo.cuh> #include <raft/sparse/coo.hpp> #include <raft/sparse/linalg/symmetrize.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> #include "../test_utils.cuh" #include <iostream> namespace raft { namespace sparse { template <typename value_idx, typename value_t> __global__ void assert_symmetry( value_idx* rows, value_idx* cols, value_t* vals, value_idx nnz, value_idx* sum) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= nnz) return; atomicAdd(sum, rows[tid]); atomicAdd(sum, -1 * cols[tid]); } template <typename value_idx, typename value_t> struct SparseSymmetrizeInputs { value_idx n_cols; std::vector<value_idx> indptr_h; std::vector<value_idx> indices_h; std::vector<value_t> data_h; }; template <typename value_idx, typename value_t> ::std::ostream& operator<<(::std::ostream& os, const SparseSymmetrizeInputs<value_idx, value_t>& dims) { return os; } template <typename value_idx, typename value_t> class SparseSymmetrizeTest : public ::testing::TestWithParam<SparseSymmetrizeInputs<value_idx, value_t>> { public: SparseSymmetrizeTest() : params(::testing::TestWithParam<SparseSymmetrizeInputs<value_idx, value_t>>::GetParam()), stream(resource::get_cuda_stream(handle)), indptr(0, stream), indices(0, stream), data(0, stream) { } protected: void make_data() { std::vector<value_idx> indptr_h = params.indptr_h; std::vector<value_idx> indices_h = params.indices_h; std::vector<value_t> data_h = params.data_h; indptr.resize(indptr_h.size(), stream); indices.resize(indices_h.size(), stream); data.resize(data_h.size(), stream); update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream); update_device(indices.data(), indices_h.data(), indices_h.size(), stream); update_device(data.data(), data_h.data(), data_h.size(), stream); } void SetUp() override { make_data(); value_idx m = params.indptr_h.size() - 1; value_idx n = params.n_cols; value_idx nnz = params.indices_h.size(); rmm::device_uvector<value_idx> coo_rows(nnz, stream); raft::sparse::convert::csr_to_coo(indptr.data(), m, coo_rows.data(), nnz, stream); raft::sparse::COO<value_t, value_idx> out(stream); raft::sparse::linalg::symmetrize( handle, coo_rows.data(), indices.data(), data.data(), m, n, coo_rows.size(), out); rmm::device_scalar<value_idx> sum(stream); sum.set_value_to_zero_async(stream); hipLaunchKernelGGL(( assert_symmetry), dim3(raft::ceildiv(out.nnz, 256)), dim3(256), 0, stream, out.rows(), out.cols(), out.vals(), out.nnz, sum.data()); sum_h = sum.value(stream); resource::sync_stream(handle, stream); } protected: raft::resources handle; hipStream_t stream; // input data rmm::device_uvector<value_idx> indptr, indices; rmm::device_uvector<value_t> data; value_idx sum_h; 
SparseSymmetrizeInputs<value_idx, value_t> params; }; template <typename T> struct COOSymmetrizeInputs { int m, n, nnz; unsigned long long int seed; }; template <typename T> class COOSymmetrizeTest : public ::testing::TestWithParam<COOSymmetrizeInputs<T>> { protected: void SetUp() override {} void TearDown() override {} }; const std::vector<COOSymmetrizeInputs<float>> inputsf = {{5, 10, 5, 1234ULL}}; typedef COOSymmetrizeTest<float> COOSymmetrize; TEST_P(COOSymmetrize, Result) { hipStream_t stream; hipStreamCreate(&stream); int nnz = 8; int* in_rows_h = new int[nnz]{0, 0, 1, 1, 2, 2, 3, 3}; int* in_cols_h = new int[nnz]{1, 3, 2, 3, 0, 1, 0, 2}; float* in_vals_h = new float[nnz]{0.5, 1.0, 0.5, 0.5, 0.5, 0.0, 0.5, 0.5}; int* exp_rows_h = new int[nnz * 2]{1, 0, 0, 0, 1, 3, 1, 0, 0, 2, 2, 0, 3, 2, 3, 0}; int* exp_cols_h = new int[nnz * 2]{0, 1, 3, 0, 2, 1, 3, 0, 2, 0, 1, 0, 0, 3, 2, 0}; float* exp_vals_h = new float[nnz * 2]{0.5, 0.5, 1.5, 0, 0.5, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0, 1.5, 0.5, 0.5, 0.0}; COO<float> in(stream, nnz, 4, 4); raft::update_device(in.rows(), *&in_rows_h, nnz, stream); raft::update_device(in.cols(), *&in_cols_h, nnz, stream); raft::update_device(in.vals(), *&in_vals_h, nnz, stream); COO<float> out(stream); linalg::coo_symmetrize<float>( &in, &out, [] __device__(int row, int col, float val, float trans) { return val + trans; }, stream); RAFT_CUDA_TRY(hipStreamSynchronize(stream)); std::cout << out << std::endl; ASSERT_TRUE(out.nnz == nnz * 2); ASSERT_TRUE(raft::devArrMatch<int>(out.rows(), exp_rows_h, out.nnz, raft::Compare<int>())); ASSERT_TRUE(raft::devArrMatch<int>(out.cols(), exp_cols_h, out.nnz, raft::Compare<int>())); ASSERT_TRUE(raft::devArrMatch<float>(out.vals(), exp_vals_h, out.nnz, raft::Compare<float>())); hipStreamDestroy(stream); delete[] in_rows_h; delete[] in_cols_h; delete[] in_vals_h; delete[] exp_rows_h; delete[] exp_cols_h; delete[] exp_vals_h; } INSTANTIATE_TEST_CASE_P(COOSymmetrizeTest, COOSymmetrize, ::testing::ValuesIn(inputsf)); const std::vector<SparseSymmetrizeInputs<int, float>> symm_inputs_fint = { // Test n_clusters == n_points { 2, {0, 2, 4, 6, 8}, {0, 1, 0, 1, 0, 1, 0, 1}, {1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f}, }, {2, {0, 2, 4, 6, 8}, {0, 1, 0, 1, 0, 1, 0, 1}, // indices {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}}, }; typedef SparseSymmetrizeTest<int, float> SparseSymmetrizeTestF_int; TEST_P(SparseSymmetrizeTestF_int, Result) { ASSERT_TRUE(sum_h == 0); } INSTANTIATE_TEST_CASE_P(SparseSymmetrizeTest, SparseSymmetrizeTestF_int, ::testing::ValuesIn(symm_inputs_fint)); } // namespace sparse } // namespace raft
396698dd5d581e52f15f4f8718873bd3352e3ba4.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/sparse/convert/coo.cuh> #include <raft/sparse/coo.hpp> #include <raft/sparse/linalg/symmetrize.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> #include "../test_utils.cuh" #include <iostream> namespace raft { namespace sparse { template <typename value_idx, typename value_t> __global__ void assert_symmetry( value_idx* rows, value_idx* cols, value_t* vals, value_idx nnz, value_idx* sum) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= nnz) return; atomicAdd(sum, rows[tid]); atomicAdd(sum, -1 * cols[tid]); } template <typename value_idx, typename value_t> struct SparseSymmetrizeInputs { value_idx n_cols; std::vector<value_idx> indptr_h; std::vector<value_idx> indices_h; std::vector<value_t> data_h; }; template <typename value_idx, typename value_t> ::std::ostream& operator<<(::std::ostream& os, const SparseSymmetrizeInputs<value_idx, value_t>& dims) { return os; } template <typename value_idx, typename value_t> class SparseSymmetrizeTest : public ::testing::TestWithParam<SparseSymmetrizeInputs<value_idx, value_t>> { public: SparseSymmetrizeTest() : params(::testing::TestWithParam<SparseSymmetrizeInputs<value_idx, value_t>>::GetParam()), stream(resource::get_cuda_stream(handle)), indptr(0, stream), indices(0, stream), data(0, stream) { } protected: void make_data() { std::vector<value_idx> indptr_h = params.indptr_h; std::vector<value_idx> indices_h = params.indices_h; std::vector<value_t> data_h = params.data_h; indptr.resize(indptr_h.size(), stream); indices.resize(indices_h.size(), stream); data.resize(data_h.size(), stream); update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream); update_device(indices.data(), indices_h.data(), indices_h.size(), stream); update_device(data.data(), data_h.data(), data_h.size(), stream); } void SetUp() override { make_data(); value_idx m = params.indptr_h.size() - 1; value_idx n = params.n_cols; value_idx nnz = params.indices_h.size(); rmm::device_uvector<value_idx> coo_rows(nnz, stream); raft::sparse::convert::csr_to_coo(indptr.data(), m, coo_rows.data(), nnz, stream); raft::sparse::COO<value_t, value_idx> out(stream); raft::sparse::linalg::symmetrize( handle, coo_rows.data(), indices.data(), data.data(), m, n, coo_rows.size(), out); rmm::device_scalar<value_idx> sum(stream); sum.set_value_to_zero_async(stream); assert_symmetry<<<raft::ceildiv(out.nnz, 256), 256, 0, stream>>>( out.rows(), out.cols(), out.vals(), out.nnz, sum.data()); sum_h = sum.value(stream); resource::sync_stream(handle, stream); } protected: raft::resources handle; cudaStream_t stream; // input data rmm::device_uvector<value_idx> indptr, indices; rmm::device_uvector<value_t> data; value_idx sum_h; SparseSymmetrizeInputs<value_idx, value_t> params; }; template <typename T> struct COOSymmetrizeInputs { int m, n, nnz; unsigned long 
long int seed; }; template <typename T> class COOSymmetrizeTest : public ::testing::TestWithParam<COOSymmetrizeInputs<T>> { protected: void SetUp() override {} void TearDown() override {} }; const std::vector<COOSymmetrizeInputs<float>> inputsf = {{5, 10, 5, 1234ULL}}; typedef COOSymmetrizeTest<float> COOSymmetrize; TEST_P(COOSymmetrize, Result) { cudaStream_t stream; cudaStreamCreate(&stream); int nnz = 8; int* in_rows_h = new int[nnz]{0, 0, 1, 1, 2, 2, 3, 3}; int* in_cols_h = new int[nnz]{1, 3, 2, 3, 0, 1, 0, 2}; float* in_vals_h = new float[nnz]{0.5, 1.0, 0.5, 0.5, 0.5, 0.0, 0.5, 0.5}; int* exp_rows_h = new int[nnz * 2]{1, 0, 0, 0, 1, 3, 1, 0, 0, 2, 2, 0, 3, 2, 3, 0}; int* exp_cols_h = new int[nnz * 2]{0, 1, 3, 0, 2, 1, 3, 0, 2, 0, 1, 0, 0, 3, 2, 0}; float* exp_vals_h = new float[nnz * 2]{0.5, 0.5, 1.5, 0, 0.5, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0, 1.5, 0.5, 0.5, 0.0}; COO<float> in(stream, nnz, 4, 4); raft::update_device(in.rows(), *&in_rows_h, nnz, stream); raft::update_device(in.cols(), *&in_cols_h, nnz, stream); raft::update_device(in.vals(), *&in_vals_h, nnz, stream); COO<float> out(stream); linalg::coo_symmetrize<float>( &in, &out, [] __device__(int row, int col, float val, float trans) { return val + trans; }, stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); std::cout << out << std::endl; ASSERT_TRUE(out.nnz == nnz * 2); ASSERT_TRUE(raft::devArrMatch<int>(out.rows(), exp_rows_h, out.nnz, raft::Compare<int>())); ASSERT_TRUE(raft::devArrMatch<int>(out.cols(), exp_cols_h, out.nnz, raft::Compare<int>())); ASSERT_TRUE(raft::devArrMatch<float>(out.vals(), exp_vals_h, out.nnz, raft::Compare<float>())); cudaStreamDestroy(stream); delete[] in_rows_h; delete[] in_cols_h; delete[] in_vals_h; delete[] exp_rows_h; delete[] exp_cols_h; delete[] exp_vals_h; } INSTANTIATE_TEST_CASE_P(COOSymmetrizeTest, COOSymmetrize, ::testing::ValuesIn(inputsf)); const std::vector<SparseSymmetrizeInputs<int, float>> symm_inputs_fint = { // Test n_clusters == n_points { 2, {0, 2, 4, 6, 8}, {0, 1, 0, 1, 0, 1, 0, 1}, {1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f}, }, {2, {0, 2, 4, 6, 8}, {0, 1, 0, 1, 0, 1, 0, 1}, // indices {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}}, }; typedef SparseSymmetrizeTest<int, float> SparseSymmetrizeTestF_int; TEST_P(SparseSymmetrizeTestF_int, Result) { ASSERT_TRUE(sum_h == 0); } INSTANTIATE_TEST_CASE_P(SparseSymmetrizeTest, SparseSymmetrizeTestF_int, ::testing::ValuesIn(symm_inputs_fint)); } // namespace sparse } // namespace raft
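
// The assert_symmetry kernel in the test above accumulates rows[tid] - cols[tid]
// over all non-zeros; for a structurally symmetric COO matrix every (r, c) entry has
// a matching (c, r), so the total must come out to 0 (a necessary-condition smoke
// test rather than a full symmetry proof). A host-side illustration of the same
// invariant on toy data (plain C++, values made up, not from the test fixtures):
#include <cstdio>
#include <vector>

int main() {
    // Toy symmetric pattern: (0,1),(1,0),(2,3),(3,2)
    std::vector<int> rows = {0, 1, 2, 3};
    std::vector<int> cols = {1, 0, 3, 2};
    long long sum = 0;
    for (int i = 0; i < (int)rows.size(); ++i)
        sum += rows[i] - cols[i];            // same quantity the kernel adds with atomicAdd
    std::printf("row/col difference sum = %lld (0 is expected for a symmetric pattern)\n", sum);
    return 0;
}
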
87a44642e4a8875b5871a3fb5cf52d3ef2b39988.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020 NVIDIA CORPORATION. * Copyright (c) Chris Choy ([email protected]). * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. */ #include "coordinate_map_gpu.cuh" #include "coordinate_map_manager.cpp" #include "coordinate_map_manager.hpp" #include <ATen/hip/HIPContext.h> #include <pybind11/pybind11.h> namespace py = pybind11; namespace minkowski { namespace detail { template <typename src_type, typename dst_type> __global__ void cuda_copy_n(src_type const *src, uint32_t N, dst_type *dst) { CUDA_KERNEL_LOOP(index, N) { dst[index] = src[index]; } } template <typename coordinate_type, typename coordinate_field_type, template <typename C> class TemplatedAllocator> struct insert_and_map_functor<coordinate_type, coordinate_field_type, TemplatedAllocator, CoordinateMapGPU> { std::pair<at::Tensor, at::Tensor> operator()( coordinate_map_key_type &map_key, at::Tensor const &th_coordinate, CoordinateMapManager<coordinate_type, coordinate_field_type, TemplatedAllocator, CoordinateMapGPU> &manager) { uint32_t const N = th_coordinate.size(0); uint32_t const coordinate_size = th_coordinate.size(1); coordinate_type *p_coordinate = th_coordinate.data_ptr<coordinate_type>(); auto coordinate_map = CoordinateMapGPU<coordinate_type, TemplatedAllocator>( N, coordinate_size, manager.m_gpu_default_occupancy, map_key.first); LOG_DEBUG("inserting", N, "coordinates with coordinate_size:", coordinate_size); auto input_coordinate_range = coordinate_range<coordinate_type>(N, coordinate_size, p_coordinate); LOG_DEBUG("insert_and_map"); auto map_inverse_map = coordinate_map.template insert_and_map<true>( input_coordinate_range.begin(), input_coordinate_range.end()); LOG_DEBUG("mapping size:", map_inverse_map.first.size()); // insert moves map manager.insert(map_key, coordinate_map); auto const &mapping = map_inverse_map.first; auto const &inverse_mapping = map_inverse_map.second; // return tensors // TODO int64_t LOG_DEBUG("Reserve mapping torch output tensors."); at::Tensor th_mapping = torch::empty( {(int64_t)mapping.size()}, th_coordinate.options().requires_grad(false).dtype(torch::kInt64)); at::Tensor th_inverse_mapping = torch::empty( {(int64_t)inverse_mapping.size()}, th_coordinate.options().requires_grad(false).dtype(torch::kInt64)); auto const num_blocks = 
(mapping.size() + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; LOG_DEBUG("cuda_copy_n with num_blocks:", num_blocks, "mapping.size():", mapping.size()); hipLaunchKernelGGL(( detail::cuda_copy_n<default_types::index_type, int64_t>) , dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0, thrust::raw_pointer_cast(mapping.data()), mapping.size(), th_mapping.data_ptr<int64_t>()); auto const num_inv_blocks = (inverse_mapping.size() + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; LOG_DEBUG("cuda_copy_n with num_inv_blocks:", num_inv_blocks, "inverse_mapping.size():", inverse_mapping.size()); hipLaunchKernelGGL(( detail::cuda_copy_n<default_types::index_type, int64_t>) , dim3(num_inv_blocks), dim3(CUDA_NUM_THREADS), 0, 0, thrust::raw_pointer_cast(inverse_mapping.data()), inverse_mapping.size(), th_inverse_mapping.data_ptr<int64_t>()); CUDA_CHECK(hipStreamSynchronize(0)); return std::make_pair(std::move(th_mapping), std::move(th_inverse_mapping)); } }; template <typename coordinate_type, typename coordinate_field_type, template <typename C> class TemplatedAllocator> struct insert_field_functor< coordinate_type, coordinate_field_type, TemplatedAllocator, CoordinateMapGPU, CoordinateFieldMapGPU<coordinate_field_type, TemplatedAllocator>> { void operator()( coordinate_map_key_type &map_key, at::Tensor const &th_coordinate, CoordinateMapManager<coordinate_type, coordinate_field_type, TemplatedAllocator, CoordinateMapGPU> &manager) { LOG_DEBUG("insert field"); uint32_t const N = th_coordinate.size(0); uint32_t const coordinate_size = th_coordinate.size(1); coordinate_field_type *p_coordinate = th_coordinate.data_ptr<coordinate_field_type>(); auto map = CoordinateFieldMapGPU<coordinate_field_type, TemplatedAllocator>( N, coordinate_size, map_key.first); map.insert(p_coordinate, p_coordinate + N * coordinate_size); LOG_DEBUG("insert map with tensor_stride", map_key.first); manager.insert_field_map(map_key, map); } }; template <typename coordinate_type, template <typename C> class TemplatedAllocator> struct kernel_map_functor< coordinate_type, TemplatedAllocator, CoordinateMapGPU, gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> { gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>> operator()( CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &in_map, CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &out_map, CUDAKernelMapMode::Mode kernel_map_mode, cpu_kernel_region<coordinate_type> &kernel) { LOG_DEBUG("cpu_kernel_region initialized with volume", kernel.volume()); kernel.to_gpu(); auto gpu_kernel = gpu_kernel_region<coordinate_type>(kernel); LOG_DEBUG("gpu_kernel_region initialization"); return in_map.kernel_map(out_map, gpu_kernel, kernel_map_mode, CUDA_NUM_THREADS); } }; template <typename coordinate_type, template <typename C> class TemplatedAllocator> struct stride_map_functor< coordinate_type, TemplatedAllocator, CoordinateMapGPU, gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> { gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>> operator()( CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &in_map, CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &out_map, default_types::stride_type const &stride) { return in_map.stride_map(out_map, stride, CUDA_NUM_THREADS); } }; // a partial specialization functor for kernel map in/out swap template <> struct swap_in_out_map_functor<gpu_kernel_map< default_types::index_type, detail::default_allocator<char>>> { using gpu_kernel_map_type = 
gpu_kernel_map<default_types::index_type, detail::default_allocator<char>>; gpu_kernel_map_type operator()(gpu_kernel_map_type const &kernel_map) { auto swapped_kernel_map = kernel_map.swap(); LOG_DEBUG("Transposed kernel map in_maps:", swapped_kernel_map.out_maps.begin() - swapped_kernel_map.in_maps.begin()); return std::move(swapped_kernel_map); } }; template <> struct swap_in_out_map_functor< gpu_kernel_map<default_types::index_type, detail::c10_allocator<char>>> { using gpu_kernel_map_type = gpu_kernel_map<default_types::index_type, detail::c10_allocator<char>>; gpu_kernel_map_type operator()(gpu_kernel_map_type const &kernel_map) { auto swapped_kernel_map = kernel_map.swap(); LOG_DEBUG("Transposed kernel map in_maps:", swapped_kernel_map.out_maps.begin() - swapped_kernel_map.in_maps.begin()); return std::move(swapped_kernel_map); } }; namespace detail { template <typename dst_type, typename src_type, typename size_type> __global__ void strided_copy(dst_type *__restrict__ dst, // size_type const num_threads, // src_type const *__restrict__ src, // size_type const stride_size) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { dst[x] = src[x * stride_size]; } } } // namespace detail template <typename coordinate_type, template <typename C> class TemplatedAllocator> struct origin_map_functor< coordinate_type, TemplatedAllocator, CoordinateMapGPU, gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> { std::pair<at::Tensor, std::vector<at::Tensor>> operator()( CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &origin_coordinate_map, gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>> const &origin_map) { auto curr_device = at::hip::current_device(); auto options = torch::TensorOptions({at::kCUDA, curr_device}) .dtype(torch::kLong) .requires_grad(false); auto const out_size = origin_coordinate_map.size(); auto const coordinate_size = origin_coordinate_map.coordinate_size(); at::Tensor batch_indices = torch::empty({out_size}, options); int64_t *d_batch_indices = batch_indices.data_ptr<int64_t>(); LOG_DEBUG("manager origin map strided_copy"); // GPU batch indices are sorted hipLaunchKernelGGL(( detail::strided_copy<int64_t, default_types::dcoordinate_type, default_types::size_type>) , dim3(GET_BLOCKS(out_size, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0, d_batch_indices, out_size, origin_coordinate_map.const_coordinate_data(), coordinate_size); CUDA_CHECK(hipStreamSynchronize(0)); LOG_DEBUG("manager batch copy"); std::vector<int64_t> vec_batch_indices(out_size); CUDA_CHECK(hipMemcpy(vec_batch_indices.data(), d_batch_indices, out_size * sizeof(int64_t), hipMemcpyDeviceToHost)); CUDA_CHECK(hipStreamSynchronize(0)); #ifdef DEBUG LOG_DEBUG("Batch indices:", vec_batch_indices); #endif // gpu origin() sort batch indices auto const max_batch_index = vec_batch_indices[out_size - 1]; std::vector<at::Tensor> in_maps; default_types::index_type current_batch_row_index = 0; for (default_types::index_type i = 0; i < (max_batch_index + 1);) { if (vec_batch_indices[current_batch_row_index] == i) { auto p_curr_map = origin_map.in_maps.begin(current_batch_row_index); auto const curr_size = origin_map.size(current_batch_row_index); at::Tensor row_indices = torch::empty({curr_size}, options); int64_t *d_row_indices = row_indices.data_ptr<int64_t>(); LOG_DEBUG("manager batch copy", i); hipLaunchKernelGGL(( detail::strided_copy<int64_t, default_types::index_type, default_types::size_type>) , 
dim3(GET_BLOCKS(curr_size, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0, d_row_indices, curr_size, p_curr_map, 1); in_maps.push_back(std::move(row_indices)); // if there is a match, move the index. ++current_batch_row_index; if (current_batch_row_index >= out_size) { // Should not happen, but for safety break; } } else { at::Tensor row_indices = torch::empty({0}, options); in_maps.push_back(std::move(row_indices)); } ++i; } CUDA_CHECK(hipStreamSynchronize(0)); return std::make_pair(batch_indices, in_maps); } }; } // namespace detail template class CoordinateMapManager< default_types::dcoordinate_type, default_types::ccoordinate_type, detail::default_allocator, CoordinateMapGPU>; template class CoordinateMapManager<default_types::dcoordinate_type, default_types::ccoordinate_type, detail::c10_allocator, CoordinateMapGPU>; } // end namespace minkowski
87a44642e4a8875b5871a3fb5cf52d3ef2b39988.cu
/* * Copyright (c) 2020 NVIDIA CORPORATION. * Copyright (c) Chris Choy ([email protected]). * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. */ #include "coordinate_map_gpu.cuh" #include "coordinate_map_manager.cpp" #include "coordinate_map_manager.hpp" #include <ATen/cuda/CUDAContext.h> #include <pybind11/pybind11.h> namespace py = pybind11; namespace minkowski { namespace detail { template <typename src_type, typename dst_type> __global__ void cuda_copy_n(src_type const *src, uint32_t N, dst_type *dst) { CUDA_KERNEL_LOOP(index, N) { dst[index] = src[index]; } } template <typename coordinate_type, typename coordinate_field_type, template <typename C> class TemplatedAllocator> struct insert_and_map_functor<coordinate_type, coordinate_field_type, TemplatedAllocator, CoordinateMapGPU> { std::pair<at::Tensor, at::Tensor> operator()( coordinate_map_key_type &map_key, at::Tensor const &th_coordinate, CoordinateMapManager<coordinate_type, coordinate_field_type, TemplatedAllocator, CoordinateMapGPU> &manager) { uint32_t const N = th_coordinate.size(0); uint32_t const coordinate_size = th_coordinate.size(1); coordinate_type *p_coordinate = th_coordinate.data_ptr<coordinate_type>(); auto coordinate_map = CoordinateMapGPU<coordinate_type, TemplatedAllocator>( N, coordinate_size, manager.m_gpu_default_occupancy, map_key.first); LOG_DEBUG("inserting", N, "coordinates with coordinate_size:", coordinate_size); auto input_coordinate_range = coordinate_range<coordinate_type>(N, coordinate_size, p_coordinate); LOG_DEBUG("insert_and_map"); auto map_inverse_map = coordinate_map.template insert_and_map<true>( input_coordinate_range.begin(), input_coordinate_range.end()); LOG_DEBUG("mapping size:", map_inverse_map.first.size()); // insert moves map manager.insert(map_key, coordinate_map); auto const &mapping = map_inverse_map.first; auto const &inverse_mapping = map_inverse_map.second; // return tensors // TODO int64_t LOG_DEBUG("Reserve mapping torch output tensors."); at::Tensor th_mapping = torch::empty( {(int64_t)mapping.size()}, th_coordinate.options().requires_grad(false).dtype(torch::kInt64)); at::Tensor th_inverse_mapping = torch::empty( {(int64_t)inverse_mapping.size()}, th_coordinate.options().requires_grad(false).dtype(torch::kInt64)); auto const num_blocks = (mapping.size() + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; LOG_DEBUG("cuda_copy_n with 
num_blocks:", num_blocks, "mapping.size():", mapping.size()); detail::cuda_copy_n<default_types::index_type, int64_t> <<<num_blocks, CUDA_NUM_THREADS>>>( thrust::raw_pointer_cast(mapping.data()), mapping.size(), th_mapping.data_ptr<int64_t>()); auto const num_inv_blocks = (inverse_mapping.size() + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; LOG_DEBUG("cuda_copy_n with num_inv_blocks:", num_inv_blocks, "inverse_mapping.size():", inverse_mapping.size()); detail::cuda_copy_n<default_types::index_type, int64_t> <<<num_inv_blocks, CUDA_NUM_THREADS>>>( thrust::raw_pointer_cast(inverse_mapping.data()), inverse_mapping.size(), th_inverse_mapping.data_ptr<int64_t>()); CUDA_CHECK(cudaStreamSynchronize(0)); return std::make_pair(std::move(th_mapping), std::move(th_inverse_mapping)); } }; template <typename coordinate_type, typename coordinate_field_type, template <typename C> class TemplatedAllocator> struct insert_field_functor< coordinate_type, coordinate_field_type, TemplatedAllocator, CoordinateMapGPU, CoordinateFieldMapGPU<coordinate_field_type, TemplatedAllocator>> { void operator()( coordinate_map_key_type &map_key, at::Tensor const &th_coordinate, CoordinateMapManager<coordinate_type, coordinate_field_type, TemplatedAllocator, CoordinateMapGPU> &manager) { LOG_DEBUG("insert field"); uint32_t const N = th_coordinate.size(0); uint32_t const coordinate_size = th_coordinate.size(1); coordinate_field_type *p_coordinate = th_coordinate.data_ptr<coordinate_field_type>(); auto map = CoordinateFieldMapGPU<coordinate_field_type, TemplatedAllocator>( N, coordinate_size, map_key.first); map.insert(p_coordinate, p_coordinate + N * coordinate_size); LOG_DEBUG("insert map with tensor_stride", map_key.first); manager.insert_field_map(map_key, map); } }; template <typename coordinate_type, template <typename C> class TemplatedAllocator> struct kernel_map_functor< coordinate_type, TemplatedAllocator, CoordinateMapGPU, gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> { gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>> operator()( CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &in_map, CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &out_map, CUDAKernelMapMode::Mode kernel_map_mode, cpu_kernel_region<coordinate_type> &kernel) { LOG_DEBUG("cpu_kernel_region initialized with volume", kernel.volume()); kernel.to_gpu(); auto gpu_kernel = gpu_kernel_region<coordinate_type>(kernel); LOG_DEBUG("gpu_kernel_region initialization"); return in_map.kernel_map(out_map, gpu_kernel, kernel_map_mode, CUDA_NUM_THREADS); } }; template <typename coordinate_type, template <typename C> class TemplatedAllocator> struct stride_map_functor< coordinate_type, TemplatedAllocator, CoordinateMapGPU, gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> { gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>> operator()( CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &in_map, CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &out_map, default_types::stride_type const &stride) { return in_map.stride_map(out_map, stride, CUDA_NUM_THREADS); } }; // a partial specialization functor for kernel map in/out swap template <> struct swap_in_out_map_functor<gpu_kernel_map< default_types::index_type, detail::default_allocator<char>>> { using gpu_kernel_map_type = gpu_kernel_map<default_types::index_type, detail::default_allocator<char>>; gpu_kernel_map_type operator()(gpu_kernel_map_type const &kernel_map) { auto swapped_kernel_map = 
kernel_map.swap(); LOG_DEBUG("Transposed kernel map in_maps:", swapped_kernel_map.out_maps.begin() - swapped_kernel_map.in_maps.begin()); return std::move(swapped_kernel_map); } }; template <> struct swap_in_out_map_functor< gpu_kernel_map<default_types::index_type, detail::c10_allocator<char>>> { using gpu_kernel_map_type = gpu_kernel_map<default_types::index_type, detail::c10_allocator<char>>; gpu_kernel_map_type operator()(gpu_kernel_map_type const &kernel_map) { auto swapped_kernel_map = kernel_map.swap(); LOG_DEBUG("Transposed kernel map in_maps:", swapped_kernel_map.out_maps.begin() - swapped_kernel_map.in_maps.begin()); return std::move(swapped_kernel_map); } }; namespace detail { template <typename dst_type, typename src_type, typename size_type> __global__ void strided_copy(dst_type *__restrict__ dst, // size_type const num_threads, // src_type const *__restrict__ src, // size_type const stride_size) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { dst[x] = src[x * stride_size]; } } } // namespace detail template <typename coordinate_type, template <typename C> class TemplatedAllocator> struct origin_map_functor< coordinate_type, TemplatedAllocator, CoordinateMapGPU, gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> { std::pair<at::Tensor, std::vector<at::Tensor>> operator()( CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &origin_coordinate_map, gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>> const &origin_map) { auto curr_device = at::cuda::current_device(); auto options = torch::TensorOptions({at::kCUDA, curr_device}) .dtype(torch::kLong) .requires_grad(false); auto const out_size = origin_coordinate_map.size(); auto const coordinate_size = origin_coordinate_map.coordinate_size(); at::Tensor batch_indices = torch::empty({out_size}, options); int64_t *d_batch_indices = batch_indices.data_ptr<int64_t>(); LOG_DEBUG("manager origin map strided_copy"); // GPU batch indices are sorted detail::strided_copy<int64_t, default_types::dcoordinate_type, default_types::size_type> <<<GET_BLOCKS(out_size, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>( d_batch_indices, out_size, origin_coordinate_map.const_coordinate_data(), coordinate_size); CUDA_CHECK(cudaStreamSynchronize(0)); LOG_DEBUG("manager batch copy"); std::vector<int64_t> vec_batch_indices(out_size); CUDA_CHECK(cudaMemcpy(vec_batch_indices.data(), d_batch_indices, out_size * sizeof(int64_t), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaStreamSynchronize(0)); #ifdef DEBUG LOG_DEBUG("Batch indices:", vec_batch_indices); #endif // gpu origin() sort batch indices auto const max_batch_index = vec_batch_indices[out_size - 1]; std::vector<at::Tensor> in_maps; default_types::index_type current_batch_row_index = 0; for (default_types::index_type i = 0; i < (max_batch_index + 1);) { if (vec_batch_indices[current_batch_row_index] == i) { auto p_curr_map = origin_map.in_maps.begin(current_batch_row_index); auto const curr_size = origin_map.size(current_batch_row_index); at::Tensor row_indices = torch::empty({curr_size}, options); int64_t *d_row_indices = row_indices.data_ptr<int64_t>(); LOG_DEBUG("manager batch copy", i); detail::strided_copy<int64_t, default_types::index_type, default_types::size_type> <<<GET_BLOCKS(curr_size, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>( d_row_indices, curr_size, p_curr_map, 1); in_maps.push_back(std::move(row_indices)); // if there is a match, move the index. 
++current_batch_row_index; if (current_batch_row_index >= out_size) { // Should not happen, but for safety break; } } else { at::Tensor row_indices = torch::empty({0}, options); in_maps.push_back(std::move(row_indices)); } ++i; } CUDA_CHECK(cudaStreamSynchronize(0)); return std::make_pair(batch_indices, in_maps); } }; } // namespace detail template class CoordinateMapManager< default_types::dcoordinate_type, default_types::ccoordinate_type, detail::default_allocator, CoordinateMapGPU>; template class CoordinateMapManager<default_types::dcoordinate_type, default_types::ccoordinate_type, detail::c10_allocator, CoordinateMapGPU>; } // end namespace minkowski
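
// The strided_copy kernel above gathers dst[x] = src[x * stride_size], i.e. one
// element per coordinate row, which is how the leading batch index is pulled out of
// the interleaved coordinate buffer before the per-batch row maps are built. A
// host-side equivalent under the same row-major layout assumption (illustrative
// names only, not part of the library code):
#include <cstddef>
#include <cstdio>
#include <vector>

// First field of each row of a row-major [n_rows x row_width] buffer.
std::vector<int> gather_first_column(const std::vector<int>& flat, int row_width) {
    std::vector<int> out;
    for (std::size_t r = 0; r * row_width < flat.size(); ++r)
        out.push_back(flat[r * row_width]);
    return out;
}

int main() {
    // Three rows of (batch, x, y, z) coordinates, row_width == 4.
    std::vector<int> coords = {0, 5, 5, 5,  0, 7, 1, 2,  1, 3, 3, 3};
    for (int b : gather_first_column(coords, 4))
        std::printf("batch index %d\n", b);
    return 0;
}
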
c51c7f45bdfb099fe078fa131358e8b2a029056c.hip
// !!! This is a file automatically generated by hipify!!! /* * Example of how to use the mxGPUArray API in a MEX file. This example shows * how to write a MEX function that takes a gpuArray input and returns a * gpuArray output, e.g. B=mexFunction(A). * * Copyright 2012 The MathWorks, Inc. */ #include "mex.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "gpu/mxGPUArray.h" /* * Device code */ void __global__ TimesTwo(double * const A, int const N) { /* Calculate the global linear index, assuming a 1-d grid. */ int const i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) { A[i] = 2.0 * A[i]; } } /* * Host code */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { /* Declare all variables.*/ mxGPUArray *A; mxGPUArray *B; double *d_A; double *d_B; int N; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; /* Choose a reasonably sized number of threads for the block. */ int const threadsPerBlock = 256; int blocksPerGrid; /* Initialize the MathWorks GPU API. */ mxInitGPU(); /* Throw an error if the input is not a GPU array. */ if ((nrhs!=1) || !(mxIsGPUArray(prhs[0]))) { mexErrMsgIdAndTxt(errId, errMsg); } A = mxGPUCopyFromMxArray(prhs[0]); /* * Verify that A really is a double array before extracting the pointer. */ if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } /* * Now that we have verified the data type, extract a pointer to the input * data on the device. */ d_A = (double *)(mxGPUGetData(A)); /* * Call the kernel using the CUDA runtime API. We are using a 1-d grid here, * and it would be possible for the number of elements to be too large for * the grid. For this example we are not guarding against this possibility. */ N = (int)(mxGPUGetNumberOfElements(A)); blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( TimesTwo), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, N); /* Wrap the result up as a MATLAB gpuArray for return. */ plhs[0] = mxGPUCreateMxArrayOnGPU(A); /* * The mxGPUArray pointers are host-side structures that refer to device * data. These must be destroyed before leaving the MEX function. */ mxGPUDestroyGPUArray(A); // mxGPUDestroyGPUArray(B); }
c51c7f45bdfb099fe078fa131358e8b2a029056c.cu
/* * Example of how to use the mxGPUArray API in a MEX file. This example shows * how to write a MEX function that takes a gpuArray input and returns a * gpuArray output, e.g. B=mexFunction(A). * * Copyright 2012 The MathWorks, Inc. */ #include "mex.h" #include "cuda.h" #include "cuda_runtime.h" #include "gpu/mxGPUArray.h" /* * Device code */ void __global__ TimesTwo(double * const A, int const N) { /* Calculate the global linear index, assuming a 1-d grid. */ int const i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) { A[i] = 2.0 * A[i]; } } /* * Host code */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { /* Declare all variables.*/ mxGPUArray *A; mxGPUArray *B; double *d_A; double *d_B; int N; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; /* Choose a reasonably sized number of threads for the block. */ int const threadsPerBlock = 256; int blocksPerGrid; /* Initialize the MathWorks GPU API. */ mxInitGPU(); /* Throw an error if the input is not a GPU array. */ if ((nrhs!=1) || !(mxIsGPUArray(prhs[0]))) { mexErrMsgIdAndTxt(errId, errMsg); } A = mxGPUCopyFromMxArray(prhs[0]); /* * Verify that A really is a double array before extracting the pointer. */ if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } /* * Now that we have verified the data type, extract a pointer to the input * data on the device. */ d_A = (double *)(mxGPUGetData(A)); /* * Call the kernel using the CUDA runtime API. We are using a 1-d grid here, * and it would be possible for the number of elements to be too large for * the grid. For this example we are not guarding against this possibility. */ N = (int)(mxGPUGetNumberOfElements(A)); blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; TimesTwo<<<blocksPerGrid, threadsPerBlock>>>(d_A, N); /* Wrap the result up as a MATLAB gpuArray for return. */ plhs[0] = mxGPUCreateMxArrayOnGPU(A); /* * The mxGPUArray pointers are host-side structures that refer to device * data. These must be destroyed before leaving the MEX function. */ mxGPUDestroyGPUArray(A); // mxGPUDestroyGPUArray(B); }
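
// The MEX example above sizes its grid with the usual integer ceiling division,
// blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock, and relies on the
// in-kernel "if (i < N)" guard to discard the overshoot. A small standalone check of
// that arithmetic (plain C++, hypothetical values):
#include <cassert>
#include <cstdio>

int main() {
    const int threadsPerBlock = 256;
    for (int N = 1; N <= 10000; ++N) {
        int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;  // ceil(N / 256.0)
        assert(blocksPerGrid * threadsPerBlock >= N);                     // every element gets a thread
        assert((blocksPerGrid - 1) * threadsPerBlock < N);                // and no block is entirely idle
    }
    std::printf("ceil-div launch sizing checked for N = 1..10000\n");
    return 0;
}
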
1b7ef682f37f6343a348f702af4675d5b57af064.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <pycuda-complex.hpp> #include <pycuda-helpers.hpp> #include <surface_functions.h> #define pi 3.14159265 #define phi 1.6180339 typedef pycuda::complex<cudaPres> pyComplex; extern "C++" { typedef float fp_tex_float; typedef int2 fp_tex_double; typedef uint2 fp_tex_cfloat; typedef int4 fp_tex_cdouble; __device__ void fp_surf2Dwrite(double var,surface<void, cudaSurfaceType2D> surf, int i, int j, enum hipSurfaceBoundaryMode mode) { fp_tex_double auxvar; auxvar.x = __double2loint(var); auxvar.y = __double2hiint(var); surf2Dwrite(auxvar, surf, i*sizeof(fp_tex_double), j, mode); } __device__ void fp_surf2Dwrite(pycuda::complex<float> var,surface<void, cudaSurfaceType2D> surf, int i, int j, enum hipSurfaceBoundaryMode mode) { fp_tex_cfloat auxvar; auxvar.x = __float_as_int(var._M_re); auxvar.y = __float_as_int(var._M_im); surf2Dwrite(auxvar, surf, i*sizeof(fp_tex_cfloat), j,mode); } __device__ void fp_surf2Dwrite(pycuda::complex<double> var,surface<void, cudaSurfaceType2D> surf, int i, int j, enum hipSurfaceBoundaryMode mode) { fp_tex_cdouble auxvar; auxvar.x = __double2loint(var._M_re); auxvar.y = __double2hiint(var._M_re); auxvar.z = __double2loint(var._M_im); auxvar.w = __double2hiint(var._M_im); surf2Dwrite(auxvar, surf, i*sizeof(fp_tex_cdouble), j,mode); } __device__ void fp_surf2Dread(double *var, surface<void, cudaSurfaceType2D> surf, int i, int j, enum hipSurfaceBoundaryMode mode) { fp_tex_double v; surf2Dread(&v, surf, i*sizeof(fp_tex_double), j, mode); *var = __hiloint2double(v.y, v.x); } __device__ void fp_surf2Dread(pycuda::complex<float> *var, surface<void, cudaSurfaceType2D> surf, int i, int j, enum hipSurfaceBoundaryMode mode) { fp_tex_cfloat v; surf2Dread(&v, surf, i*sizeof(fp_tex_cfloat), j, mode); *var = pycuda::complex<float>(__int_as_float(v.x), __int_as_float(v.y)); } __device__ void fp_surf2Dread(pycuda::complex<double> *var, surface<void, cudaSurfaceType2D> surf, int i, int j, enum hipSurfaceBoundaryMode mode) { fp_tex_cdouble v; surf2Dread(&v, surf, i*sizeof(fp_tex_cdouble), j, mode); *var = pycuda::complex<double>(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z)); } } surface< void, cudaSurfaceType3D> surf_psi ; texture< fp_pres, hipTextureType3D, hipReadModeElementType> tex_psi ; surface< void, cudaSurfaceType2DLayered> surf_psi2D; surface< void, cudaSurfaceType2D> surf_psi2DNL; texture< fp_pres, hipTextureType2D, hipReadModeElementType> tex_psi2D ; __device__ cudaPres KspaceFFT(int tid, int nPoint, cudaPres L){ cudaPres Kfft; if (tid < nPoint/2){ Kfft = 2.0f*pi*(tid)/L; } else { Kfft = 2.0f*pi*(tid-nPoint)/L; } return Kfft; } __global__ void gaussian_kernel( cudaPres dx,cudaPres dy, cudaPres dz, cudaPres xMin,cudaPres yMin, cudaPres zMin, cudaPres a,cudaPres b, cudaPres c, cudaPres d,cudaPres e, cudaPres f, int caso, pyComplex *psi){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int t_k = blockIdx.z*blockDim.z + threadIdx.z; int tid = t_i + t_j*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y; cudaPres x=xMin+t_i*dx; cudaPres y=yMin+t_j*dy; cudaPres z=zMin+t_k*dz; pyComplex value; if (caso==0){ value._M_re=exp(-a*x*x-b*y*y-c*z*z); value._M_im=exp(-d*x*x-e*y*y-f*z*z); } if (caso==1){ value._M_re=x; value._M_im=y; } psi[tid] = value; } __global__ void laplaFFT_kernel( cudaPres Lx,cudaPres Ly, cudaPres Lz, int nPointX,int nPointY, int nPointZ, pyComplex *fftTrnf){ int t_i = blockIdx.x*blockDim.x + 
threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int t_k = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_i + gridDim.z * blockDim.z * t_j + t_k; cudaPres kX = KspaceFFT(t_i,nPointX, Lx);//kx[t_j]; cudaPres kY = KspaceFFT(t_j,nPointY, Ly);//ky[t_i]; cudaPres kZ = KspaceFFT(t_k,nPointZ, Lz);//kz[t_k]; cudaPres k2 = kX*kX + kY*kY + kZ*kZ; pyComplex value = fftTrnf[tid]; fftTrnf[tid] = -k2*value; } __global__ void laplaFDtex_kernel(cudaPres dx, cudaPres dy, cudaPres dz, pyComplex *func_d){ int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_x + blockDim.z * gridDim.z * t_y + t_z; pyComplex center, right, left, up, down, top, bottom; center = fp_tex3D(tex_psi, t_z, t_y, t_x); up = fp_tex3D(tex_psi, t_z, t_y+1, t_x); down = fp_tex3D(tex_psi, t_z, t_y-1, t_x); right = fp_tex3D(tex_psi, t_z, t_y, t_x+1); left = fp_tex3D(tex_psi, t_z, t_y, t_x-1); top = fp_tex3D(tex_psi, t_z+1, t_y, t_x); bottom = fp_tex3D(tex_psi, t_z-1, t_y, t_x); cudaPres drInv = 1.0/dy; pyComplex laplacian = (up + down - 2.0cString*center )*drInv*drInv; drInv = 1.0/dx; laplacian += (right + left - 2.0cString*center )*drInv*drInv; drInv = 1.0/dz; laplacian += (top + bottom - 2.0cString*center )*drInv*drInv; func_d[tid] = laplacian; } __global__ void laplaFDsurf_kernel(cudaPres dx, cudaPres dy, cudaPres dz, pyComplex *func_d){ int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_x + blockDim.z * gridDim.z * t_y + t_z; pyComplex center, right, left, up, down, top, bottom; fp_surf3Dread(&center,surf_psi, t_z, t_y, t_x, hipBoundaryModeZero); fp_surf3Dread(&up,surf_psi, t_z, t_y+1, t_x, hipBoundaryModeZero); fp_surf3Dread(&down,surf_psi, t_z, t_y-1, t_x, hipBoundaryModeZero); fp_surf3Dread(&right,surf_psi, t_z, t_y, t_x+1, hipBoundaryModeZero); fp_surf3Dread(&left,surf_psi, t_z, t_y, t_x-1, hipBoundaryModeZero); fp_surf3Dread(&top,surf_psi, t_z+1, t_y, t_x, hipBoundaryModeZero); fp_surf3Dread(&bottom,surf_psi, t_z-1, t_y, t_x, hipBoundaryModeZero); cudaPres drInv = 1.0/dy; pyComplex laplacian = (up + down - 2.0cString*center )*drInv*drInv; drInv = 1.0/dx; laplacian += (right + left - 2.0cString*center )*drInv*drInv; drInv = 1.0/dz; laplacian += (top + bottom - 2.0cString*center )*drInv*drInv; func_d[tid] = laplacian; } __global__ void test_tex_kernel( pyComplex *func_d){ int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_x + blockDim.z * gridDim.z * t_y + t_z; pyComplex center; center = fp_tex3D(tex_psi, t_z, t_y, t_x); func_d[tid] = center; } __global__ void test_surf_kernel(pyComplex *func_d){ int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_x + blockDim.z * gridDim.z * t_y + t_z; pyComplex center; fp_surf3Dread(&center,surf_psi, t_z, t_y, t_x, hipBoundaryModeZero); func_d[tid] = center; } __global__ void setzero_kernel(pyComplex *func_d){ int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + 
threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_x + blockDim.z * gridDim.z * t_y + t_z; func_d[tid] *=0; } __global__ void write2surf_kernel(pyComplex *realArray1){ // This kernel writes quantum pressure and non linear term of energy int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_x + blockDim.z * gridDim.z * t_y + t_z; pyComplex arr1 = realArray1[tid]; // cudaPres arr2 = realArray2[tid]; // Write to Surfaces fp_surf3Dwrite( arr1, surf_psi, t_z, t_y, t_x, hipBoundaryModeClamp); // fp_surf3Dwrite( arr2, surf_psi0OutImag, t_x*sizeof(cudaPres), t_y, t_z, hipBoundaryModeClamp); } //################################################# 2D __global__ void gaussian_kernel2D( cudaPres dx,cudaPres dy, cudaPres xMin,cudaPres yMin, cudaPres a,cudaPres b, cudaPres d,cudaPres e, int caso, pyComplex *psi){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j + t_i ; cudaPres x=xMin+t_j*dx; cudaPres y=yMin+t_i*dy; pyComplex value; if (caso==0){ value._M_re=exp(-a*x*x-b*y*y); value._M_im=exp(-d*x*x-e*y*y); } if (caso==1){ value._M_re=x; value._M_im=y; } psi[tid] = value; } __global__ void laplaFFT_kernel2D( cudaPres Lx,cudaPres Ly, int nPointX,int nPointY, pyComplex *fftTrnf){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; cudaPres kX = KspaceFFT(t_j,nPointX, Lx);//kx[t_j]; cudaPres kY = KspaceFFT(t_i,nPointY, Ly);//ky[t_i]; //cudaPres kZ = KspaceFFT(t_k,nPointZ, Lz);//kz[t_k]; cudaPres k2 = kX*kX + kY*kY ; pyComplex value = fftTrnf[tid]; fftTrnf[tid] = -k2*value; } __global__ void laplaFDtex_kernel2D(cudaPres dx, cudaPres dy, pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center, right, left, up, down; center = fp_tex2D(tex_psi2D, t_i, t_j); right = fp_tex2D(tex_psi2D, t_i, t_j+1); left = fp_tex2D(tex_psi2D, t_i, t_j-1); up = fp_tex2D(tex_psi2D, t_i+1, t_j); down = fp_tex2D(tex_psi2D, t_i-1, t_j); cudaPres drInv = 1.0/dy; pyComplex laplacian = (up + down - 2.0cString*center )*drInv*drInv; drInv = 1.0/dx; laplacian += (right + left - 2.0cString*center )*drInv*drInv; //drInv = 1.0/dz; //laplacian += (top + bottom - 2.0cString*center )*drInv*drInv; func_d[tid] = laplacian; } __global__ void laplaFDsurf_kernel2D(cudaPres dx, cudaPres dy, pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center, right, left, up, down ; fp_surf2DLayeredread(&center,surf_psi2D, t_i, t_j, int(0), hipBoundaryModeZero); fp_surf2DLayeredread(&up, surf_psi2D, t_i+1, t_j, int(0), hipBoundaryModeZero); fp_surf2DLayeredread(&down, surf_psi2D, t_i-1, t_j, int(0), hipBoundaryModeZero); fp_surf2DLayeredread(&right, surf_psi2D, t_i, t_j+1, int(0), hipBoundaryModeZero); fp_surf2DLayeredread(&left, surf_psi2D, t_i, t_j-1, int(0), hipBoundaryModeZero); //fp_surf3Dread(&top,surf_psi, t_z+1, t_y, t_x, hipBoundaryModeZero); //fp_surf3Dread(&bottom,surf_psi, t_z-1, t_y, t_x, hipBoundaryModeZero); cudaPres drInv = 1.0/dy; pyComplex laplacian = (up + down - 2.0cString*center )*drInv*drInv; drInv = 1.0/dx; laplacian += (right + left - 2.0cString*center )*drInv*drInv; 
func_d[tid] = laplacian; } __global__ void laplaFDsurf_kernel2DNL(cudaPres dx, cudaPres dy, pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center, right, left, up, down ; fp_surf2Dread(&center,surf_psi2DNL, t_i, t_j, hipBoundaryModeZero); fp_surf2Dread(&up, surf_psi2DNL, t_i+1, t_j, hipBoundaryModeZero); fp_surf2Dread(&down, surf_psi2DNL, t_i-1, t_j, hipBoundaryModeZero); fp_surf2Dread(&right, surf_psi2DNL, t_i, t_j+1, hipBoundaryModeZero); fp_surf2Dread(&left, surf_psi2DNL, t_i, t_j-1, hipBoundaryModeZero); //fp_surf3Dread(&top,surf_psi, t_z+1, t_y, t_x, hipBoundaryModeZero); //fp_surf3Dread(&bottom,surf_psi, t_z-1, t_y, t_x, hipBoundaryModeZero); cudaPres drInv = 1.0/dy; pyComplex laplacian = (up + down - 2.0cString*center )*drInv*drInv; drInv = 1.0/dx; laplacian += (right + left - 2.0cString*center )*drInv*drInv; func_d[tid] = laplacian; } __global__ void test_tex_kernel2D( pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center; center = fp_tex2D(tex_psi2D, t_i, t_j); func_d[tid] = center; } __global__ void test_surf_kernel2D(pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center; fp_surf2DLayeredread(&center,surf_psi2D, t_i, t_j, int(0), hipBoundaryModeZero); func_d[tid] = center; } __global__ void test_surf_kernel2DNL(pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center; fp_surf2Dread(&center,surf_psi2DNL, t_i, t_j, hipBoundaryModeZero); func_d[tid] = center; } __global__ void setzero_kernel2D(pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; func_d[tid] *=0; } __global__ void write2surf_kernel2D(pyComplex *realArray1){ // This kernel writes quantum pressure and non linear term of energy int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex arr1 = realArray1[tid]; // cudaPres arr2 = realArray2[tid]; // Write to Surfaces fp_surf2DLayeredwrite( arr1, surf_psi2D, t_i, t_j, int(0), hipBoundaryModeClamp); // fp_surf3Dwrite( arr2, surf_psi0OutImag, t_x*sizeof(cudaPres), t_y, t_z, hipBoundaryModeClamp); } __global__ void write2surf_kernel2DNL(pyComplex *realArray1){ // This kernel writes quantum pressure and non linear term of energy int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex arr1 = realArray1[tid]; // cudaPres arr2 = realArray2[tid]; // Write to Surfaces fp_surf2Dwrite( arr1, surf_psi2DNL, t_i, t_j, hipBoundaryModeClamp); // fp_surf3Dwrite( arr2, surf_psi0OutImag, t_x*sizeof(cudaPres), t_y, t_z, hipBoundaryModeClamp); }
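
// KspaceFFT above produces the standard FFT wavenumber ordering for a periodic box
// of length L: k = 2*pi*i/L for i < N/2 and k = 2*pi*(i - N)/L for the upper half,
// and laplaFFT_kernel applies the Laplacian spectrally by multiplying each mode by
// -k^2. A host-side version of the same layout for reference (plain C++ sketch in
// double precision; the function name is made up and it is not part of the kernel file):
#include <cstdio>
#include <vector>

std::vector<double> fft_wavenumbers(int n, double L) {
    const double two_pi = 2.0 * 3.14159265;             // matches the "pi" define above
    std::vector<double> k(n);
    for (int i = 0; i < n; ++i)
        k[i] = (i < n / 2) ? two_pi * i / L : two_pi * (i - n) / L;
    return k;
}

int main() {
    // For n = 8, L = 1: 0, 2pi, 4pi, 6pi, -8pi, -6pi, -4pi, -2pi
    for (double k : fft_wavenumbers(8, 1.0))
        std::printf("% .4f  ", k);
    std::printf("\n");
    return 0;
}
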
1b7ef682f37f6343a348f702af4675d5b57af064.cu
#include <pycuda-complex.hpp> #include <pycuda-helpers.hpp> #include <surface_functions.h> #define pi 3.14159265 #define phi 1.6180339 typedef pycuda::complex<cudaPres> pyComplex; extern "C++" { typedef float fp_tex_float; typedef int2 fp_tex_double; typedef uint2 fp_tex_cfloat; typedef int4 fp_tex_cdouble; __device__ void fp_surf2Dwrite(double var,surface<void, cudaSurfaceType2D> surf, int i, int j, enum cudaSurfaceBoundaryMode mode) { fp_tex_double auxvar; auxvar.x = __double2loint(var); auxvar.y = __double2hiint(var); surf2Dwrite(auxvar, surf, i*sizeof(fp_tex_double), j, mode); } __device__ void fp_surf2Dwrite(pycuda::complex<float> var,surface<void, cudaSurfaceType2D> surf, int i, int j, enum cudaSurfaceBoundaryMode mode) { fp_tex_cfloat auxvar; auxvar.x = __float_as_int(var._M_re); auxvar.y = __float_as_int(var._M_im); surf2Dwrite(auxvar, surf, i*sizeof(fp_tex_cfloat), j,mode); } __device__ void fp_surf2Dwrite(pycuda::complex<double> var,surface<void, cudaSurfaceType2D> surf, int i, int j, enum cudaSurfaceBoundaryMode mode) { fp_tex_cdouble auxvar; auxvar.x = __double2loint(var._M_re); auxvar.y = __double2hiint(var._M_re); auxvar.z = __double2loint(var._M_im); auxvar.w = __double2hiint(var._M_im); surf2Dwrite(auxvar, surf, i*sizeof(fp_tex_cdouble), j,mode); } __device__ void fp_surf2Dread(double *var, surface<void, cudaSurfaceType2D> surf, int i, int j, enum cudaSurfaceBoundaryMode mode) { fp_tex_double v; surf2Dread(&v, surf, i*sizeof(fp_tex_double), j, mode); *var = __hiloint2double(v.y, v.x); } __device__ void fp_surf2Dread(pycuda::complex<float> *var, surface<void, cudaSurfaceType2D> surf, int i, int j, enum cudaSurfaceBoundaryMode mode) { fp_tex_cfloat v; surf2Dread(&v, surf, i*sizeof(fp_tex_cfloat), j, mode); *var = pycuda::complex<float>(__int_as_float(v.x), __int_as_float(v.y)); } __device__ void fp_surf2Dread(pycuda::complex<double> *var, surface<void, cudaSurfaceType2D> surf, int i, int j, enum cudaSurfaceBoundaryMode mode) { fp_tex_cdouble v; surf2Dread(&v, surf, i*sizeof(fp_tex_cdouble), j, mode); *var = pycuda::complex<double>(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z)); } } surface< void, cudaSurfaceType3D> surf_psi ; texture< fp_pres, cudaTextureType3D, cudaReadModeElementType> tex_psi ; surface< void, cudaSurfaceType2DLayered> surf_psi2D; surface< void, cudaSurfaceType2D> surf_psi2DNL; texture< fp_pres, cudaTextureType2D, cudaReadModeElementType> tex_psi2D ; __device__ cudaPres KspaceFFT(int tid, int nPoint, cudaPres L){ cudaPres Kfft; if (tid < nPoint/2){ Kfft = 2.0f*pi*(tid)/L; } else { Kfft = 2.0f*pi*(tid-nPoint)/L; } return Kfft; } __global__ void gaussian_kernel( cudaPres dx,cudaPres dy, cudaPres dz, cudaPres xMin,cudaPres yMin, cudaPres zMin, cudaPres a,cudaPres b, cudaPres c, cudaPres d,cudaPres e, cudaPres f, int caso, pyComplex *psi){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int t_k = blockIdx.z*blockDim.z + threadIdx.z; int tid = t_i + t_j*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y; cudaPres x=xMin+t_i*dx; cudaPres y=yMin+t_j*dy; cudaPres z=zMin+t_k*dz; pyComplex value; if (caso==0){ value._M_re=exp(-a*x*x-b*y*y-c*z*z); value._M_im=exp(-d*x*x-e*y*y-f*z*z); } if (caso==1){ value._M_re=x; value._M_im=y; } psi[tid] = value; } __global__ void laplaFFT_kernel( cudaPres Lx,cudaPres Ly, cudaPres Lz, int nPointX,int nPointY, int nPointZ, pyComplex *fftTrnf){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int t_k = 
blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_i + gridDim.z * blockDim.z * t_j + t_k; cudaPres kX = KspaceFFT(t_i,nPointX, Lx);//kx[t_j]; cudaPres kY = KspaceFFT(t_j,nPointY, Ly);//ky[t_i]; cudaPres kZ = KspaceFFT(t_k,nPointZ, Lz);//kz[t_k]; cudaPres k2 = kX*kX + kY*kY + kZ*kZ; pyComplex value = fftTrnf[tid]; fftTrnf[tid] = -k2*value; } __global__ void laplaFDtex_kernel(cudaPres dx, cudaPres dy, cudaPres dz, pyComplex *func_d){ int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_x + blockDim.z * gridDim.z * t_y + t_z; pyComplex center, right, left, up, down, top, bottom; center = fp_tex3D(tex_psi, t_z, t_y, t_x); up = fp_tex3D(tex_psi, t_z, t_y+1, t_x); down = fp_tex3D(tex_psi, t_z, t_y-1, t_x); right = fp_tex3D(tex_psi, t_z, t_y, t_x+1); left = fp_tex3D(tex_psi, t_z, t_y, t_x-1); top = fp_tex3D(tex_psi, t_z+1, t_y, t_x); bottom = fp_tex3D(tex_psi, t_z-1, t_y, t_x); cudaPres drInv = 1.0/dy; pyComplex laplacian = (up + down - 2.0cString*center )*drInv*drInv; drInv = 1.0/dx; laplacian += (right + left - 2.0cString*center )*drInv*drInv; drInv = 1.0/dz; laplacian += (top + bottom - 2.0cString*center )*drInv*drInv; func_d[tid] = laplacian; } __global__ void laplaFDsurf_kernel(cudaPres dx, cudaPres dy, cudaPres dz, pyComplex *func_d){ int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_x + blockDim.z * gridDim.z * t_y + t_z; pyComplex center, right, left, up, down, top, bottom; fp_surf3Dread(&center,surf_psi, t_z, t_y, t_x, cudaBoundaryModeZero); fp_surf3Dread(&up,surf_psi, t_z, t_y+1, t_x, cudaBoundaryModeZero); fp_surf3Dread(&down,surf_psi, t_z, t_y-1, t_x, cudaBoundaryModeZero); fp_surf3Dread(&right,surf_psi, t_z, t_y, t_x+1, cudaBoundaryModeZero); fp_surf3Dread(&left,surf_psi, t_z, t_y, t_x-1, cudaBoundaryModeZero); fp_surf3Dread(&top,surf_psi, t_z+1, t_y, t_x, cudaBoundaryModeZero); fp_surf3Dread(&bottom,surf_psi, t_z-1, t_y, t_x, cudaBoundaryModeZero); cudaPres drInv = 1.0/dy; pyComplex laplacian = (up + down - 2.0cString*center )*drInv*drInv; drInv = 1.0/dx; laplacian += (right + left - 2.0cString*center )*drInv*drInv; drInv = 1.0/dz; laplacian += (top + bottom - 2.0cString*center )*drInv*drInv; func_d[tid] = laplacian; } __global__ void test_tex_kernel( pyComplex *func_d){ int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_x + blockDim.z * gridDim.z * t_y + t_z; pyComplex center; center = fp_tex3D(tex_psi, t_z, t_y, t_x); func_d[tid] = center; } __global__ void test_surf_kernel(pyComplex *func_d){ int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_x + blockDim.z * gridDim.z * t_y + t_z; pyComplex center; fp_surf3Dread(&center,surf_psi, t_z, t_y, t_x, cudaBoundaryModeZero); func_d[tid] = center; } __global__ void setzero_kernel(pyComplex *func_d){ int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y 
* t_x + blockDim.z * gridDim.z * t_y + t_z; func_d[tid] *=0; } __global__ void write2surf_kernel(pyComplex *realArray1){ // This kernel writes quantum pressure and non linear term of energy int t_x = blockIdx.x*blockDim.x + threadIdx.x; int t_y = blockIdx.y*blockDim.y + threadIdx.y; int t_z = blockIdx.z*blockDim.z + threadIdx.z; int tid = gridDim.z * blockDim.z * gridDim.y * blockDim.y * t_x + blockDim.z * gridDim.z * t_y + t_z; pyComplex arr1 = realArray1[tid]; // cudaPres arr2 = realArray2[tid]; // Write to Surfaces fp_surf3Dwrite( arr1, surf_psi, t_z, t_y, t_x, cudaBoundaryModeClamp); // fp_surf3Dwrite( arr2, surf_psi0OutImag, t_x*sizeof(cudaPres), t_y, t_z, cudaBoundaryModeClamp); } //################################################# 2D __global__ void gaussian_kernel2D( cudaPres dx,cudaPres dy, cudaPres xMin,cudaPres yMin, cudaPres a,cudaPres b, cudaPres d,cudaPres e, int caso, pyComplex *psi){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j + t_i ; cudaPres x=xMin+t_j*dx; cudaPres y=yMin+t_i*dy; pyComplex value; if (caso==0){ value._M_re=exp(-a*x*x-b*y*y); value._M_im=exp(-d*x*x-e*y*y); } if (caso==1){ value._M_re=x; value._M_im=y; } psi[tid] = value; } __global__ void laplaFFT_kernel2D( cudaPres Lx,cudaPres Ly, int nPointX,int nPointY, pyComplex *fftTrnf){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; cudaPres kX = KspaceFFT(t_j,nPointX, Lx);//kx[t_j]; cudaPres kY = KspaceFFT(t_i,nPointY, Ly);//ky[t_i]; //cudaPres kZ = KspaceFFT(t_k,nPointZ, Lz);//kz[t_k]; cudaPres k2 = kX*kX + kY*kY ; pyComplex value = fftTrnf[tid]; fftTrnf[tid] = -k2*value; } __global__ void laplaFDtex_kernel2D(cudaPres dx, cudaPres dy, pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center, right, left, up, down; center = fp_tex2D(tex_psi2D, t_i, t_j); right = fp_tex2D(tex_psi2D, t_i, t_j+1); left = fp_tex2D(tex_psi2D, t_i, t_j-1); up = fp_tex2D(tex_psi2D, t_i+1, t_j); down = fp_tex2D(tex_psi2D, t_i-1, t_j); cudaPres drInv = 1.0/dy; pyComplex laplacian = (up + down - 2.0cString*center )*drInv*drInv; drInv = 1.0/dx; laplacian += (right + left - 2.0cString*center )*drInv*drInv; //drInv = 1.0/dz; //laplacian += (top + bottom - 2.0cString*center )*drInv*drInv; func_d[tid] = laplacian; } __global__ void laplaFDsurf_kernel2D(cudaPres dx, cudaPres dy, pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center, right, left, up, down ; fp_surf2DLayeredread(&center,surf_psi2D, t_i, t_j, int(0), cudaBoundaryModeZero); fp_surf2DLayeredread(&up, surf_psi2D, t_i+1, t_j, int(0), cudaBoundaryModeZero); fp_surf2DLayeredread(&down, surf_psi2D, t_i-1, t_j, int(0), cudaBoundaryModeZero); fp_surf2DLayeredread(&right, surf_psi2D, t_i, t_j+1, int(0), cudaBoundaryModeZero); fp_surf2DLayeredread(&left, surf_psi2D, t_i, t_j-1, int(0), cudaBoundaryModeZero); //fp_surf3Dread(&top,surf_psi, t_z+1, t_y, t_x, cudaBoundaryModeZero); //fp_surf3Dread(&bottom,surf_psi, t_z-1, t_y, t_x, cudaBoundaryModeZero); cudaPres drInv = 1.0/dy; pyComplex laplacian = (up + down - 2.0cString*center )*drInv*drInv; drInv = 1.0/dx; laplacian += (right + left - 2.0cString*center )*drInv*drInv; func_d[tid] = laplacian; } __global__ void 
laplaFDsurf_kernel2DNL(cudaPres dx, cudaPres dy, pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center, right, left, up, down ; fp_surf2Dread(&center,surf_psi2DNL, t_i, t_j, cudaBoundaryModeZero); fp_surf2Dread(&up, surf_psi2DNL, t_i+1, t_j, cudaBoundaryModeZero); fp_surf2Dread(&down, surf_psi2DNL, t_i-1, t_j, cudaBoundaryModeZero); fp_surf2Dread(&right, surf_psi2DNL, t_i, t_j+1, cudaBoundaryModeZero); fp_surf2Dread(&left, surf_psi2DNL, t_i, t_j-1, cudaBoundaryModeZero); //fp_surf3Dread(&top,surf_psi, t_z+1, t_y, t_x, cudaBoundaryModeZero); //fp_surf3Dread(&bottom,surf_psi, t_z-1, t_y, t_x, cudaBoundaryModeZero); cudaPres drInv = 1.0/dy; pyComplex laplacian = (up + down - 2.0cString*center )*drInv*drInv; drInv = 1.0/dx; laplacian += (right + left - 2.0cString*center )*drInv*drInv; func_d[tid] = laplacian; } __global__ void test_tex_kernel2D( pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center; center = fp_tex2D(tex_psi2D, t_i, t_j); func_d[tid] = center; } __global__ void test_surf_kernel2D(pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center; fp_surf2DLayeredread(&center,surf_psi2D, t_i, t_j, int(0), cudaBoundaryModeZero); func_d[tid] = center; } __global__ void test_surf_kernel2DNL(pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex center; fp_surf2Dread(&center,surf_psi2DNL, t_i, t_j, cudaBoundaryModeZero); func_d[tid] = center; } __global__ void setzero_kernel2D(pyComplex *func_d){ int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; func_d[tid] *=0; } __global__ void write2surf_kernel2D(pyComplex *realArray1){ // This kernel writes quantum pressure and non linear term of energy int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex arr1 = realArray1[tid]; // cudaPres arr2 = realArray2[tid]; // Write to Surfaces fp_surf2DLayeredwrite( arr1, surf_psi2D, t_i, t_j, int(0), cudaBoundaryModeClamp); // fp_surf3Dwrite( arr2, surf_psi0OutImag, t_x*sizeof(cudaPres), t_y, t_z, cudaBoundaryModeClamp); } __global__ void write2surf_kernel2DNL(pyComplex *realArray1){ // This kernel writes quantum pressure and non linear term of energy int t_i = blockIdx.x*blockDim.x + threadIdx.x; int t_j = blockIdx.y*blockDim.y + threadIdx.y; int tid = gridDim.x * blockDim.x * t_j +t_i ; pyComplex arr1 = realArray1[tid]; // cudaPres arr2 = realArray2[tid]; // Write to Surfaces fp_surf2Dwrite( arr1, surf_psi2DNL, t_i, t_j, cudaBoundaryModeClamp); // fp_surf3Dwrite( arr2, surf_psi0OutImag, t_x*sizeof(cudaPres), t_y, t_z, cudaBoundaryModeClamp); }
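The laplaFFT kernels above rely on the KspaceFFT index-to-wavenumber convention (positive frequencies for tid < nPoint/2, aliased negative frequencies for the rest), and each Fourier coefficient is then scaled by -k2 to apply the spectral Laplacian. The standalone host-side sketch below only illustrates that convention; it assumes cudaPres is double, and the helper name kspace_fft_host is hypothetical, not part of the original source.

// Host-side sketch of the wavenumber layout used by KspaceFFT above.
// Assumes cudaPres == double; kspace_fft_host is a hypothetical helper name.
#include <cstdio>

static double kspace_fft_host(int tid, int nPoint, double L) {
    const double pi = 3.14159265;
    // First half of the spectrum: positive frequencies.
    // Second half: aliased negative frequencies.
    return (tid < nPoint / 2) ? 2.0 * pi * tid / L
                              : 2.0 * pi * (tid - nPoint) / L;
}

int main() {
    const int nPoint = 8;
    const double L = 2.0 * 3.14159265;  // with L = 2*pi the wavenumbers are integers
    for (int tid = 0; tid < nPoint; ++tid)
        std::printf("k[%d] = %+.1f\n", tid, kspace_fft_host(tid, nPoint, L));
    // Prints 0, +1, +2, +3, -4, -3, -2, -1; the kernel then multiplies each
    // Fourier coefficient by -(kX*kX + kY*kY) to apply the Laplacian.
    return 0;
}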
8532734c765aa50a4236619e4cab63f7232d85e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Note: The tests in this file involve run-time work on the GPU. * The way this is managed is with just a _single_ kernel for any * and all possible testcase - which is very generic: It * runs arbitrary test-case specific code, which is intended to * produce a sequence of values. These values are not necessarily * "results" - that depends on what it is you're running - but * they're values to then _check_ afterwards on the host side. */ #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include "common_hip.cuh" #include <kat/on_device/collaboration/grid.cuh> #include <kat/on_device/collaboration/block.cuh> #include <kat/on_device/collaboration/warp.cuh> #include <kat/on_device/atomics.cuh> using std::size_t; #if __cplusplus < 201701L #include <experimental/optional> template <typename T> using optional = std::experimental::optional<T>; #else template <typename T> #include <optional> using optional = std::optional<T>; #endif template <typename T> const auto make_exact_comparison { optional<T>{} }; namespace klcg = kat::linear_grid::collaborative::grid; namespace klcb = kat::linear_grid::collaborative::block; // namespace kcg = kat::collaborative::grid; namespace kcb = kat::collaborative::block; namespace kcw = kat::collaborative::warp; namespace kernels { template <typename F, typename T, typename... Is> __global__ void execute_testcase( F testcase_device_function, size_t num_values_to_populate, T* __restrict__ values_to_populate, const Is* __restrict__ ... inputs ) { testcase_device_function(num_values_to_populate, values_to_populate, inputs...); } } // namespace kernels template <typename T> std::size_t set_width_for_up_to(T max) { // assert(std::is_integral<I>::value, "Only integer types supported for now"); std::stringstream ss; ss << std::dec << max; return ss.str().length(); } namespace detail { template <typename T> auto tolerance_gadget(std::true_type, T x, optional<T> tolerance) { auto eps = tolerance.value_or(0); return doctest::Approx(x).epsilon(eps); } template <typename T> T tolerance_gadget(std::false_type, T x, optional<T>) { return x; } } // namespace detail template <typename T> auto tolerance_gadget(T x, optional<T> tolerance) { constexpr const auto is_arithmetic = std::is_arithmetic< std::decay_t<T> >::value; return detail::tolerance_gadget(std::integral_constant<bool, is_arithmetic>{}, x, tolerance); } // TODO: Take iterator templates rather than pointers template <typename T, typename F, typename... Is> void check_results( size_t num_values_to_check, // perhaps add another parameter for specific individual-check details? const T* __restrict__ actual_values, F expected_value_retriever, optional<T> comparison_tolerance_fraction, const Is* __restrict__... inputs) { std::stringstream ss; auto index_width = set_width_for_up_to(num_values_to_check); // TODO: Consider using the maximum/minimum result values to set field widths. for(size_t i = 0; i < num_values_to_check; i++) { ss.str(""); ss << "Assertion " << std::setw(index_width) << (i+1) << " for testcase " << doctest::current_test_name() // << " :\n" << "(" << std::make_tuple(inputs[i]...) 
<< ")" ; auto mismatch_message { ss.str() }; if (comparison_tolerance_fraction) { CHECK_MESSAGE(actual_values[i] == tolerance_gadget(expected_value_retriever(i), comparison_tolerance_fraction), mismatch_message); } else { CHECK_MESSAGE(actual_values[i] == expected_value_retriever(i), mismatch_message); } } } template <typename T> struct tag {}; /** * @brief Executes a testcase intended to make certain checks using a GPU kernel * which produces the values to check for. * * @note The actual checks are eventually conducted on the host side, since doctest * code can't actually do anything useful on the GPU. So on the GPU side we "merely" * compute the values to check and let the test logic peform the actual comparison later * on. */ template <typename F, typename K, typename T, typename... Is, size_t... Indices> auto execute_testcase_on_gpu( tag<T>, std::index_sequence<Indices...>, K testcase_kernel, F testcase_device_function, cuda::launch_configuration_t launch_config, size_t num_values_to_populate, Is* __restrict__ ... inputs) { cuda::device_t device { cuda::device::current::get() }; auto device_side_results { cuda::memory::device::make_unique<T[]>(device, num_values_to_populate) }; cuda::memory::device::zero(device_side_results.get(), num_values_to_populate * sizeof(T)); // just to be on the safe side auto host_side_results { std::vector<T>(num_values_to_populate) }; auto make_device_side_input = [&device, num_values_to_populate](auto input, size_t n) { using input_type = std::remove_reference_t<decltype(*input)>; auto device_side_input = cuda::memory::device::make_unique<input_type[]>(device, n); cuda::memory::copy(device_side_input.get(), input, num_values_to_populate * sizeof(input_type)); return std::move(device_side_input); }; auto device_side_inputs = std::make_tuple( make_device_side_input(inputs, num_values_to_populate)... ); ignore(device_side_inputs); // for the case of no inputs cuda::launch( testcase_kernel, launch_config, testcase_device_function, num_values_to_populate, device_side_results.get(), std::get<Indices>(device_side_inputs).get()... ); cuda::memory::copy(host_side_results.data(), device_side_results.get(), sizeof(T) * num_values_to_populate); return host_side_results; } template <typename F, typename ExpectedResultRetriever, typename T, typename... Is> void execute_non_uniform_testcase_on_gpu_and_check( F testcase_device_function, ExpectedResultRetriever expected_value_retriever, size_t num_values_to_populate, cuda::grid::dimensions_t grid_dimensions, cuda::grid::block_dimensions_t block_dimensions, optional<T> comparison_tolerance_fraction, Is* __restrict__ ... inputs) { auto launch_config { cuda::make_launch_config(grid_dimensions, block_dimensions) }; // TODO: Should we check that num_values_to_populate is equal to the number of grid threads? auto host_side_results = execute_testcase_on_gpu( tag<T>{}, typename std::make_index_sequence<sizeof...(Is)> {}, kernels::execute_testcase<F, T, Is...>, testcase_device_function, launch_config, num_values_to_populate, inputs... ); check_results ( num_values_to_populate, // perhaps add another parameter for specific testcase details? host_side_results.data(), expected_value_retriever, comparison_tolerance_fraction, inputs...); } TEST_SUITE("grid-level") { // Note: Types for instantiation are chosen based on what's actually available in CUDA TEST_CASE("at_grid_stride") { using checked_value_type = uint32_t; // No inputs, nor concrete expected results. 
auto testcase_device_function = [] KAT_DEV (size_t length, checked_value_type* results) { auto f_inner = [&] (size_t pos) { results[pos] = kat::linear_grid::grid_info::thread::id_in_grid(); }; klcg::at_grid_stride(length, f_inner); }; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 2 }; auto total_num_threads = num_grid_blocks * num_threads_per_block; auto expected_value_retriever = [total_num_threads] (size_t pos) { // Which thread processes position pos? return checked_value_type(pos % total_num_threads); }; auto num_values_to_populate = total_num_threads * 2 + kat::warp_size / 2 - 1; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } TEST_CASE("at_block_stride") { using checked_value_type = uint32_t; // The type for number of grids in a thread. Should we typedef that? cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 2 }; auto total_num_threads = num_grid_blocks * num_threads_per_block; size_t length_to_cover = total_num_threads * 2 + kat::warp_size / 2 - 1; // We don't actually create input data, we just need each element in the // range 0 ... length_to_cover-1 to be attended to by some thread // // In this test case, there's a single common range which the whole grid covers // (as opposed to block-level or warp-level collaboration) auto testcase_device_function = [] KAT_DEV (size_t length, checked_value_type* results) { auto f_inner = [&] (size_t pos) { // printf("Thread %u in block %u got pos %u of %u\n", threadIdx.x, blockIdx.x, (unsigned) pos, (unsigned) length); results[pos] = kat::linear_grid::grid_info::thread::id_in_grid(); }; auto serialization_factor = length / kat::linear_grid::grid_info::grid::num_threads() + (length % kat::linear_grid::grid_info::grid::num_threads() != 0); klcg::at_block_stride(length, f_inner, serialization_factor); }; auto serialization_factor = div_rounding_up(length_to_cover, total_num_threads); auto elements_processed_per_block = serialization_factor * num_threads_per_block; // std::cout << "length_to_cover = " << length_to_cover << ", num_threads_per_block = " << num_threads_per_block << ", elements_per_block = " << serialization_factor << '\n'; auto expected_value_retriever = [=] (size_t pos) { // Which thread processes position pos? 
auto processing_block_index = pos / elements_processed_per_block; auto processing_thread_index = pos % num_threads_per_block; // which is the same as (pos % processing_block_index) % num_threads_per_block return checked_value_type(processing_block_index * num_threads_per_block + processing_thread_index); }; // for(int i = 0; i < length_to_cover; i++) { // if (i % 10 == 0) { std::cout << '\n' << std::setw(3) << i << ": "; } // std::cout << std::setw(3) << expected_value_retriever(i) << " "; // } // std::cout << "\n\n"; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, length_to_cover, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } struct attending_threads_info { struct { uint32_t grid_size_minus_first, last; // We use grid_size_minus_first rather than first, so that // zero-initialization would be semantically acceptable } extrema; uint32_t num; }; // Note: All of this gets zero-initialized std::ostream& operator<<(std::ostream& os, const attending_threads_info& ati) { return os << "{ {" << ati.extrema.grid_size_minus_first << ", " << ati.extrema.last << " }, " << ati.num << " }"; } bool operator==(const attending_threads_info& lhs, const attending_threads_info & rhs) { return lhs.extrema.grid_size_minus_first == rhs.extrema.grid_size_minus_first and lhs.extrema.last == rhs.extrema.last and lhs.num == rhs.num; } TEST_CASE("warp_per_input_element::at_grid_stride") { using checked_value_type = attending_threads_info; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 15 }; auto total_num_threads = num_grid_blocks * num_threads_per_block; auto length_to_cover = total_num_threads / 4 + 1; // We don't actually create input data, we just need each element in the // range 0 ... length_to_cover-1 to be attended to by some full warp auto num_values_to_populate = length_to_cover; auto testcase_device_function = [=] KAT_DEV ( size_t length_of_attending_threads_info, checked_value_type* attending_threads_info) { namespace gi = kat::linear_grid::grid_info; const auto my_index = gi::thread::id_in_grid(); auto grid_size_minus_my_index = gi::grid::num_threads() - my_index; auto f_inner = [&] (size_t pos) { // printf("Thead %d of block %d is handling pos %lu\n", threadIdx.x, blockIdx.x, pos); kat::atomic::increment(&attending_threads_info[pos].num); kat::atomic::max(&attending_threads_info[pos].extrema.grid_size_minus_first, grid_size_minus_my_index); kat::atomic::max(&attending_threads_info[pos].extrema.last, my_index); }; klcg::warp_per_input_element::at_grid_stride(length_to_cover, f_inner); }; auto expected_value_retriever = [=] (size_t pos) { // Which threads have handled position pos? auto total_num_warps = total_num_threads / kat::warp_size; auto modular_pos = pos % total_num_warps; uint32_t first_thread_to_handle_element = modular_pos * kat::warp_size; uint32_t grid_size_minus_first = total_num_threads - first_thread_to_handle_element; uint32_t last = (modular_pos+1) * kat::warp_size - 1; uint32_t num = kat::warp_size; return attending_threads_info { { grid_size_minus_first, last }, num }; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } } // TEST_SUITE("grid-level")
8532734c765aa50a4236619e4cab63f7232d85e5.cu
/* * Note: The tests in this file involve run-time work on the GPU. * The way this is managed is with just a _single_ kernel for any * and all possible testcase - which is very generic: It * runs arbitrary test-case specific code, which is intended to * produce a sequence of values. These values are not necessarily * "results" - that depends on what it is you're running - but * they're values to then _check_ afterwards on the host side. */ #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include "common.cuh" #include <kat/on_device/collaboration/grid.cuh> #include <kat/on_device/collaboration/block.cuh> #include <kat/on_device/collaboration/warp.cuh> #include <kat/on_device/atomics.cuh> using std::size_t; #if __cplusplus < 201701L #include <experimental/optional> template <typename T> using optional = std::experimental::optional<T>; #else template <typename T> #include <optional> using optional = std::optional<T>; #endif template <typename T> const auto make_exact_comparison { optional<T>{} }; namespace klcg = kat::linear_grid::collaborative::grid; namespace klcb = kat::linear_grid::collaborative::block; // namespace kcg = kat::collaborative::grid; namespace kcb = kat::collaborative::block; namespace kcw = kat::collaborative::warp; namespace kernels { template <typename F, typename T, typename... Is> __global__ void execute_testcase( F testcase_device_function, size_t num_values_to_populate, T* __restrict__ values_to_populate, const Is* __restrict__ ... inputs ) { testcase_device_function(num_values_to_populate, values_to_populate, inputs...); } } // namespace kernels template <typename T> std::size_t set_width_for_up_to(T max) { // assert(std::is_integral<I>::value, "Only integer types supported for now"); std::stringstream ss; ss << std::dec << max; return ss.str().length(); } namespace detail { template <typename T> auto tolerance_gadget(std::true_type, T x, optional<T> tolerance) { auto eps = tolerance.value_or(0); return doctest::Approx(x).epsilon(eps); } template <typename T> T tolerance_gadget(std::false_type, T x, optional<T>) { return x; } } // namespace detail template <typename T> auto tolerance_gadget(T x, optional<T> tolerance) { constexpr const auto is_arithmetic = std::is_arithmetic< std::decay_t<T> >::value; return detail::tolerance_gadget(std::integral_constant<bool, is_arithmetic>{}, x, tolerance); } // TODO: Take iterator templates rather than pointers template <typename T, typename F, typename... Is> void check_results( size_t num_values_to_check, // perhaps add another parameter for specific individual-check details? const T* __restrict__ actual_values, F expected_value_retriever, optional<T> comparison_tolerance_fraction, const Is* __restrict__... inputs) { std::stringstream ss; auto index_width = set_width_for_up_to(num_values_to_check); // TODO: Consider using the maximum/minimum result values to set field widths. for(size_t i = 0; i < num_values_to_check; i++) { ss.str(""); ss << "Assertion " << std::setw(index_width) << (i+1) << " for testcase " << doctest::current_test_name() // << " :\n" << "(" << std::make_tuple(inputs[i]...) 
<< ")" ; auto mismatch_message { ss.str() }; if (comparison_tolerance_fraction) { CHECK_MESSAGE(actual_values[i] == tolerance_gadget(expected_value_retriever(i), comparison_tolerance_fraction), mismatch_message); } else { CHECK_MESSAGE(actual_values[i] == expected_value_retriever(i), mismatch_message); } } } template <typename T> struct tag {}; /** * @brief Executes a testcase intended to make certain checks using a GPU kernel * which produces the values to check for. * * @note The actual checks are eventually conducted on the host side, since doctest * code can't actually do anything useful on the GPU. So on the GPU side we "merely" * compute the values to check and let the test logic peform the actual comparison later * on. */ template <typename F, typename K, typename T, typename... Is, size_t... Indices> auto execute_testcase_on_gpu( tag<T>, std::index_sequence<Indices...>, K testcase_kernel, F testcase_device_function, cuda::launch_configuration_t launch_config, size_t num_values_to_populate, Is* __restrict__ ... inputs) { cuda::device_t device { cuda::device::current::get() }; auto device_side_results { cuda::memory::device::make_unique<T[]>(device, num_values_to_populate) }; cuda::memory::device::zero(device_side_results.get(), num_values_to_populate * sizeof(T)); // just to be on the safe side auto host_side_results { std::vector<T>(num_values_to_populate) }; auto make_device_side_input = [&device, num_values_to_populate](auto input, size_t n) { using input_type = std::remove_reference_t<decltype(*input)>; auto device_side_input = cuda::memory::device::make_unique<input_type[]>(device, n); cuda::memory::copy(device_side_input.get(), input, num_values_to_populate * sizeof(input_type)); return std::move(device_side_input); }; auto device_side_inputs = std::make_tuple( make_device_side_input(inputs, num_values_to_populate)... ); ignore(device_side_inputs); // for the case of no inputs cuda::launch( testcase_kernel, launch_config, testcase_device_function, num_values_to_populate, device_side_results.get(), std::get<Indices>(device_side_inputs).get()... ); cuda::memory::copy(host_side_results.data(), device_side_results.get(), sizeof(T) * num_values_to_populate); return host_side_results; } template <typename F, typename ExpectedResultRetriever, typename T, typename... Is> void execute_non_uniform_testcase_on_gpu_and_check( F testcase_device_function, ExpectedResultRetriever expected_value_retriever, size_t num_values_to_populate, cuda::grid::dimensions_t grid_dimensions, cuda::grid::block_dimensions_t block_dimensions, optional<T> comparison_tolerance_fraction, Is* __restrict__ ... inputs) { auto launch_config { cuda::make_launch_config(grid_dimensions, block_dimensions) }; // TODO: Should we check that num_values_to_populate is equal to the number of grid threads? auto host_side_results = execute_testcase_on_gpu( tag<T>{}, typename std::make_index_sequence<sizeof...(Is)> {}, kernels::execute_testcase<F, T, Is...>, testcase_device_function, launch_config, num_values_to_populate, inputs... ); check_results ( num_values_to_populate, // perhaps add another parameter for specific testcase details? host_side_results.data(), expected_value_retriever, comparison_tolerance_fraction, inputs...); } TEST_SUITE("grid-level") { // Note: Types for instantiation are chosen based on what's actually available in CUDA TEST_CASE("at_grid_stride") { using checked_value_type = uint32_t; // No inputs, nor concrete expected results. 
auto testcase_device_function = [] KAT_DEV (size_t length, checked_value_type* results) { auto f_inner = [&] (size_t pos) { results[pos] = kat::linear_grid::grid_info::thread::id_in_grid(); }; klcg::at_grid_stride(length, f_inner); }; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 2 }; auto total_num_threads = num_grid_blocks * num_threads_per_block; auto expected_value_retriever = [total_num_threads] (size_t pos) { // Which thread processes position pos? return checked_value_type(pos % total_num_threads); }; auto num_values_to_populate = total_num_threads * 2 + kat::warp_size / 2 - 1; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } TEST_CASE("at_block_stride") { using checked_value_type = uint32_t; // The type for number of grids in a thread. Should we typedef that? cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 2 }; auto total_num_threads = num_grid_blocks * num_threads_per_block; size_t length_to_cover = total_num_threads * 2 + kat::warp_size / 2 - 1; // We don't actually create input data, we just need each element in the // range 0 ... length_to_cover-1 to be attended to by some thread // // In this test case, there's a single common range which the whole grid covers // (as opposed to block-level or warp-level collaboration) auto testcase_device_function = [] KAT_DEV (size_t length, checked_value_type* results) { auto f_inner = [&] (size_t pos) { // printf("Thread %u in block %u got pos %u of %u\n", threadIdx.x, blockIdx.x, (unsigned) pos, (unsigned) length); results[pos] = kat::linear_grid::grid_info::thread::id_in_grid(); }; auto serialization_factor = length / kat::linear_grid::grid_info::grid::num_threads() + (length % kat::linear_grid::grid_info::grid::num_threads() != 0); klcg::at_block_stride(length, f_inner, serialization_factor); }; auto serialization_factor = div_rounding_up(length_to_cover, total_num_threads); auto elements_processed_per_block = serialization_factor * num_threads_per_block; // std::cout << "length_to_cover = " << length_to_cover << ", num_threads_per_block = " << num_threads_per_block << ", elements_per_block = " << serialization_factor << '\n'; auto expected_value_retriever = [=] (size_t pos) { // Which thread processes position pos? 
auto processing_block_index = pos / elements_processed_per_block; auto processing_thread_index = pos % num_threads_per_block; // which is the same as (pos % processing_block_index) % num_threads_per_block return checked_value_type(processing_block_index * num_threads_per_block + processing_thread_index); }; // for(int i = 0; i < length_to_cover; i++) { // if (i % 10 == 0) { std::cout << '\n' << std::setw(3) << i << ": "; } // std::cout << std::setw(3) << expected_value_retriever(i) << " "; // } // std::cout << "\n\n"; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, length_to_cover, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } struct attending_threads_info { struct { uint32_t grid_size_minus_first, last; // We use grid_size_minus_first rather than first, so that // zero-initialization would be semantically acceptable } extrema; uint32_t num; }; // Note: All of this gets zero-initialized std::ostream& operator<<(std::ostream& os, const attending_threads_info& ati) { return os << "{ {" << ati.extrema.grid_size_minus_first << ", " << ati.extrema.last << " }, " << ati.num << " }"; } bool operator==(const attending_threads_info& lhs, const attending_threads_info & rhs) { return lhs.extrema.grid_size_minus_first == rhs.extrema.grid_size_minus_first and lhs.extrema.last == rhs.extrema.last and lhs.num == rhs.num; } TEST_CASE("warp_per_input_element::at_grid_stride") { using checked_value_type = attending_threads_info; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 15 }; auto total_num_threads = num_grid_blocks * num_threads_per_block; auto length_to_cover = total_num_threads / 4 + 1; // We don't actually create input data, we just need each element in the // range 0 ... length_to_cover-1 to be attended to by some full warp auto num_values_to_populate = length_to_cover; auto testcase_device_function = [=] KAT_DEV ( size_t length_of_attending_threads_info, checked_value_type* attending_threads_info) { namespace gi = kat::linear_grid::grid_info; const auto my_index = gi::thread::id_in_grid(); auto grid_size_minus_my_index = gi::grid::num_threads() - my_index; auto f_inner = [&] (size_t pos) { // printf("Thead %d of block %d is handling pos %lu\n", threadIdx.x, blockIdx.x, pos); kat::atomic::increment(&attending_threads_info[pos].num); kat::atomic::max(&attending_threads_info[pos].extrema.grid_size_minus_first, grid_size_minus_my_index); kat::atomic::max(&attending_threads_info[pos].extrema.last, my_index); }; klcg::warp_per_input_element::at_grid_stride(length_to_cover, f_inner); }; auto expected_value_retriever = [=] (size_t pos) { // Which threads have handled position pos? auto total_num_warps = total_num_threads / kat::warp_size; auto modular_pos = pos % total_num_warps; uint32_t first_thread_to_handle_element = modular_pos * kat::warp_size; uint32_t grid_size_minus_first = total_num_threads - first_thread_to_handle_element; uint32_t last = (modular_pos+1) * kat::warp_size - 1; uint32_t num = kat::warp_size; return attending_threads_info { { grid_size_minus_first, last }, num }; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } } // TEST_SUITE("grid-level")
c25f563613b4b619260438de5deecc2fe4c004a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdiocu.h> #include <stringcu.h> #include "../libcu/fsystem.h" #include <assert.h> static __global__ void g_fsystem_test1() { printf("fsystem_test1\n"); char newPath[MAX_PATH]; // ABSOLUTE strcpy(__cwd, ":\\test"); expandPath(":\\", newPath); int a0a = !strcmp(newPath, ":"); expandPath(":/one", newPath); int a1a = !strcmp(newPath, ":\\one"); expandPath(":\\one", newPath); int a2a = !strcmp(newPath, ":\\one"); expandPath(":\\one\\", newPath); int a3a = !strcmp(newPath, ":\\one"); assert(a0a && a1a && a2a && a3a); expandPath(":\\.", newPath); int b0a = !strcmp(newPath, ":"); expandPath(":\\one\\.", newPath); int b1a = !strcmp(newPath, ":\\one"); expandPath(":\\one\\.\\", newPath); int b2a = !strcmp(newPath, ":\\one"); expandPath(":\\one\\.\\two", newPath); int b3a = !strcmp(newPath, ":\\one\\two"); assert(b0a && b1a && b2a && b3a); expandPath(":\\one\\..\\two", newPath); int c0a = !strcmp(newPath, ":\\two"); expandPath(":\\one\\..\\two\\three", newPath); int c1a = !strcmp(newPath, ":\\two\\three"); assert(c0a && c1a); // ROOT strcpy(__cwd, ":\\test"); expandPath("\\.", newPath); int d0a = !strcmp(newPath, ":"); expandPath("\\one", newPath); int d1a = !strcmp(newPath, ":\\one"); assert(d0a && d1a); // RELATIVE strcpy(__cwd, ":\\test"); expandPath(".", newPath); int e0a = !strcmp(newPath, ":\\test"); //printf("%s\n", newPath); expandPath("one", newPath); int e1a = !strcmp(newPath, ":\\test\\one"); //printf("%s\n", newPath); //assert(e0a && e1a); // CHDIR strcpy(__cwd, ":\\test"); int f0a = fsystemChdir(":\\"); int f0b = !strcmp(__cwd, ":\\"); //assert(f0a); // OPENDIR dirEnt_t *g0a = fsystemOpendir(":\\"); int g0b = !strcmp(__cwd, ":\\"); //assert(g0a); // RENAME int h0a = fsystemRename(":\\", ":\\"); int h0b = !strcmp(__cwd, ":\\"); //assert(h0a); // UNLINK int i0a = fsystemUnlink(":\\", false); int i0b = !strcmp(__cwd, ":\\"); //assert(i0a); // MKDIR int r; dirEnt_t *j0a = fsystemMkdir(":\\", 0, &r); int j0b = !strcmp(__cwd, ":\\"); //assert(j0a); // OPEN int fd; dirEnt_t *k0a = fsystemOpen(":\\", 0, &fd); int k0b = !strcmp(__cwd, ":\\"); //assert(k0a); // RESET fsystemReset(); } hipError_t fsystem_test1() {hipLaunchKernelGGL(( g_fsystem_test1), dim3(1), dim3(1), 0, 0, ); return hipDeviceSynchronize(); }
c25f563613b4b619260438de5deecc2fe4c004a9.cu
#include <stdiocu.h> #include <stringcu.h> #include "../libcu/fsystem.h" #include <assert.h> static __global__ void g_fsystem_test1() { printf("fsystem_test1\n"); char newPath[MAX_PATH]; // ABSOLUTE strcpy(__cwd, ":\\test"); expandPath(":\\", newPath); int a0a = !strcmp(newPath, ":"); expandPath(":/one", newPath); int a1a = !strcmp(newPath, ":\\one"); expandPath(":\\one", newPath); int a2a = !strcmp(newPath, ":\\one"); expandPath(":\\one\\", newPath); int a3a = !strcmp(newPath, ":\\one"); assert(a0a && a1a && a2a && a3a); expandPath(":\\.", newPath); int b0a = !strcmp(newPath, ":"); expandPath(":\\one\\.", newPath); int b1a = !strcmp(newPath, ":\\one"); expandPath(":\\one\\.\\", newPath); int b2a = !strcmp(newPath, ":\\one"); expandPath(":\\one\\.\\two", newPath); int b3a = !strcmp(newPath, ":\\one\\two"); assert(b0a && b1a && b2a && b3a); expandPath(":\\one\\..\\two", newPath); int c0a = !strcmp(newPath, ":\\two"); expandPath(":\\one\\..\\two\\three", newPath); int c1a = !strcmp(newPath, ":\\two\\three"); assert(c0a && c1a); // ROOT strcpy(__cwd, ":\\test"); expandPath("\\.", newPath); int d0a = !strcmp(newPath, ":"); expandPath("\\one", newPath); int d1a = !strcmp(newPath, ":\\one"); assert(d0a && d1a); // RELATIVE strcpy(__cwd, ":\\test"); expandPath(".", newPath); int e0a = !strcmp(newPath, ":\\test"); //printf("%s\n", newPath); expandPath("one", newPath); int e1a = !strcmp(newPath, ":\\test\\one"); //printf("%s\n", newPath); //assert(e0a && e1a); // CHDIR strcpy(__cwd, ":\\test"); int f0a = fsystemChdir(":\\"); int f0b = !strcmp(__cwd, ":\\"); //assert(f0a); // OPENDIR dirEnt_t *g0a = fsystemOpendir(":\\"); int g0b = !strcmp(__cwd, ":\\"); //assert(g0a); // RENAME int h0a = fsystemRename(":\\", ":\\"); int h0b = !strcmp(__cwd, ":\\"); //assert(h0a); // UNLINK int i0a = fsystemUnlink(":\\", false); int i0b = !strcmp(__cwd, ":\\"); //assert(i0a); // MKDIR int r; dirEnt_t *j0a = fsystemMkdir(":\\", 0, &r); int j0b = !strcmp(__cwd, ":\\"); //assert(j0a); // OPEN int fd; dirEnt_t *k0a = fsystemOpen(":\\", 0, &fd); int k0b = !strcmp(__cwd, ":\\"); //assert(k0a); // RESET fsystemReset(); } cudaError_t fsystem_test1() { g_fsystem_test1<<<1, 1>>>(); return cudaDeviceSynchronize(); }
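No host-side driver ships with the test above; the minimal sketch below is hypothetical and simply runs fsystem_test1(), reporting the error code returned by cudaDeviceSynchronize() in the same style used elsewhere in this collection. It assumes the program is linked against the file above together with its libcu dependencies.

// Hypothetical driver for g_fsystem_test1 via the fsystem_test1() wrapper above.
#include <cstdio>
#include <cuda_runtime.h>

cudaError_t fsystem_test1();  // defined in the file above

int main() {
    cudaError_t err = fsystem_test1();
    if (err != cudaSuccess) {
        std::fprintf(stderr, "fsystem_test1 failed: code=%d, %s\n",
                     (int)err, cudaGetErrorString(err));
        return 1;
    }
    std::printf("fsystem_test1 completed\n");
    return 0;
}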
46b7f7badc3d162ec29ec8e9acd375d191b2e549.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <iostream> #include "gpu-new-forward.h" #define T_WIDTH 16 __constant__ float k[16*4*7*7]; __global__ void conv_forward_kernel(float *y, const float *x, const int B, const int M, const int C, const int H, const int W, const int K) { /* Modify this function to implement the forward pass described in Chapter 16. We have added an additional dimension to the tensors to support an entire mini-batch The goal here is to be correct AND fast. Function parameter definitions: y - output x - input k - kernel B - batch_size (number of images in x) M - number of output feature maps C - number of input feature maps H - input height dimension W - input width dimension K - kernel height and width (K x K) */ const int H_out = H - K + 1; const int W_out = W - K + 1; // We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own. // An example use of these macros: // float a = y4d(0,0,0,0) // y4d(0,0,0,0) = a #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0] #define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0] #define k4d(i3, i2, i1, i0) k[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0] // Insert your GPU convolution kernel code here int m = blockIdx.z; int bc = blockIdx.y*T_WIDTH + threadIdx.y; int coor = blockIdx.x*T_WIDTH + threadIdx.x; int w = coor % W_out; int h = coor / W_out; int b = bc % B; int c = bc / B; /*bool lower_bound = (w >= 0 && h >= 0 && b >= 0); bool upper_bound = (w < W && h < H && b < B); if (lower_bound && upper_bound) { N[b*C*H*W + ] }*/ if ((h < H_out) && (w < W_out) && b < B) { float temp = 0; for (int p = 0; p < K; p++) { for (int q = 0; q < K; q++) { temp += x4d(b, c, h + p, w + q) * k4d(m ,c, p, q); //k[(m) * (C * K * K) + (c) * (K * K) + (p) * (K) + q]; k4d(m ,c, p, q); } } atomicAdd(&(y4d(b, m, h, w)), temp); //y4d(b, m, h, w) = temp; } #undef y4d #undef x4d #undef k4d } __host__ void conv_forward_gpu(float *host_y, const float *host_x, const float *host_k, const int B, const int M, const int C, const int H, const int W, const int K) { // Declare relevant device pointers float* device_y; float* device_x; //float* device_k; // Allocate memory and copy over the relevant data structures to the GPU hipMalloc((void**)(&device_y), B * M * (H-K+1)*(W-K+1)*sizeof(float)); hipMalloc((void**)(&device_x), B * C * H * W * sizeof(float)); //hipMalloc((void**)(&device_k), M * C * K * K *sizeof(float)); hipMemcpy(device_x, host_x, B * C * H * W * sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(device_k, host_k, M * C * K * K *sizeof(float), hipMemcpyHostToDevice); hipMemcpyToSymbol(k, host_k, M * C * K * K *sizeof(float)); hipMemset(device_y, 0, B * M * (H-K+1)*(W-K+1) * sizeof(float)); // Set the kernel dimensions and call the kernel dim3 dimGrid(ceil((1.0*(W-K+1)*(H-K+1))/T_WIDTH), ceil((1.0*B*C)/T_WIDTH), M); dim3 dimBlock(T_WIDTH, T_WIDTH, 1); hipLaunchKernelGGL(( conv_forward_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, device_y, device_x, B, M, C, H, W, K); // Copy the output back to host hipMemcpy(host_y, device_y, B * M * (H-K+1)*(W-K+1)*sizeof(float), hipMemcpyDeviceToHost); // Free device memory hipFree(device_y); hipFree(device_x); //hipFree(device_k); // Useful snippet for error checking // hipError_t error = hipGetLastError(); // if(error != hipSuccess) // { // std::cout<<"CUDA error: "<<hipGetErrorString(error)<<std::endl; // 
exit(-1); // } } __host__ void GPUInterface::get_device_properties() { int deviceCount; hipGetDeviceCount(&deviceCount); for(int dev = 0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl; std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl; std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl; std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl; std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl; std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl; std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl; std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl; std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl; } }
46b7f7badc3d162ec29ec8e9acd375d191b2e549.cu
#include <cmath> #include <iostream> #include "gpu-new-forward.h" #define T_WIDTH 16 __constant__ float k[16*4*7*7]; __global__ void conv_forward_kernel(float *y, const float *x, const int B, const int M, const int C, const int H, const int W, const int K) { /* Modify this function to implement the forward pass described in Chapter 16. We have added an additional dimension to the tensors to support an entire mini-batch The goal here is to be correct AND fast. Function parameter definitions: y - output x - input k - kernel B - batch_size (number of images in x) M - number of output feature maps C - number of input feature maps H - input height dimension W - input width dimension K - kernel height and width (K x K) */ const int H_out = H - K + 1; const int W_out = W - K + 1; // We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own. // An example use of these macros: // float a = y4d(0,0,0,0) // y4d(0,0,0,0) = a #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0] #define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0] #define k4d(i3, i2, i1, i0) k[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0] // Insert your GPU convolution kernel code here int m = blockIdx.z; int bc = blockIdx.y*T_WIDTH + threadIdx.y; int coor = blockIdx.x*T_WIDTH + threadIdx.x; int w = coor % W_out; int h = coor / W_out; int b = bc % B; int c = bc / B; /*bool lower_bound = (w >= 0 && h >= 0 && b >= 0); bool upper_bound = (w < W && h < H && b < B); if (lower_bound && upper_bound) { N[b*C*H*W + ] }*/ if ((h < H_out) && (w < W_out) && b < B) { float temp = 0; for (int p = 0; p < K; p++) { for (int q = 0; q < K; q++) { temp += x4d(b, c, h + p, w + q) * k4d(m ,c, p, q); //k[(m) * (C * K * K) + (c) * (K * K) + (p) * (K) + q]; k4d(m ,c, p, q); } } atomicAdd(&(y4d(b, m, h, w)), temp); //y4d(b, m, h, w) = temp; } #undef y4d #undef x4d #undef k4d } __host__ void conv_forward_gpu(float *host_y, const float *host_x, const float *host_k, const int B, const int M, const int C, const int H, const int W, const int K) { // Declare relevant device pointers float* device_y; float* device_x; //float* device_k; // Allocate memory and copy over the relevant data structures to the GPU cudaMalloc((void**)(&device_y), B * M * (H-K+1)*(W-K+1)*sizeof(float)); cudaMalloc((void**)(&device_x), B * C * H * W * sizeof(float)); //cudaMalloc((void**)(&device_k), M * C * K * K *sizeof(float)); cudaMemcpy(device_x, host_x, B * C * H * W * sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(device_k, host_k, M * C * K * K *sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(k, host_k, M * C * K * K *sizeof(float)); cudaMemset(device_y, 0, B * M * (H-K+1)*(W-K+1) * sizeof(float)); // Set the kernel dimensions and call the kernel dim3 dimGrid(ceil((1.0*(W-K+1)*(H-K+1))/T_WIDTH), ceil((1.0*B*C)/T_WIDTH), M); dim3 dimBlock(T_WIDTH, T_WIDTH, 1); conv_forward_kernel<<<dimGrid,dimBlock>>>(device_y, device_x, B, M, C, H, W, K); // Copy the output back to host cudaMemcpy(host_y, device_y, B * M * (H-K+1)*(W-K+1)*sizeof(float), cudaMemcpyDeviceToHost); // Free device memory cudaFree(device_y); cudaFree(device_x); //cudaFree(device_k); // Useful snippet for error checking // cudaError_t error = cudaGetLastError(); // if(error != cudaSuccess) // { // std::cout<<"CUDA error: "<<cudaGetErrorString(error)<<std::endl; // exit(-1); // } } __host__ void GPUInterface::get_device_properties() { int deviceCount; 
cudaGetDeviceCount(&deviceCount); for(int dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl; std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl; std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl; std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl; std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl; std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl; std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl; std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl; std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl; } }
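conv_forward_gpu above leaves its error handling commented out; one way to enable it is a small launch-checking helper like the sketch below. The macro name CHECK_CUDA is hypothetical and not part of the original file.

// Minimal error-checking helper; CHECK_CUDA is a hypothetical name.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                   \
    do {                                                                   \
        cudaError_t err__ = (call);                                        \
        if (err__ != cudaSuccess) {                                        \
            std::fprintf(stderr, "CUDA error %s at %s:%d: %s\n",           \
                         cudaGetErrorName(err__), __FILE__, __LINE__,      \
                         cudaGetErrorString(err__));                       \
            std::exit(-1);                                                 \
        }                                                                  \
    } while (0)

// Example usage around the launch in conv_forward_gpu (kernel arguments elided):
//   conv_forward_kernel<<<dimGrid, dimBlock>>>(...);
//   CHECK_CUDA(cudaGetLastError());       // catches launch-configuration errors
//   CHECK_CUDA(cudaDeviceSynchronize());  // catches asynchronous execution errors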
bae1dfe3d5ce829627179a7b707de0bfdd71a90c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <vector>
#include <cmath>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "reduction.h"

using namespace std;

__global__ void vecDiffKernel(double *A, double *B, double *C, int n) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
        C[i] = fabs(A[i] - B[i]);
}

void vecDiffWrapper(double * A, double * B, double * C, int n) {
    int size = n * sizeof(double);
    double *d_A, *d_B, *d_C;
    hipMalloc((void**) &d_A, size);
    hipMalloc((void**) &d_B, size);
    hipMalloc((void**) &d_C, size);
    hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);

    dim3 dimGrid(ceil(n/256.0), 1, 1);
    dim3 dimBlock(256, 1, 1);
    hipLaunchKernelGGL(( vecDiffKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, n);

    hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
}

double reduceSumWrapper(double * A, int n) {
    int size = n * sizeof(double);
    double *d_A, *d_out;
    hipMalloc((void**) &d_A, size);
    hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);

    int num_threads = 256;
    int num_blocks = ceil(n/(float)num_threads);
    int size_out = num_blocks*sizeof(double);
    hipMalloc((void**) &d_out, size_out);
    double * h_out = new double[num_blocks];

    dim3 dimGrid(num_blocks, 1, 1);
    dim3 dimBlock(num_threads, 1, 1);
    reduce<double>(n, num_threads, num_blocks, 6, d_A, d_out);

    hipMemcpy(h_out, d_out, size_out, hipMemcpyDeviceToHost);

    double sum = 0.0;
    for (int i=0; i < num_blocks; i++) {
        sum += h_out[i];
    }

    hipFree(d_A);
    hipFree(d_out);
    delete[] h_out;

    return sum;
}
bae1dfe3d5ce829627179a7b707de0bfdd71a90c.cu
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <vector>
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
#include "reduction.h"

using namespace std;

__global__ void vecDiffKernel(double *A, double *B, double *C, int n) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
        C[i] = fabs(A[i] - B[i]);
}

void vecDiffWrapper(double * A, double * B, double * C, int n) {
    int size = n * sizeof(double);
    double *d_A, *d_B, *d_C;
    cudaMalloc((void**) &d_A, size);
    cudaMalloc((void**) &d_B, size);
    cudaMalloc((void**) &d_C, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);

    dim3 dimGrid(ceil(n/256.0), 1, 1);
    dim3 dimBlock(256, 1, 1);
    vecDiffKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, n);

    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}

double reduceSumWrapper(double * A, int n) {
    int size = n * sizeof(double);
    double *d_A, *d_out;
    cudaMalloc((void**) &d_A, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);

    int num_threads = 256;
    int num_blocks = ceil(n/(float)num_threads);
    int size_out = num_blocks*sizeof(double);
    cudaMalloc((void**) &d_out, size_out);
    double * h_out = new double[num_blocks];

    dim3 dimGrid(num_blocks, 1, 1);
    dim3 dimBlock(num_threads, 1, 1);
    reduce<double>(n, num_threads, num_blocks, 6, d_A, d_out);

    cudaMemcpy(h_out, d_out, size_out, cudaMemcpyDeviceToHost);

    double sum = 0.0;
    for (int i=0; i < num_blocks; i++) {
        sum += h_out[i];
    }

    cudaFree(d_A);
    cudaFree(d_out);
    delete[] h_out;

    return sum;
}
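Taken together, the two wrappers above compute an element-wise absolute difference on the GPU and then reduce it. The hypothetical usage sketch below (not part of the original files) uses them to compute an L1 distance between two host arrays; it assumes the declarations match the definitions above and that the reduce<> kernels from reduction.h are linked in.

// Hypothetical usage: L1 distance between two host arrays via the wrappers above.
#include <cstdio>
#include <vector>

void   vecDiffWrapper(double* A, double* B, double* C, int n);  // defined above
double reduceSumWrapper(double* A, int n);                      // defined above

int main() {
    const int n = 1 << 20;
    std::vector<double> a(n, 1.0), b(n, 0.75), diff(n, 0.0);

    vecDiffWrapper(a.data(), b.data(), diff.data(), n);  // diff[i] = |a[i] - b[i]| on the GPU
    double l1 = reduceSumWrapper(diff.data(), n);        // GPU reduction of diff

    std::printf("L1 distance: %f (expected %f)\n", l1, 0.25 * n);
    return 0;
}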
38f893c67b08fd8301cb2da1bb87a2a25a5506a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/graph_send_ue_recv_grad_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/hostdevice.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/funcs/elementwise_functor.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/gpu/graph_send_recv_funcs.h" #include "paddle/phi/kernels/gpu/graph_send_ue_recv_funcs.h" #include "paddle/phi/kernels/impl/graph_message_passing_impl.h" #include "paddle/phi/kernels/reduce_sum_kernel.h" namespace phi { template <typename Context, typename T, typename IndexT> void CalculateXEGradForMinMax(const Context& ctx, const T* out_grad, const T* x_data, const T* e_data, const phi::DDim& x_dims, const phi::DDim& e_dims, const IndexT* s_index, const IndexT* d_index, const std::string& message_op, const std::string& reduce_op, int64_t index_size, T* x_grad, T* e_grad, const DenseTensor* out = nullptr) { const T* out_data = out->data<T>(); const auto& bcast_info = phi::CalcBCastInfo(x_dims, e_dims); thrust::device_vector<int64_t> l_bcastoff, r_bcastoff; if (bcast_info.use_bcast) { CopyBCastOff(bcast_info, l_bcastoff, r_bcastoff); } int64_t out_len = bcast_info.out_len; const int ntx = FindNumThreads(out_len, ctx.GetMaxThreadsPerBlock()); const int nty = ctx.GetMaxThreadsPerBlock() / ntx; const int nbx = (out_len + ntx - 1) / ntx; const int nby = FindNumBlocks('y', (index_size + nty - 1) / nty); const dim3 grid(nbx, nby); const dim3 block(ntx, nty); if (message_op == "ADD") { hipLaunchKernelGGL(( ManipulateMinMaxGradCUDAKernelForAdd<T, IndexT>) , dim3(grid), dim3(block), 0, ctx.stream(), x_data, e_data, out_data, out_grad, d_index, s_index, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad, e_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); } else if (message_op == "MUL") { hipLaunchKernelGGL(( ManipulateMinMaxGradCUDAKernelForMul<T, IndexT>) , dim3(grid), dim3(block), 0, ctx.stream(), x_data, e_data, out_data, out_grad, d_index, s_index, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad, e_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); } } template <typename Context, typename T, typename IndexT> void CalculateXGrad(const Context& ctx, const T* out_grad, const T* x_data, const T* e_data, const phi::DDim& out_grad_dims, const phi::DDim& x_dims, const phi::DDim& e_dims, const IndexT* s_index, const IndexT* d_index, const std::string& message_op, const std::string& reduce_op, int64_t index_size, int64_t slice_size, T* x_grad, const DenseTensor& out_grad_tensor, const DenseTensor* dst_count = nullptr, const DenseTensor* out = nullptr) { #ifdef 
PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int64_t n = slice_size * index_size; int max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t grid_tmp = (n + block - 1) / block; int64_t grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx; std::vector<int64_t> reduce_idx; bool reduce = ReduceGrad(out_grad_dims, x_dims, reduce_idx); if (reduce_op == "SUM") { if (message_op == "ADD") { GraphSendRecvSumCUDAFunctor<T, IndexT> functor; if (!reduce) { hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>>) , dim3(grid), dim3(block), 0, ctx.stream(), out_grad, d_index, s_index, x_grad, index_size, slice_size, functor); } else { const auto& bcast_info = phi::CalcBCastInfo(out_grad_dims, e_dims); DenseTensor x_grad_v2 = phi::EmptyLike<T, Context>(ctx, out_grad_tensor); phi::funcs::SetConstant<Context, T>()(ctx, &x_grad_v2, T(0)); T* x_grad_v2_data = x_grad_v2.data<T>(); hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>>) , dim3(grid), dim3(block), 0, ctx.stream(), out_grad, d_index, s_index, x_grad_v2_data, index_size, bcast_info.out_len, functor); // Run reduce_sum DenseTensor x_grad_out = phi::Sum<T, Context>( ctx, x_grad_v2, phi::IntArray(reduce_idx), paddle::experimental::CppTypeToDataType<T>::Type(), true); #ifdef PADDLE_WITH_HIP hipMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #else hipMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #endif } } else if (message_op == "MUL") { const auto& bcast_info = phi::CalcBCastInfo(out_grad_dims, e_dims); thrust::device_vector<int64_t> l_bcastoff, r_bcastoff; if (bcast_info.use_bcast) { CopyBCastOff(bcast_info, l_bcastoff, r_bcastoff); } int64_t out_len = bcast_info.out_len; const int ntx = FindNumThreads(out_len, ctx.GetMaxThreadsPerBlock()); const int nty = ctx.GetMaxThreadsPerBlock() / ntx; const int nbx = (out_len + ntx - 1) / ntx; const int nby = FindNumBlocks('y', (index_size + nty - 1) / nty); const dim3 grid_(nbx, nby); const dim3 block_(ntx, nty); funcs::MultiplyFunctor<T> mul_functor; GraphSendUERecvSumCUDAFunctor<T> sum_functor; if (!reduce) { hipLaunchKernelGGL(( GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvSumCUDAFunctor<T>, funcs::MultiplyFunctor<T>>) , dim3(grid_), dim3(block_), 0, ctx.stream(), out_grad, e_data, d_index, s_index, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, mul_functor, sum_functor); } else { DenseTensor x_grad_v2 = phi::EmptyLike<T, Context>(ctx, out_grad_tensor); phi::funcs::SetConstant<Context, T>()(ctx, &x_grad_v2, T(0)); T* x_grad_v2_data = x_grad_v2.data<T>(); hipLaunchKernelGGL(( GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvSumCUDAFunctor<T>, funcs::MultiplyFunctor<T>>) , dim3(grid_), dim3(block_), 0, ctx.stream(), out_grad, e_data, d_index, s_index, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad_v2_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, mul_functor, sum_functor); DenseTensor x_grad_out = phi::Sum<T, Context>( ctx, x_grad_v2, phi::IntArray(reduce_idx), paddle::experimental::CppTypeToDataType<T>::Type(), true); #ifdef PADDLE_WITH_HIP hipMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #else hipMemcpy(x_grad, x_grad_out.data<T>(), 
x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #endif } } } else if (reduce_op == "MEAN") { const int* s_count = dst_count->data<int>(); if (message_op == "ADD") { if (!reduce) { hipLaunchKernelGGL(( ManipulateMeanGradCUDAKernel<T, IndexT>) , dim3(grid), dim3(block), 0, ctx.stream(), out_grad, d_index, s_index, x_grad, index_size, slice_size, s_count); } else { const auto& bcast_info = phi::CalcBCastInfo(out_grad_dims, e_dims); DenseTensor x_grad_v2 = phi::EmptyLike<T, Context>(ctx, out_grad_tensor); phi::funcs::SetConstant<Context, T>()(ctx, &x_grad_v2, T(0)); T* x_grad_v2_data = x_grad_v2.data<T>(); hipLaunchKernelGGL(( ManipulateMeanGradCUDAKernel<T, IndexT>) , dim3(grid), dim3(block), 0, ctx.stream(), out_grad, d_index, s_index, x_grad_v2_data, index_size, bcast_info.out_len, s_count); // Run reduce_sum DenseTensor x_grad_out = phi::Sum<T, Context>( ctx, x_grad_v2, phi::IntArray(reduce_idx), paddle::experimental::CppTypeToDataType<T>::Type(), true); #ifdef PADDLE_WITH_HIP hipMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #else hipMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #endif } } else if (message_op == "MUL") { const auto& bcast_info = phi::CalcBCastInfo(out_grad_dims, e_dims); thrust::device_vector<int64_t> l_bcastoff, r_bcastoff; if (bcast_info.use_bcast) { CopyBCastOff(bcast_info, l_bcastoff, r_bcastoff); } int64_t out_len = bcast_info.out_len; const int ntx = FindNumThreads(out_len, ctx.GetMaxThreadsPerBlock()); const int nty = ctx.GetMaxThreadsPerBlock() / ntx; const int nbx = (out_len + ntx - 1) / ntx; const int nby = FindNumBlocks('y', (index_size + nty - 1) / nty); const dim3 grid_(nbx, nby); const dim3 block_(ntx, nty); if (!reduce) { hipLaunchKernelGGL(( ManipulateMeanGradCUDAKernelForMulX<T, IndexT>) , dim3(grid_), dim3(block_), 0, ctx.stream(), out_grad, e_data, d_index, s_index, s_count, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); } else { DenseTensor x_grad_v2 = phi::EmptyLike<T, Context>(ctx, out_grad_tensor); phi::funcs::SetConstant<Context, T>()(ctx, &x_grad_v2, T(0)); T* x_grad_v2_data = x_grad_v2.data<T>(); hipLaunchKernelGGL(( ManipulateMeanGradCUDAKernelForMulX<T, IndexT>) , dim3(grid_), dim3(block_), 0, ctx.stream(), out_grad, e_data, d_index, s_index, s_count, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad_v2_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); // Run reduce_sum DenseTensor x_grad_out = phi::Sum<T, Context>( ctx, x_grad_v2, phi::IntArray(reduce_idx), paddle::experimental::CppTypeToDataType<T>::Type(), true); // TODO(daisiming): Whether use x_grad instead. 
#ifdef PADDLE_WITH_HIP hipMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #else hipMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #endif } } } } template <typename Context, typename T, typename IndexT> void CalculateEGrad(const Context& ctx, const T* out_grad, const T* x_data, const T* e_data, const phi::DDim& x_dims, const phi::DDim& e_dims, const IndexT* s_index, const IndexT* d_index, const std::string& message_op, const std::string& reduce_op, int64_t index_size, T* e_grad, const DenseTensor* dst_count = nullptr) { const auto& bcast_info = phi::CalcBCastInfo(x_dims, e_dims); thrust::device_vector<int64_t> l_bcastoff, r_bcastoff; if (bcast_info.use_bcast) { CopyBCastOff(bcast_info, l_bcastoff, r_bcastoff); } int64_t out_len = bcast_info.out_len; const int ntx = FindNumThreads(out_len, ctx.GetMaxThreadsPerBlock()); const int nty = ctx.GetMaxThreadsPerBlock() / ntx; const int nbx = (out_len + ntx - 1) / ntx; const int nby = FindNumBlocks('y', (index_size + nty - 1) / nty); const dim3 grid(nbx, nby); const dim3 block(ntx, nty); if (reduce_op == "SUM") { if (message_op == "ADD") { hipLaunchKernelGGL(( ManipulateSumGradCUDAKernelForAddE<T, IndexT>) , dim3(grid), dim3(block), 0, ctx.stream(), out_grad, d_index, thrust::raw_pointer_cast(r_bcastoff.data()), e_grad, index_size, bcast_info.r_len, out_len, bcast_info.use_bcast); } else if (message_op == "MUL") { hipLaunchKernelGGL(( ManipulateSumGradCUDAKernelForMulE<T, IndexT>) , dim3(grid), dim3(block), 0, ctx.stream(), x_data, out_grad, s_index, d_index, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), e_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); } } else if (reduce_op == "MEAN") { const int* s_count = dst_count->data<int>(); if (message_op == "ADD") { hipLaunchKernelGGL(( ManipulateMeanGradCUDAKernelForAddE<T, IndexT>) , dim3(grid), dim3(block), 0, ctx.stream(), out_grad, d_index, s_count, thrust::raw_pointer_cast(r_bcastoff.data()), e_grad, index_size, bcast_info.r_len, out_len, bcast_info.use_bcast); } else if (message_op == "MUL") { hipLaunchKernelGGL(( ManipulateMeanGradCUDAKernelForMulE<T, IndexT>) , dim3(grid), dim3(block), 0, ctx.stream(), x_data, out_grad, s_index, d_index, s_count, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), e_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); } } } template <typename Context, typename T, typename IndexT> void GraphSendUERecvGradOpCUDAKernelLaunchHelper( const Context& ctx, const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& e, const DenseTensor& src_index, const DenseTensor& dst_index, const std::string& message_op, const std::string& reduce_op, DenseTensor* x_grad, DenseTensor* e_grad, const DenseTensor* dst_count = nullptr, const DenseTensor* out = nullptr) { const int& index_size = dst_index.dims()[0]; ctx.template Alloc<T>(x_grad); T* x_grad_data = x_grad->data<T>(); ctx.template Alloc<T>(e_grad); T* e_grad_data = e_grad->data<T>(); const auto& x_dims = x.dims(); const auto& e_dims = e.dims(); int64_t memset_size_x = 1, memset_size_e = 1; int64_t slice_size = 1; for (int i = 0; i < x_dims.size(); i++) { memset_size_x *= x_dims[i]; if (i > 0) slice_size *= x_dims[i]; } for (int i = 0; i < e_dims.size(); i++) { memset_size_e *= e_dims[i]; } const size_t& memset_bytes_x = memset_size_x * sizeof(T); const size_t& memset_bytes_e 
= memset_size_e * sizeof(T); #ifdef PADDLE_WITH_HIP hipMemset(x_grad_data, 0, memset_bytes_x); hipMemset(e_grad_data, 0, memset_bytes_e); #else hipMemset(x_grad_data, 0, memset_bytes_x); hipMemset(e_grad_data, 0, memset_bytes_e); #endif if (index_size == 0) return; const T* out_grad_data = out_grad.data<T>(); const T* x_data = x.data<T>(); const T* e_data = e.data<T>(); const IndexT* s_index = src_index.data<IndexT>(); const IndexT* d_index = dst_index.data<IndexT>(); if (reduce_op == "SUM" || reduce_op == "MEAN") { CalculateXGrad<Context, T, IndexT>(ctx, out_grad_data, x_data, e_data, out_grad.dims(), x_dims, e_dims, s_index, d_index, message_op, reduce_op, index_size, slice_size, x_grad_data, out_grad, dst_count, out); CalculateEGrad<Context, T, IndexT>(ctx, out_grad_data, x_data, e_data, x_dims, e_dims, s_index, d_index, message_op, reduce_op, index_size, e_grad_data, dst_count); } else if (reduce_op == "MIN" || reduce_op == "MAX") { CalculateXEGradForMinMax<Context, T, IndexT>(ctx, out_grad_data, x_data, e_data, x_dims, e_dims, s_index, d_index, message_op, reduce_op, index_size, x_grad_data, e_grad_data, out); } } template <typename T, typename Context> void GraphSendUERecvGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& src_index, const DenseTensor& dst_index, const paddle::optional<DenseTensor>& out, const paddle::optional<DenseTensor>& dst_count, const DenseTensor& out_grad, const std::string& message_op, const std::string& reduce_op, DenseTensor* x_grad, DenseTensor* y_grad) { auto index_type = src_index.dtype(); if (index_type == phi::DataType::INT32) { GraphSendUERecvGradOpCUDAKernelLaunchHelper<Context, T, int32_t>( ctx, out_grad, x, y, src_index, dst_index, message_op, reduce_op, x_grad, y_grad, dst_count.get_ptr(), out.get_ptr()); } else if (index_type == phi::DataType::INT64) { GraphSendUERecvGradOpCUDAKernelLaunchHelper<Context, T, int64_t>( ctx, out_grad, x, y, src_index, dst_index, message_op, reduce_op, x_grad, y_grad, dst_count.get_ptr(), out.get_ptr()); } } } // namespace phi PD_REGISTER_KERNEL(graph_send_ue_recv_grad, GPU, ALL_LAYOUT, phi::GraphSendUERecvGradKernel, float, double, int, int64_t, phi::dtype::float16) {}
38f893c67b08fd8301cb2da1bb87a2a25a5506a4.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/graph_send_ue_recv_grad_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/hostdevice.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/funcs/elementwise_functor.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/gpu/graph_send_recv_funcs.h" #include "paddle/phi/kernels/gpu/graph_send_ue_recv_funcs.h" #include "paddle/phi/kernels/impl/graph_message_passing_impl.h" #include "paddle/phi/kernels/reduce_sum_kernel.h" namespace phi { template <typename Context, typename T, typename IndexT> void CalculateXEGradForMinMax(const Context& ctx, const T* out_grad, const T* x_data, const T* e_data, const phi::DDim& x_dims, const phi::DDim& e_dims, const IndexT* s_index, const IndexT* d_index, const std::string& message_op, const std::string& reduce_op, int64_t index_size, T* x_grad, T* e_grad, const DenseTensor* out = nullptr) { const T* out_data = out->data<T>(); const auto& bcast_info = phi::CalcBCastInfo(x_dims, e_dims); thrust::device_vector<int64_t> l_bcastoff, r_bcastoff; if (bcast_info.use_bcast) { CopyBCastOff(bcast_info, l_bcastoff, r_bcastoff); } int64_t out_len = bcast_info.out_len; const int ntx = FindNumThreads(out_len, ctx.GetMaxThreadsPerBlock()); const int nty = ctx.GetMaxThreadsPerBlock() / ntx; const int nbx = (out_len + ntx - 1) / ntx; const int nby = FindNumBlocks('y', (index_size + nty - 1) / nty); const dim3 grid(nbx, nby); const dim3 block(ntx, nty); if (message_op == "ADD") { ManipulateMinMaxGradCUDAKernelForAdd<T, IndexT> <<<grid, block, 0, ctx.stream()>>>( x_data, e_data, out_data, out_grad, d_index, s_index, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad, e_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); } else if (message_op == "MUL") { ManipulateMinMaxGradCUDAKernelForMul<T, IndexT> <<<grid, block, 0, ctx.stream()>>>( x_data, e_data, out_data, out_grad, d_index, s_index, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad, e_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); } } template <typename Context, typename T, typename IndexT> void CalculateXGrad(const Context& ctx, const T* out_grad, const T* x_data, const T* e_data, const phi::DDim& out_grad_dims, const phi::DDim& x_dims, const phi::DDim& e_dims, const IndexT* s_index, const IndexT* d_index, const std::string& message_op, const std::string& reduce_op, int64_t index_size, int64_t slice_size, T* x_grad, const DenseTensor& out_grad_tensor, const DenseTensor* dst_count = nullptr, const DenseTensor* out = nullptr) { #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int64_t n = slice_size * index_size; int max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t 
grid_tmp = (n + block - 1) / block; int64_t grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx; std::vector<int64_t> reduce_idx; bool reduce = ReduceGrad(out_grad_dims, x_dims, reduce_idx); if (reduce_op == "SUM") { if (message_op == "ADD") { GraphSendRecvSumCUDAFunctor<T, IndexT> functor; if (!reduce) { GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>> <<<grid, block, 0, ctx.stream()>>>(out_grad, d_index, s_index, x_grad, index_size, slice_size, functor); } else { const auto& bcast_info = phi::CalcBCastInfo(out_grad_dims, e_dims); DenseTensor x_grad_v2 = phi::EmptyLike<T, Context>(ctx, out_grad_tensor); phi::funcs::SetConstant<Context, T>()(ctx, &x_grad_v2, T(0)); T* x_grad_v2_data = x_grad_v2.data<T>(); GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>> <<<grid, block, 0, ctx.stream()>>>(out_grad, d_index, s_index, x_grad_v2_data, index_size, bcast_info.out_len, functor); // Run reduce_sum DenseTensor x_grad_out = phi::Sum<T, Context>( ctx, x_grad_v2, phi::IntArray(reduce_idx), paddle::experimental::CppTypeToDataType<T>::Type(), true); #ifdef PADDLE_WITH_HIP hipMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #else cudaMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), cudaMemcpyDeviceToDevice); #endif } } else if (message_op == "MUL") { const auto& bcast_info = phi::CalcBCastInfo(out_grad_dims, e_dims); thrust::device_vector<int64_t> l_bcastoff, r_bcastoff; if (bcast_info.use_bcast) { CopyBCastOff(bcast_info, l_bcastoff, r_bcastoff); } int64_t out_len = bcast_info.out_len; const int ntx = FindNumThreads(out_len, ctx.GetMaxThreadsPerBlock()); const int nty = ctx.GetMaxThreadsPerBlock() / ntx; const int nbx = (out_len + ntx - 1) / ntx; const int nby = FindNumBlocks('y', (index_size + nty - 1) / nty); const dim3 grid_(nbx, nby); const dim3 block_(ntx, nty); funcs::MultiplyFunctor<T> mul_functor; GraphSendUERecvSumCUDAFunctor<T> sum_functor; if (!reduce) { GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvSumCUDAFunctor<T>, funcs::MultiplyFunctor<T>> <<<grid_, block_, 0, ctx.stream()>>>( out_grad, e_data, d_index, s_index, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, mul_functor, sum_functor); } else { DenseTensor x_grad_v2 = phi::EmptyLike<T, Context>(ctx, out_grad_tensor); phi::funcs::SetConstant<Context, T>()(ctx, &x_grad_v2, T(0)); T* x_grad_v2_data = x_grad_v2.data<T>(); GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvSumCUDAFunctor<T>, funcs::MultiplyFunctor<T>> <<<grid_, block_, 0, ctx.stream()>>>( out_grad, e_data, d_index, s_index, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad_v2_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, mul_functor, sum_functor); DenseTensor x_grad_out = phi::Sum<T, Context>( ctx, x_grad_v2, phi::IntArray(reduce_idx), paddle::experimental::CppTypeToDataType<T>::Type(), true); #ifdef PADDLE_WITH_HIP hipMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #else cudaMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), cudaMemcpyDeviceToDevice); #endif } } } else if (reduce_op == "MEAN") { const int* s_count = dst_count->data<int>(); if (message_op == "ADD") { if (!reduce) { ManipulateMeanGradCUDAKernel<T, IndexT> <<<grid, block, 0, ctx.stream()>>>(out_grad, 
d_index, s_index, x_grad, index_size, slice_size, s_count); } else { const auto& bcast_info = phi::CalcBCastInfo(out_grad_dims, e_dims); DenseTensor x_grad_v2 = phi::EmptyLike<T, Context>(ctx, out_grad_tensor); phi::funcs::SetConstant<Context, T>()(ctx, &x_grad_v2, T(0)); T* x_grad_v2_data = x_grad_v2.data<T>(); ManipulateMeanGradCUDAKernel<T, IndexT> <<<grid, block, 0, ctx.stream()>>>(out_grad, d_index, s_index, x_grad_v2_data, index_size, bcast_info.out_len, s_count); // Run reduce_sum DenseTensor x_grad_out = phi::Sum<T, Context>( ctx, x_grad_v2, phi::IntArray(reduce_idx), paddle::experimental::CppTypeToDataType<T>::Type(), true); #ifdef PADDLE_WITH_HIP hipMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #else cudaMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), cudaMemcpyDeviceToDevice); #endif } } else if (message_op == "MUL") { const auto& bcast_info = phi::CalcBCastInfo(out_grad_dims, e_dims); thrust::device_vector<int64_t> l_bcastoff, r_bcastoff; if (bcast_info.use_bcast) { CopyBCastOff(bcast_info, l_bcastoff, r_bcastoff); } int64_t out_len = bcast_info.out_len; const int ntx = FindNumThreads(out_len, ctx.GetMaxThreadsPerBlock()); const int nty = ctx.GetMaxThreadsPerBlock() / ntx; const int nbx = (out_len + ntx - 1) / ntx; const int nby = FindNumBlocks('y', (index_size + nty - 1) / nty); const dim3 grid_(nbx, nby); const dim3 block_(ntx, nty); if (!reduce) { ManipulateMeanGradCUDAKernelForMulX<T, IndexT> <<<grid_, block_, 0, ctx.stream()>>>( out_grad, e_data, d_index, s_index, s_count, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); } else { DenseTensor x_grad_v2 = phi::EmptyLike<T, Context>(ctx, out_grad_tensor); phi::funcs::SetConstant<Context, T>()(ctx, &x_grad_v2, T(0)); T* x_grad_v2_data = x_grad_v2.data<T>(); ManipulateMeanGradCUDAKernelForMulX<T, IndexT> <<<grid_, block_, 0, ctx.stream()>>>( out_grad, e_data, d_index, s_index, s_count, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), x_grad_v2_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); // Run reduce_sum DenseTensor x_grad_out = phi::Sum<T, Context>( ctx, x_grad_v2, phi::IntArray(reduce_idx), paddle::experimental::CppTypeToDataType<T>::Type(), true); // TODO(daisiming): Whether use x_grad instead. 
#ifdef PADDLE_WITH_HIP hipMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), hipMemcpyDeviceToDevice); #else cudaMemcpy(x_grad, x_grad_out.data<T>(), x_grad_out.numel() * sizeof(T), cudaMemcpyDeviceToDevice); #endif } } } } template <typename Context, typename T, typename IndexT> void CalculateEGrad(const Context& ctx, const T* out_grad, const T* x_data, const T* e_data, const phi::DDim& x_dims, const phi::DDim& e_dims, const IndexT* s_index, const IndexT* d_index, const std::string& message_op, const std::string& reduce_op, int64_t index_size, T* e_grad, const DenseTensor* dst_count = nullptr) { const auto& bcast_info = phi::CalcBCastInfo(x_dims, e_dims); thrust::device_vector<int64_t> l_bcastoff, r_bcastoff; if (bcast_info.use_bcast) { CopyBCastOff(bcast_info, l_bcastoff, r_bcastoff); } int64_t out_len = bcast_info.out_len; const int ntx = FindNumThreads(out_len, ctx.GetMaxThreadsPerBlock()); const int nty = ctx.GetMaxThreadsPerBlock() / ntx; const int nbx = (out_len + ntx - 1) / ntx; const int nby = FindNumBlocks('y', (index_size + nty - 1) / nty); const dim3 grid(nbx, nby); const dim3 block(ntx, nty); if (reduce_op == "SUM") { if (message_op == "ADD") { ManipulateSumGradCUDAKernelForAddE<T, IndexT> <<<grid, block, 0, ctx.stream()>>>( out_grad, d_index, thrust::raw_pointer_cast(r_bcastoff.data()), e_grad, index_size, bcast_info.r_len, out_len, bcast_info.use_bcast); } else if (message_op == "MUL") { ManipulateSumGradCUDAKernelForMulE<T, IndexT> <<<grid, block, 0, ctx.stream()>>>( x_data, out_grad, s_index, d_index, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), e_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); } } else if (reduce_op == "MEAN") { const int* s_count = dst_count->data<int>(); if (message_op == "ADD") { ManipulateMeanGradCUDAKernelForAddE<T, IndexT> <<<grid, block, 0, ctx.stream()>>>( out_grad, d_index, s_count, thrust::raw_pointer_cast(r_bcastoff.data()), e_grad, index_size, bcast_info.r_len, out_len, bcast_info.use_bcast); } else if (message_op == "MUL") { ManipulateMeanGradCUDAKernelForMulE<T, IndexT> <<<grid, block, 0, ctx.stream()>>>( x_data, out_grad, s_index, d_index, s_count, thrust::raw_pointer_cast(l_bcastoff.data()), thrust::raw_pointer_cast(r_bcastoff.data()), e_grad, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast); } } } template <typename Context, typename T, typename IndexT> void GraphSendUERecvGradOpCUDAKernelLaunchHelper( const Context& ctx, const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& e, const DenseTensor& src_index, const DenseTensor& dst_index, const std::string& message_op, const std::string& reduce_op, DenseTensor* x_grad, DenseTensor* e_grad, const DenseTensor* dst_count = nullptr, const DenseTensor* out = nullptr) { const int& index_size = dst_index.dims()[0]; ctx.template Alloc<T>(x_grad); T* x_grad_data = x_grad->data<T>(); ctx.template Alloc<T>(e_grad); T* e_grad_data = e_grad->data<T>(); const auto& x_dims = x.dims(); const auto& e_dims = e.dims(); int64_t memset_size_x = 1, memset_size_e = 1; int64_t slice_size = 1; for (int i = 0; i < x_dims.size(); i++) { memset_size_x *= x_dims[i]; if (i > 0) slice_size *= x_dims[i]; } for (int i = 0; i < e_dims.size(); i++) { memset_size_e *= e_dims[i]; } const size_t& memset_bytes_x = memset_size_x * sizeof(T); const size_t& memset_bytes_e = memset_size_e * sizeof(T); #ifdef PADDLE_WITH_HIP hipMemset(x_grad_data, 0, memset_bytes_x); hipMemset(e_grad_data, 
0, memset_bytes_e); #else cudaMemset(x_grad_data, 0, memset_bytes_x); cudaMemset(e_grad_data, 0, memset_bytes_e); #endif if (index_size == 0) return; const T* out_grad_data = out_grad.data<T>(); const T* x_data = x.data<T>(); const T* e_data = e.data<T>(); const IndexT* s_index = src_index.data<IndexT>(); const IndexT* d_index = dst_index.data<IndexT>(); if (reduce_op == "SUM" || reduce_op == "MEAN") { CalculateXGrad<Context, T, IndexT>(ctx, out_grad_data, x_data, e_data, out_grad.dims(), x_dims, e_dims, s_index, d_index, message_op, reduce_op, index_size, slice_size, x_grad_data, out_grad, dst_count, out); CalculateEGrad<Context, T, IndexT>(ctx, out_grad_data, x_data, e_data, x_dims, e_dims, s_index, d_index, message_op, reduce_op, index_size, e_grad_data, dst_count); } else if (reduce_op == "MIN" || reduce_op == "MAX") { CalculateXEGradForMinMax<Context, T, IndexT>(ctx, out_grad_data, x_data, e_data, x_dims, e_dims, s_index, d_index, message_op, reduce_op, index_size, x_grad_data, e_grad_data, out); } } template <typename T, typename Context> void GraphSendUERecvGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& src_index, const DenseTensor& dst_index, const paddle::optional<DenseTensor>& out, const paddle::optional<DenseTensor>& dst_count, const DenseTensor& out_grad, const std::string& message_op, const std::string& reduce_op, DenseTensor* x_grad, DenseTensor* y_grad) { auto index_type = src_index.dtype(); if (index_type == phi::DataType::INT32) { GraphSendUERecvGradOpCUDAKernelLaunchHelper<Context, T, int32_t>( ctx, out_grad, x, y, src_index, dst_index, message_op, reduce_op, x_grad, y_grad, dst_count.get_ptr(), out.get_ptr()); } else if (index_type == phi::DataType::INT64) { GraphSendUERecvGradOpCUDAKernelLaunchHelper<Context, T, int64_t>( ctx, out_grad, x, y, src_index, dst_index, message_op, reduce_op, x_grad, y_grad, dst_count.get_ptr(), out.get_ptr()); } } } // namespace phi PD_REGISTER_KERNEL(graph_send_ue_recv_grad, GPU, ALL_LAYOUT, phi::GraphSendUERecvGradKernel, float, double, int, int64_t, phi::dtype::float16) {}
7222173881a26e04cfa2bc183dfb56049a43bbb0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CUDA code for matrix multiplication. The values of a, b, c, q have to be changed according to N
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <cuda_runtime.h>

__global__ void Product(float *a, float *b, float *c)
{
    // Out of all the threads created, each one computes 1 value of C and stores it into cval
    float cval = 0.00;
    int R = blockIdx.y * blockDim.y + threadIdx.y; // Row of the matrix
    int C = blockIdx.x * blockDim.x + threadIdx.x; // Column of the matrix
    // Defining the size of the matrix
    int N = 1000;
    if (R > N || C > N) {
        return;
    }
    for (int j = 0; j < N; j++) {
        cval += a[R * N + j] * b[j * N + C];
    }
    c[R * N + C] += cval;
}

using namespace std;

int main() {
    // The timing function
    hipEvent_t start, stop;
    float time;
    int N = 5000;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    static float a[25000000], b[25000000];
    static float c[25000000];
    // Inputting values in the matrix
    long int q = 25000000; // Standard int runs out of memory, so long int is used
    int i = 0;
    // For checking the matrix multiplication, all entries are 1
    while (i != q) {
        a[i] = 1;
        b[i] = 1;
        i++;
    }
    int o = 0;
    // for(int m=0;m<N;m++){
    // for(int n=0;n<N;n++){
    // a[o]=m+n;
    // b[o]=m*n;
    // o=o+1;
    // }}
    // This section is the GPU part
    float *device_a, *device_b, *device_c;
    dim3 griddimension(500, 500); // The dimension of the total grid (blocks)
    dim3 blockdimension(10, 10);  // The dimension of one block (threads in one block)
    // Allocating memory in the device for the matrices: device_a, device_b, device_c are device variables
    hipMalloc((void**)&device_c, q * sizeof(float));
    hipMalloc((void**)&device_b, q * sizeof(float));
    hipMalloc((void**)&device_a, q * sizeof(float));
    // Copying the variables from CPU to GPU
    hipMemcpy(device_a, a, q * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(device_b, b, q * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(device_c, c, q * sizeof(float), hipMemcpyHostToDevice);
    hipLaunchKernelGGL((Product), dim3(griddimension), dim3(blockdimension), 0, 0, device_a, device_b, device_c); // The device function Product is called
    hipMemcpy(c, device_c, q * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(device_a);
    hipFree(device_b);
    hipFree(device_c);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time, start, stop);
    cout << "\n\nTime = " << time << " ms";
    // For printing the matrix
    long int g = N * N, d = 0;
    while (d != g) {
        printf("%f\n", c[d]);
        d = d + 1;
    }
    //}
}
7222173881a26e04cfa2bc183dfb56049a43bbb0.cu
// CUDA code for matrix multiplication. The values of a, b, c, q have to be changed according to N
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <cuda_runtime.h>

__global__ void Product(float *a, float *b, float *c)
{
    // Out of all the threads created, each one computes 1 value of C and stores it into cval
    float cval = 0.00;
    int R = blockIdx.y * blockDim.y + threadIdx.y; // Row of the matrix
    int C = blockIdx.x * blockDim.x + threadIdx.x; // Column of the matrix
    // Defining the size of the matrix
    int N = 1000;
    if (R > N || C > N) {
        return;
    }
    for (int j = 0; j < N; j++) {
        cval += a[R * N + j] * b[j * N + C];
    }
    c[R * N + C] += cval;
}

using namespace std;

int main() {
    // The timing function
    cudaEvent_t start, stop;
    float time;
    int N = 5000;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    static float a[25000000], b[25000000];
    static float c[25000000];
    // Inputting values in the matrix
    long int q = 25000000; // Standard int runs out of memory, so long int is used
    int i = 0;
    // For checking the matrix multiplication, all entries are 1
    while (i != q) {
        a[i] = 1;
        b[i] = 1;
        i++;
    }
    int o = 0;
    // for(int m=0;m<N;m++){
    // for(int n=0;n<N;n++){
    // a[o]=m+n;
    // b[o]=m*n;
    // o=o+1;
    // }}
    // This section is the GPU part
    float *device_a, *device_b, *device_c;
    dim3 griddimension(500, 500); // The dimension of the total grid (blocks)
    dim3 blockdimension(10, 10);  // The dimension of one block (threads in one block)
    // Allocating memory in the device for the matrices: device_a, device_b, device_c are device variables
    cudaMalloc((void**)&device_c, q * sizeof(float));
    cudaMalloc((void**)&device_b, q * sizeof(float));
    cudaMalloc((void**)&device_a, q * sizeof(float));
    // Copying the variables from CPU to GPU
    cudaMemcpy(device_a, a, q * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, b, q * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(device_c, c, q * sizeof(float), cudaMemcpyHostToDevice);
    Product<<<griddimension, blockdimension>>>(device_a, device_b, device_c); // The device function Product is called
    cudaMemcpy(c, device_c, q * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cout << "\n\nTime = " << time << " ms";
    // For printing the matrix
    long int g = N * N, d = 0;
    while (d != g) {
        printf("%f\n", c[d]);
        d = d + 1;
    }
    //}
}
b4598b95aeb01c1c3b3e53b97926db0043948652.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "hip/hip_runtime.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { hipError_t error = hipGetLastError (); if (error != hipSuccess) { printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error)); exit(-1); } } __global__ void __launch_bounds__ (128,2) sw4_a (double * uacc_0, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * __restrict__ strx, double * __restrict__ stry, double * __restrict__ strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; /* Total 687 flops */ if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 3 for (int k=2; k<=N-3; k+=2) { double a_mux1; double _t_0_; double a_mux2; double _t_1_; double a_mux3; double _t_2_; double a_mux4; double _t_3_; double a_muy1; double _t_4_; double a_muy2; double _t_5_; double a_muy3; double _t_6_; double a_muy4; double _t_7_; double a_muz1; double _t_8_; double a_muz2; double _t_9_; double a_muz3; double _t_10_; double a_muz4; double _t_11_; double _t_14_; double _t_16_; double _t_15_; double _t_13_; double _t_17_; double _t_19_; double _t_18_; double _t_20_; double _t_22_; double _t_21_; double _t_23_; double _t_25_; double _t_24_; double _t_12_; double _t_27_; double _t_26_; double _t_28_; double _t_29_; double _t_30_; double _t_32_; double _t_31_; double _t_33_; double _t_34_; double _t_35_; double r1; double _t_39_; double _t_37_; double _t_40_; double _t_41_; double _t_38_; double _t_43_; double _t_44_; double _t_42_; double _t_46_; double _t_47_; double _t_45_; double _t_48_; double _t_49_; double _t_36_; double _t_52_; double _t_50_; double _t_53_; double _t_54_; double _t_51_; double _t_56_; double _t_57_; double _t_55_; double _t_59_; double _t_60_; double _t_58_; double _t_61_; double _t_62_; double _t_65_; double _t_63_; double _t_66_; double _t_67_; double _t_64_; double _t_69_; double _t_70_; double _t_68_; double _t_72_; double _t_73_; double _t_71_; double _t_74_; double _t_75_; double _t_78_; double _t_76_; double _t_79_; double _t_80_; double _t_77_; double _t_82_; double _t_83_; double _t_81_; double _t_85_; double _t_86_; double _t_84_; double _t_87_; double _t_88_; double uacc_0kc0jc0ic0 = uacc_0[k*N*N+j*N+i]; double b_mux1; double _t_89_; double b_mux2; double _t_90_; double b_mux3; double _t_91_; double b_mux4; double _t_92_; double b_muy1; double _t_93_; double b_muy2; double _t_94_; double b_muy3; double _t_95_; double b_muy4; double _t_96_; double b_muz1; double _t_97_; double b_muz2; double _t_98_; double b_muz3; double _t_99_; double b_muz4; double _t_100_; double _t_103_; double _t_105_; double _t_104_; double _t_102_; double _t_106_; double _t_108_; double _t_107_; double _t_109_; double _t_111_; double _t_110_; 
double _t_112_; double _t_114_; double _t_113_; double _t_101_; double _t_116_; double _t_115_; double _t_117_; double _t_118_; double _t_119_; double _t_121_; double _t_120_; double _t_122_; double _t_123_; double _t_124_; double r2; double _t_128_; double _t_126_; double _t_129_; double _t_130_; double _t_127_; double _t_132_; double _t_133_; double _t_131_; double _t_135_; double _t_136_; double _t_134_; double _t_137_; double _t_138_; double _t_125_; double _t_141_; double _t_139_; double _t_142_; double _t_143_; double _t_140_; double _t_145_; double _t_146_; double _t_144_; double _t_148_; double _t_149_; double _t_147_; double _t_150_; double _t_151_; double _t_154_; double _t_152_; double _t_155_; double _t_156_; double _t_153_; double _t_158_; double _t_159_; double _t_157_; double _t_161_; double _t_162_; double _t_160_; double _t_163_; double _t_164_; double _t_167_; double _t_165_; double _t_168_; double _t_169_; double _t_166_; double _t_171_; double _t_172_; double _t_170_; double _t_174_; double _t_175_; double _t_173_; double _t_176_; double _t_177_; double uacc_0kp1jc0ic0 = uacc_0[(k+1)*N*N+j*N+i]; a_mux1 = mu[k][j][i-1] * strx[i-1]; _t_0_ = mu[k][j][i] * strx[i]; _t_0_ += mu[k][j][i-2] * strx[i-2]; a_mux1 -= 3.0 / 4.0 * _t_0_; a_mux2 = mu[k][j][i-2] * strx[i-2]; a_mux2 += mu[k][j][i+1] * strx[i+1]; _t_1_ = mu[k][j][i] * strx[i]; _t_1_ += mu[k][j][i-1] * strx[i-1]; a_mux2 += 3.0 * _t_1_; a_mux3 = mu[k][j][i-1] * strx[i-1]; a_mux3 += mu[k][j][i+2] * strx[i+2]; _t_2_ = mu[k][j][i+1] * strx[i+1]; _t_2_ += mu[k][j][i] * strx[i]; a_mux3 += 3.0 * _t_2_; a_mux4 = mu[k][j][i+1] * strx[i+1]; _t_3_ = mu[k][j][i] * strx[i]; _t_3_ += mu[k][j][i+2] * strx[i+2]; a_mux4 -= 3.0 / 4.0 * _t_3_; a_muy1 = mu[k][j-1][i] * stry[j-1]; _t_4_ = mu[k][j][i] * stry[j]; _t_4_ += mu[k][j-2][i] * stry[j-2]; a_muy1 -= 3.0 / 4.0 * _t_4_; a_muy2 = mu[k][j-2][i] * stry[j-2]; a_muy2 += mu[k][j+1][i] * stry[j+1]; _t_5_ = mu[k][j][i] * stry[j]; _t_5_ += mu[k][j-1][i] * stry[j-1]; a_muy2 += 3.0 * _t_5_; a_muy3 = mu[k][j-1][i] * stry[j-1]; a_muy3 += mu[k][j+2][i] * stry[j+2]; _t_6_ = mu[k][j+1][i] * stry[j+1]; _t_6_ += mu[k][j][i] * stry[j]; a_muy3 += 3.0 * _t_6_; a_muy4 = mu[k][j+1][i] * stry[j+1]; _t_7_ = mu[k][j][i] * stry[j]; _t_7_ += mu[k][j+2][i] * stry[j+2]; a_muy4 -= 3.0 / 4.0 * _t_7_; a_muz1 = mu[k-1][j][i] * strz[k-1]; _t_8_ = mu[k][j][i] * strz[k]; _t_8_ += mu[k-2][j][i] * strz[k-2]; a_muz1 -= 3.0 / 4.0 * _t_8_; a_muz2 = mu[k-2][j][i] * strz[k-2]; a_muz2 += mu[k+1][j][i] * strz[k+1]; _t_9_ = mu[k][j][i] * strz[k]; _t_9_ += mu[k-1][j][i] * strz[k-1]; a_muz2 += 3.0 * _t_9_; a_muz3 = mu[k-1][j][i] * strz[k-1]; a_muz3 += mu[k+2][j][i] * strz[k+2]; _t_10_ = mu[k+1][j][i] * strz[k+1]; _t_10_ += mu[k][j][i] * strz[k]; a_muz3 += 3.0 * _t_10_; a_muz4 = mu[k+1][j][i] * strz[k+1]; _t_11_ = mu[k][j][i] * strz[k]; _t_11_ += mu[k+2][j][i] * strz[k+2]; a_muz4 -= 3.0 / 4.0 * _t_11_; _t_14_ = 2.0 * a_mux1; _t_14_ += la[k][j][i-1] * strx[i-1]; _t_16_ = la[k][j][i] * strx[i]; _t_16_ += la[k][j][i-2] * strx[i-2]; _t_14_ -= 3.0 / 4.0 * _t_16_; _t_15_ = u_0[k][j][i-2]; _t_15_ -= u_0[k][j][i]; _t_13_ = _t_14_ * _t_15_; _t_17_ = 2.0 * a_mux2; _t_17_ += la[k][j][i-2] * strx[i-2]; _t_17_ += la[k][j][i+1] * strx[i+1]; _t_19_ = la[k][j][i] * strx[i]; _t_19_ += la[k][j][i-1] * strx[i-1]; _t_17_ += 3.0 * _t_19_; _t_18_ = u_0[k][j][i-1]; _t_18_ -= u_0[k][j][i]; _t_13_ += _t_17_ * _t_18_; _t_20_ = 2.0 * a_mux3; _t_20_ += la[k][j][i-1] * strx[i-1]; _t_20_ += la[k][j][i+2] * strx[i+2]; _t_22_ = la[k][j][i+1] * strx[i+1]; _t_22_ += 
la[k][j][i] * strx[i]; _t_20_ += 3.0 * _t_22_; _t_21_ = u_0[k][j][i+1]; _t_21_ -= u_0[k][j][i]; _t_13_ += _t_20_ * _t_21_; _t_23_ = 2.0 * a_mux4; _t_23_ += la[k][j][i+1] * strx[i+1]; _t_25_ = la[k][j][i] * strx[i]; _t_25_ += la[k][j][i+2] * strx[i+2]; _t_23_ -= 3.0 / 4.0 * _t_25_; _t_24_ = u_0[k][j][i+2]; _t_24_ -= u_0[k][j][i]; _t_13_ += _t_23_ * _t_24_; _t_12_ = strx[i] * _t_13_; _t_27_ = u_0[k][j-2][i]; _t_27_ -= u_0[k][j][i]; _t_26_ = a_muy1 * _t_27_; _t_28_ = u_0[k][j-1][i]; _t_28_ -= u_0[k][j][i]; _t_26_ += a_muy2 * _t_28_; _t_29_ = u_0[k][j+1][i]; _t_29_ -= u_0[k][j][i]; _t_26_ += a_muy3 * _t_29_; _t_30_ = u_0[k][j+2][i]; _t_30_ -= u_0[k][j][i]; _t_26_ += a_muy4 * _t_30_; _t_12_ += stry[j] * _t_26_; _t_32_ = u_0[k-2][j][i]; _t_32_ -= u_0[k][j][i]; _t_31_ = a_muz1 * _t_32_; _t_33_ = u_0[k-1][j][i]; _t_33_ -= u_0[k][j][i]; _t_31_ += a_muz2 * _t_33_; _t_34_ = u_0[k+1][j][i]; _t_34_ -= u_0[k][j][i]; _t_31_ += a_muz3 * _t_34_; _t_35_ = u_0[k+2][j][i]; _t_35_ -= u_0[k][j][i]; _t_31_ += a_muz4 * _t_35_; _t_12_ += strz[k] * _t_31_; r1 = 1.0 / 6.0 * _t_12_; _t_39_ = strx[i] * stry[j]; _t_37_ = _t_39_ * 1.0 / 144.0; _t_40_ = u_1[k][j-2][i-2]; _t_40_ -= u_1[k][j+2][i-2]; _t_41_ = -u_1[k][j-1][i-2]; _t_41_ += u_1[k][j+1][i-2]; _t_40_ += 8.0 * _t_41_; _t_38_ = la[k][j][i-2] * _t_40_; _t_43_ = u_1[k][j-2][i-1]; _t_43_ -= u_1[k][j+2][i-1]; _t_44_ = -u_1[k][j-1][i-1]; _t_44_ += u_1[k][j+1][i-1]; _t_43_ += 8.0 * _t_44_; _t_42_ = la[k][j][i-1] * _t_43_; _t_38_ -= 8.0 * _t_42_; _t_46_ = u_1[k][j-2][i+1]; _t_46_ -= u_1[k][j+2][i+1]; _t_47_ = -u_1[k][j-1][i+1]; _t_47_ += u_1[k][j+1][i+1]; _t_46_ += 8.0 * _t_47_; _t_45_ = la[k][j][i+1] * _t_46_; _t_38_ += 8.0 * _t_45_; _t_48_ = u_1[k][j-2][i+2]; _t_48_ -= u_1[k][j+2][i+2]; _t_49_ = -u_1[k][j-1][i+2]; _t_49_ += u_1[k][j+1][i+2]; _t_48_ += 8.0 * _t_49_; _t_38_ -= la[k][j][i+2] * _t_48_; _t_36_ = _t_37_ * _t_38_; _t_52_ = strx[i] * strz[k]; _t_50_ = _t_52_ * 1.0 / 144.0; _t_53_ = u_2[k-2][j][i-2]; _t_53_ -= u_2[k+2][j][i-2]; _t_54_ = -u_2[k-1][j][i-2]; _t_54_ += u_2[k+1][j][i-2]; _t_53_ += 8.0 * _t_54_; _t_51_ = la[k][j][i-2] * _t_53_; _t_56_ = u_2[k-2][j][i-1]; _t_56_ -= u_2[k+2][j][i-1]; _t_57_ = -u_2[k-1][j][i-1]; _t_57_ += u_2[k+1][j][i-1]; _t_56_ += 8.0 * _t_57_; _t_55_ = la[k][j][i-1] * _t_56_; _t_51_ -= 8.0 * _t_55_; _t_59_ = u_2[k-2][j][i+1]; _t_59_ -= u_2[k+2][j][i+1]; _t_60_ = -u_2[k-1][j][i+1]; _t_60_ += u_2[k+1][j][i+1]; _t_59_ += 8.0 * _t_60_; _t_58_ = la[k][j][i+1] * _t_59_; _t_51_ += 8.0 * _t_58_; _t_61_ = u_2[k-2][j][i+2]; _t_61_ -= u_2[k+2][j][i+2]; _t_62_ = -u_2[k-1][j][i+2]; _t_62_ += u_2[k+1][j][i+2]; _t_61_ += 8.0 * _t_62_; _t_51_ -= la[k][j][i+2] * _t_61_; _t_36_ += _t_50_ * _t_51_; _t_65_ = strx[i] * stry[j]; _t_63_ = _t_65_ * 1.0 / 144.0; _t_66_ = u_1[k][j-2][i-2]; _t_66_ -= u_1[k][j-2][i+2]; _t_67_ = -u_1[k][j-2][i-1]; _t_67_ += u_1[k][j-2][i+1]; _t_66_ += 8.0 * _t_67_; _t_64_ = mu[k][j-2][i] * _t_66_; _t_69_ = u_1[k][j-1][i-2]; _t_69_ -= u_1[k][j-1][i+2]; _t_70_ = -u_1[k][j-1][i-1]; _t_70_ += u_1[k][j-1][i+1]; _t_69_ += 8.0 * _t_70_; _t_68_ = mu[k][j-1][i] * _t_69_; _t_64_ -= 8.0 * _t_68_; _t_72_ = u_1[k][j+1][i-2]; _t_72_ -= u_1[k][j+1][i+2]; _t_73_ = -u_1[k][j+1][i-1]; _t_73_ += u_1[k][j+1][i+1]; _t_72_ += 8.0 * _t_73_; _t_71_ = mu[k][j+1][i] * _t_72_; _t_64_ += 8.0 * _t_71_; _t_74_ = u_1[k][j+2][i-2]; _t_74_ -= u_1[k][j+2][i+2]; _t_75_ = -u_1[k][j+2][i-1]; _t_75_ += u_1[k][j+2][i+1]; _t_74_ += 8.0 * _t_75_; _t_64_ -= mu[k][j+2][i] * _t_74_; _t_36_ += _t_63_ * _t_64_; _t_78_ = strx[i] * strz[k]; _t_76_ = _t_78_ * 1.0 / 144.0; 
_t_79_ = u_2[k-2][j][i-2]; _t_79_ -= u_2[k-2][j][i+2]; _t_80_ = -u_2[k-2][j][i-1]; _t_80_ += u_2[k-2][j][i+1]; _t_79_ += 8.0 * _t_80_; _t_77_ = mu[k-2][j][i] * _t_79_; _t_82_ = u_2[k-1][j][i-2]; _t_82_ -= u_2[k-1][j][i+2]; _t_83_ = -u_2[k-1][j][i-1]; _t_83_ += u_2[k-1][j][i+1]; _t_82_ += 8.0 * _t_83_; _t_81_ = mu[k-1][j][i] * _t_82_; _t_77_ -= 8.0 * _t_81_; _t_85_ = u_2[k+1][j][i-2]; _t_85_ -= u_2[k+1][j][i+2]; _t_86_ = -u_2[k+1][j][i-1]; _t_86_ += u_2[k+1][j][i+1]; _t_85_ += 8.0 * _t_86_; _t_84_ = mu[k+1][j][i] * _t_85_; _t_77_ += 8.0 * _t_84_; _t_87_ = u_2[k+2][j][i-2]; _t_87_ -= u_2[k+2][j][i+2]; _t_88_ = -u_2[k+2][j][i-1]; _t_88_ += u_2[k+2][j][i+1]; _t_87_ += 8.0 * _t_88_; _t_77_ -= mu[k+2][j][i] * _t_87_; _t_36_ += _t_76_ * _t_77_; r1 += _t_36_; uacc_0kc0jc0ic0 = a1 * uacc_0kc0jc0ic0; uacc_0kc0jc0ic0 += cof * r1; uacc_0[k*N*N+j*N+i] = uacc_0kc0jc0ic0; b_mux1 = mu[k+1][j][i-1] * strx[i-1]; _t_89_ = mu[k+1][j][i] * strx[i]; _t_89_ += mu[k+1][j][i-2] * strx[i-2]; b_mux1 -= 3.0 / 4.0 * _t_89_; b_mux2 = mu[k+1][j][i-2] * strx[i-2]; b_mux2 += mu[k+1][j][i+1] * strx[i+1]; _t_90_ = mu[k+1][j][i] * strx[i]; _t_90_ += mu[k+1][j][i-1] * strx[i-1]; b_mux2 += 3.0 * _t_90_; b_mux3 = mu[k+1][j][i-1] * strx[i-1]; b_mux3 += mu[k+1][j][i+2] * strx[i+2]; _t_91_ = mu[k+1][j][i+1] * strx[i+1]; _t_91_ += mu[k+1][j][i] * strx[i]; b_mux3 += 3.0 * _t_91_; b_mux4 = mu[k+1][j][i+1] * strx[i+1]; _t_92_ = mu[k+1][j][i] * strx[i]; _t_92_ += mu[k+1][j][i+2] * strx[i+2]; b_mux4 -= 3.0 / 4.0 * _t_92_; b_muy1 = mu[k+1][j-1][i] * stry[j-1]; _t_93_ = mu[k+1][j][i] * stry[j]; _t_93_ += mu[k+1][j-2][i] * stry[j-2]; b_muy1 -= 3.0 / 4.0 * _t_93_; b_muy2 = mu[k+1][j-2][i] * stry[j-2]; b_muy2 += mu[k+1][j+1][i] * stry[j+1]; _t_94_ = mu[k+1][j][i] * stry[j]; _t_94_ += mu[k+1][j-1][i] * stry[j-1]; b_muy2 += 3.0 * _t_94_; b_muy3 = mu[k+1][j-1][i] * stry[j-1]; b_muy3 += mu[k+1][j+2][i] * stry[j+2]; _t_95_ = mu[k+1][j+1][i] * stry[j+1]; _t_95_ += mu[k+1][j][i] * stry[j]; b_muy3 += 3.0 * _t_95_; b_muy4 = mu[k+1][j+1][i] * stry[j+1]; _t_96_ = mu[k+1][j][i] * stry[j]; _t_96_ += mu[k+1][j+2][i] * stry[j+2]; b_muy4 -= 3.0 / 4.0 * _t_96_; b_muz1 = mu[k][j][i] * strz[k]; _t_97_ = mu[k+1][j][i] * strz[k+1]; _t_97_ += mu[k-1][j][i] * strz[k-1]; b_muz1 -= 3.0 / 4.0 * _t_97_; b_muz2 = mu[k-1][j][i] * strz[k-1]; b_muz2 += mu[k+2][j][i] * strz[k+2]; _t_98_ = mu[k+1][j][i] * strz[k+1]; _t_98_ += mu[k][j][i] * strz[k]; b_muz2 += 3.0 * _t_98_; b_muz3 = mu[k][j][i] * strz[k]; b_muz3 += mu[k+3][j][i] * strz[k+3]; _t_99_ = mu[k+2][j][i] * strz[k+2]; _t_99_ += mu[k+1][j][i] * strz[k+1]; b_muz3 += 3.0 * _t_99_; b_muz4 = mu[k+2][j][i] * strz[k+2]; _t_100_ = mu[k+1][j][i] * strz[k+1]; _t_100_ += mu[k+3][j][i] * strz[k+3]; b_muz4 -= 3.0 / 4.0 * _t_100_; _t_103_ = 2.0 * b_mux1; _t_103_ += la[k+1][j][i-1] * strx[i-1]; _t_105_ = la[k+1][j][i] * strx[i]; _t_105_ += la[k+1][j][i-2] * strx[i-2]; _t_103_ -= 3.0 / 4.0 * _t_105_; _t_104_ = u_0[k+1][j][i-2]; _t_104_ -= u_0[k+1][j][i]; _t_102_ = _t_103_ * _t_104_; _t_106_ = 2.0 * b_mux2; _t_106_ += la[k+1][j][i-2] * strx[i-2]; _t_106_ += la[k+1][j][i+1] * strx[i+1]; _t_108_ = la[k+1][j][i] * strx[i]; _t_108_ += la[k+1][j][i-1] * strx[i-1]; _t_106_ += 3.0 * _t_108_; _t_107_ = u_0[k+1][j][i-1]; _t_107_ -= u_0[k+1][j][i]; _t_102_ += _t_106_ * _t_107_; _t_109_ = 2.0 * b_mux3; _t_109_ += la[k+1][j][i-1] * strx[i-1]; _t_109_ += la[k+1][j][i+2] * strx[i+2]; _t_111_ = la[k+1][j][i+1] * strx[i+1]; _t_111_ += la[k+1][j][i] * strx[i]; _t_109_ += 3.0 * _t_111_; _t_110_ = u_0[k+1][j][i+1]; _t_110_ -= u_0[k+1][j][i]; _t_102_ += 
_t_109_ * _t_110_; _t_112_ = 2.0 * b_mux4; _t_112_ += la[k+1][j][i+1] * strx[i+1]; _t_114_ = la[k+1][j][i] * strx[i]; _t_114_ += la[k+1][j][i+2] * strx[i+2]; _t_112_ -= 3.0 / 4.0 * _t_114_; _t_113_ = u_0[k+1][j][i+2]; _t_113_ -= u_0[k+1][j][i]; _t_102_ += _t_112_ * _t_113_; _t_101_ = strx[i] * _t_102_; _t_116_ = u_0[k+1][j-2][i]; _t_116_ -= u_0[k+1][j][i]; _t_115_ = b_muy1 * _t_116_; _t_117_ = u_0[k+1][j-1][i]; _t_117_ -= u_0[k+1][j][i]; _t_115_ += b_muy2 * _t_117_; _t_118_ = u_0[k+1][j+1][i]; _t_118_ -= u_0[k+1][j][i]; _t_115_ += b_muy3 * _t_118_; _t_119_ = u_0[k+1][j+2][i]; _t_119_ -= u_0[k+1][j][i]; _t_115_ += b_muy4 * _t_119_; _t_101_ += stry[j] * _t_115_; _t_121_ = u_0[k-1][j][i]; _t_121_ -= u_0[k+1][j][i]; _t_120_ = b_muz1 * _t_121_; _t_122_ = u_0[k][j][i]; _t_122_ -= u_0[k+1][j][i]; _t_120_ += b_muz2 * _t_122_; _t_123_ = u_0[k+2][j][i]; _t_123_ -= u_0[k+1][j][i]; _t_120_ += b_muz3 * _t_123_; _t_124_ = u_0[k+3][j][i]; _t_124_ -= u_0[k+1][j][i]; _t_120_ += b_muz4 * _t_124_; _t_101_ += strz[k+1] * _t_120_; r2 = 1.0 / 6.0 * _t_101_; _t_128_ = strx[i] * stry[j]; _t_126_ = _t_128_ * 1.0 / 144.0; _t_129_ = u_1[k+1][j-2][i-2]; _t_129_ -= u_1[k+1][j+2][i-2]; _t_130_ = -u_1[k+1][j-1][i-2]; _t_130_ += u_1[k+1][j+1][i-2]; _t_129_ += 8.0 * _t_130_; _t_127_ = la[k+1][j][i-2] * _t_129_; _t_132_ = u_1[k+1][j-2][i-1]; _t_132_ -= u_1[k+1][j+2][i-1]; _t_133_ = -u_1[k+1][j-1][i-1]; _t_133_ += u_1[k+1][j+1][i-1]; _t_132_ += 8.0 * _t_133_; _t_131_ = la[k+1][j][i-1] * _t_132_; _t_127_ -= 8.0 * _t_131_; _t_135_ = u_1[k+1][j-2][i+1]; _t_135_ -= u_1[k+1][j+2][i+1]; _t_136_ = -u_1[k+1][j-1][i+1]; _t_136_ += u_1[k+1][j+1][i+1]; _t_135_ += 8.0 * _t_136_; _t_134_ = la[k+1][j][i+1] * _t_135_; _t_127_ += 8.0 * _t_134_; _t_137_ = u_1[k+1][j-2][i+2]; _t_137_ -= u_1[k+1][j+2][i+2]; _t_138_ = -u_1[k+1][j-1][i+2]; _t_138_ += u_1[k+1][j+1][i+2]; _t_137_ += 8.0 * _t_138_; _t_127_ -= la[k+1][j][i+2] * _t_137_; _t_125_ = _t_126_ * _t_127_; _t_141_ = strx[i] * strz[k+1]; _t_139_ = _t_141_ * 1.0 / 144.0; _t_142_ = u_2[k-1][j][i-2]; _t_142_ -= u_2[k+3][j][i-2]; _t_143_ = -u_2[k][j][i-2]; _t_143_ += u_2[k+2][j][i-2]; _t_142_ += 8.0 * _t_143_; _t_140_ = la[k+1][j][i-2] * _t_142_; _t_145_ = u_2[k-1][j][i-1]; _t_145_ -= u_2[k+3][j][i-1]; _t_146_ = -u_2[k][j][i-1]; _t_146_ += u_2[k+2][j][i-1]; _t_145_ += 8.0 * _t_146_; _t_144_ = la[k+1][j][i-1] * _t_145_; _t_140_ -= 8.0 * _t_144_; _t_148_ = u_2[k-1][j][i+1]; _t_148_ -= u_2[k+3][j][i+1]; _t_149_ = -u_2[k][j][i+1]; _t_149_ += u_2[k+2][j][i+1]; _t_148_ += 8.0 * _t_149_; _t_147_ = la[k+1][j][i+1] * _t_148_; _t_140_ += 8.0 * _t_147_; _t_150_ = u_2[k-1][j][i+2]; _t_150_ -= u_2[k+3][j][i+2]; _t_151_ = -u_2[k][j][i+2]; _t_151_ += u_2[k+2][j][i+2]; _t_150_ += 8.0 * _t_151_; _t_140_ -= la[k+1][j][i+2] * _t_150_; _t_125_ += _t_139_ * _t_140_; _t_154_ = strx[i] * stry[j]; _t_152_ = _t_154_ * 1.0 / 144.0; _t_155_ = u_1[k+1][j-2][i-2]; _t_155_ -= u_1[k+1][j-2][i+2]; _t_156_ = -u_1[k+1][j-2][i-1]; _t_156_ += u_1[k+1][j-2][i+1]; _t_155_ += 8.0 * _t_156_; _t_153_ = mu[k+1][j-2][i] * _t_155_; _t_158_ = u_1[k+1][j-1][i-2]; _t_158_ -= u_1[k+1][j-1][i+2]; _t_159_ = -u_1[k+1][j-1][i-1]; _t_159_ += u_1[k+1][j-1][i+1]; _t_158_ += 8.0 * _t_159_; _t_157_ = mu[k+1][j-1][i] * _t_158_; _t_153_ -= 8.0 * _t_157_; _t_161_ = u_1[k+1][j+1][i-2]; _t_161_ -= u_1[k+1][j+1][i+2]; _t_162_ = -u_1[k+1][j+1][i-1]; _t_162_ += u_1[k+1][j+1][i+1]; _t_161_ += 8.0 * _t_162_; _t_160_ = mu[k+1][j+1][i] * _t_161_; _t_153_ += 8.0 * _t_160_; _t_163_ = u_1[k+1][j+2][i-2]; _t_163_ -= u_1[k+1][j+2][i+2]; _t_164_ = -u_1[k+1][j+2][i-1]; 
_t_164_ += u_1[k+1][j+2][i+1]; _t_163_ += 8.0 * _t_164_; _t_153_ -= mu[k+1][j+2][i] * _t_163_; _t_125_ += _t_152_ * _t_153_; _t_167_ = strx[i] * strz[k+1]; _t_165_ = _t_167_ * 1.0 / 144.0; _t_168_ = u_2[k-1][j][i-2]; _t_168_ -= u_2[k-1][j][i+2]; _t_169_ = -u_2[k-1][j][i-1]; _t_169_ += u_2[k-1][j][i+1]; _t_168_ += 8.0 * _t_169_; _t_166_ = mu[k-1][j][i] * _t_168_; _t_171_ = u_2[k][j][i-2]; _t_171_ -= u_2[k][j][i+2]; _t_172_ = -u_2[k][j][i-1]; _t_172_ += u_2[k][j][i+1]; _t_171_ += 8.0 * _t_172_; _t_170_ = mu[k][j][i] * _t_171_; _t_166_ -= 8.0 * _t_170_; _t_174_ = u_2[k+2][j][i-2]; _t_174_ -= u_2[k+2][j][i+2]; _t_175_ = -u_2[k+2][j][i-1]; _t_175_ += u_2[k+2][j][i+1]; _t_174_ += 8.0 * _t_175_; _t_173_ = mu[k+2][j][i] * _t_174_; _t_166_ += 8.0 * _t_173_; _t_176_ = u_2[k+3][j][i-2]; _t_176_ -= u_2[k+3][j][i+2]; _t_177_ = -u_2[k+3][j][i-1]; _t_177_ += u_2[k+3][j][i+1]; _t_176_ += 8.0 * _t_177_; _t_166_ -= mu[k+3][j][i] * _t_176_; _t_125_ += _t_165_ * _t_166_; r2 += _t_125_; uacc_0kp1jc0ic0 = a1 * uacc_0kp1jc0ic0; uacc_0kp1jc0ic0 += cof * r2; uacc_0[(k+1)*N*N+j*N+i] = uacc_0kp1jc0ic0; } } } __global__ void __launch_bounds__ (128,2) sw4_b (double * uacc_1, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * __restrict__ strx, double * __restrict__ stry, double * __restrict__ strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; /* Total 687 flops */ if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 2 for (int k=2; k<=N-3; k+=2) { double a_mux1; double _t_0_; double a_mux2; double _t_1_; double a_mux3; double _t_2_; double a_mux4; double _t_3_; double a_muy1; double _t_4_; double a_muy2; double _t_5_; double a_muy3; double _t_6_; double a_muy4; double _t_7_; double a_muz1; double _t_8_; double a_muz2; double _t_9_; double a_muz3; double _t_10_; double a_muz4; double _t_11_; double _t_14_; double _t_13_; double _t_15_; double _t_16_; double _t_17_; double _t_12_; double _t_19_; double _t_21_; double _t_20_; double _t_18_; double _t_22_; double _t_24_; double _t_23_; double _t_25_; double _t_27_; double _t_26_; double _t_28_; double _t_30_; double _t_29_; double _t_32_; double _t_31_; double _t_33_; double _t_34_; double _t_35_; double r2; double _t_39_; double _t_37_; double _t_40_; double _t_41_; double _t_38_; double _t_43_; double _t_44_; double _t_42_; double _t_46_; double _t_47_; double _t_45_; double _t_48_; double _t_49_; double _t_36_; double _t_52_; double _t_50_; double _t_53_; double _t_54_; double _t_51_; double _t_56_; double _t_57_; double _t_55_; double _t_59_; double _t_60_; double _t_58_; double _t_61_; double _t_62_; double _t_65_; double _t_63_; double _t_66_; double _t_67_; double _t_64_; double _t_69_; double _t_70_; double _t_68_; double _t_72_; double _t_73_; double _t_71_; double _t_74_; double _t_75_; double _t_78_; double _t_76_; double _t_79_; double _t_80_; double _t_77_; double _t_82_; double _t_83_; 
double _t_81_; double _t_85_; double _t_86_; double _t_84_; double _t_87_; double _t_88_; double uacc_1kc0jc0ic0 = uacc_1[k*N*N+j*N+i]; double b_mux1; double _t_89_; double b_mux2; double _t_90_; double b_mux3; double _t_91_; double b_mux4; double _t_92_; double b_muy1; double _t_93_; double b_muy2; double _t_94_; double b_muy3; double _t_95_; double b_muy4; double _t_96_; double b_muz1; double _t_97_; double b_muz2; double _t_98_; double b_muz3; double _t_99_; double b_muz4; double _t_100_; double _t_103_; double _t_102_; double _t_104_; double _t_105_; double _t_106_; double _t_101_; double _t_108_; double _t_110_; double _t_109_; double _t_107_; double _t_111_; double _t_113_; double _t_112_; double _t_114_; double _t_116_; double _t_115_; double _t_117_; double _t_119_; double _t_118_; double _t_121_; double _t_120_; double _t_122_; double _t_123_; double _t_124_; double r3; double _t_128_; double _t_126_; double _t_129_; double _t_130_; double _t_127_; double _t_132_; double _t_133_; double _t_131_; double _t_135_; double _t_136_; double _t_134_; double _t_137_; double _t_138_; double _t_125_; double _t_141_; double _t_139_; double _t_142_; double _t_143_; double _t_140_; double _t_145_; double _t_146_; double _t_144_; double _t_148_; double _t_149_; double _t_147_; double _t_150_; double _t_151_; double _t_154_; double _t_152_; double _t_155_; double _t_156_; double _t_153_; double _t_158_; double _t_159_; double _t_157_; double _t_161_; double _t_162_; double _t_160_; double _t_163_; double _t_164_; double _t_167_; double _t_165_; double _t_168_; double _t_169_; double _t_166_; double _t_171_; double _t_172_; double _t_170_; double _t_174_; double _t_175_; double _t_173_; double _t_176_; double _t_177_; double uacc_1kp1jc0ic0 = uacc_1[(k+1)*N*N+j*N+i]; a_mux1 = mu[k][j][i-1] * strx[i-1]; _t_0_ = mu[k][j][i] * strx[i]; _t_0_ += mu[k][j][i-2] * strx[i-2]; a_mux1 -= 3.0 / 4.0 * _t_0_; a_mux2 = mu[k][j][i-2] * strx[i-2]; a_mux2 += mu[k][j][i+1] * strx[i+1]; _t_1_ = mu[k][j][i] * strx[i]; _t_1_ += mu[k][j][i-1] * strx[i-1]; a_mux2 += 3.0 * _t_1_; a_mux3 = mu[k][j][i-1] * strx[i-1]; a_mux3 += mu[k][j][i+2] * strx[i+2]; _t_2_ = mu[k][j][i+1] * strx[i+1]; _t_2_ += mu[k][j][i] * strx[i]; a_mux3 += 3.0 * _t_2_; a_mux4 = mu[k][j][i+1] * strx[i+1]; _t_3_ = mu[k][j][i] * strx[i]; _t_3_ += mu[k][j][i+2] * strx[i+2]; a_mux4 -= 3.0 / 4.0 * _t_3_; a_muy1 = mu[k][j-1][i] * stry[j-1]; _t_4_ = mu[k][j][i] * stry[j]; _t_4_ += mu[k][j-2][i] * stry[j-2]; a_muy1 -= 3.0 / 4.0 * _t_4_; a_muy2 = mu[k][j-2][i] * stry[j-2]; a_muy2 += mu[k][j+1][i] * stry[j+1]; _t_5_ = mu[k][j][i] * stry[j]; _t_5_ += mu[k][j-1][i] * stry[j-1]; a_muy2 += 3.0 * _t_5_; a_muy3 = mu[k][j-1][i] * stry[j-1]; a_muy3 += mu[k][j+2][i] * stry[j+2]; _t_6_ = mu[k][j+1][i] * stry[j+1]; _t_6_ += mu[k][j][i] * stry[j]; a_muy3 += 3.0 * _t_6_; a_muy4 = mu[k][j+1][i] * stry[j+1]; _t_7_ = mu[k][j][i] * stry[j]; _t_7_ += mu[k][j+2][i] * stry[j+2]; a_muy4 -= 3.0 / 4.0 * _t_7_; a_muz1 = mu[k-1][j][i] * strz[k-1]; _t_8_ = mu[k][j][i] * strz[k]; _t_8_ += mu[k-2][j][i] * strz[k-2]; a_muz1 -= 3.0 / 4.0 * _t_8_; a_muz2 = mu[k-2][j][i] * strz[k-2]; a_muz2 += mu[k+1][j][i] * strz[k+1]; _t_9_ = mu[k][j][i] * strz[k]; _t_9_ += mu[k-1][j][i] * strz[k-1]; a_muz2 += 3.0 * _t_9_; a_muz3 = mu[k-1][j][i] * strz[k-1]; a_muz3 += mu[k+2][j][i] * strz[k+2]; _t_10_ = mu[k+1][j][i] * strz[k+1]; _t_10_ += mu[k][j][i] * strz[k]; a_muz3 += 3.0 * _t_10_; a_muz4 = mu[k+1][j][i] * strz[k+1]; _t_11_ = mu[k][j][i] * strz[k]; _t_11_ += mu[k+2][j][i] * strz[k+2]; a_muz4 -= 3.0 / 
4.0 * _t_11_; _t_14_ = u_1[k][j][i-2]; _t_14_ -= u_1[k][j][i]; _t_13_ = a_mux1 * _t_14_; _t_15_ = u_1[k][j][i-1]; _t_15_ -= u_1[k][j][i]; _t_13_ += a_mux2 * _t_15_; _t_16_ = u_1[k][j][i+1]; _t_16_ -= u_1[k][j][i]; _t_13_ += a_mux3 * _t_16_; _t_17_ = u_1[k][j][i+2]; _t_17_ -= u_1[k][j][i]; _t_13_ += a_mux4 * _t_17_; _t_12_ = strx[i] * _t_13_; _t_19_ = 2.0 * a_muy1; _t_19_ += la[k][j-1][i] * stry[j-1]; _t_21_ = la[k][j][i] * stry[j]; _t_21_ += la[k][j-2][i] * stry[j-2]; _t_19_ -= 3.0 / 4.0 * _t_21_; _t_20_ = u_1[k][j-2][i]; _t_20_ -= u_1[k][j][i]; _t_18_ = _t_19_ * _t_20_; _t_22_ = 2.0 * a_muy2; _t_22_ += la[k][j-2][i] * stry[j-2]; _t_22_ += la[k][j+1][i] * stry[j+1]; _t_24_ = la[k][j][i] * stry[j]; _t_24_ += la[k][j-1][i] * stry[j-1]; _t_22_ += 3.0 * _t_24_; _t_23_ = u_1[k][j-1][i]; _t_23_ -= u_1[k][j][i]; _t_18_ += _t_22_ * _t_23_; _t_25_ = 2.0 * a_muy3; _t_25_ += la[k][j-1][i] * stry[j-1]; _t_25_ += la[k][j+2][i] * stry[j+2]; _t_27_ = la[k][j+1][i] * stry[j+1]; _t_27_ += la[k][j][i] * stry[j]; _t_25_ += 3.0 * _t_27_; _t_26_ = u_1[k][j+1][i]; _t_26_ -= u_1[k][j][i]; _t_18_ += _t_25_ * _t_26_; _t_28_ = 2.0 * a_muy4; _t_28_ += la[k][j+1][i] * stry[j+1]; _t_30_ = la[k][j][i] * stry[j]; _t_30_ += la[k][j+2][i] * stry[j+2]; _t_28_ -= 3.0 / 4.0 * _t_30_; _t_29_ = u_1[k][j+2][i]; _t_29_ -= u_1[k][j][i]; _t_18_ += _t_28_ * _t_29_; _t_12_ += stry[j] * _t_18_; _t_32_ = u_1[k-2][j][i]; _t_32_ -= u_1[k][j][i]; _t_31_ = a_muz1 * _t_32_; _t_33_ = u_1[k-1][j][i]; _t_33_ -= u_1[k][j][i]; _t_31_ += a_muz2 * _t_33_; _t_34_ = u_1[k+1][j][i]; _t_34_ -= u_1[k][j][i]; _t_31_ += a_muz3 * _t_34_; _t_35_ = u_1[k+2][j][i]; _t_35_ -= u_1[k][j][i]; _t_31_ += a_muz4 * _t_35_; _t_12_ += strz[k] * _t_31_; r2 = 1.0 / 6.0 * _t_12_; _t_39_ = strx[i] * stry[j]; _t_37_ = _t_39_ * 1.0 / 144.0; _t_40_ = u_0[k][j-2][i-2]; _t_40_ -= u_0[k][j+2][i-2]; _t_41_ = -u_0[k][j-1][i-2]; _t_41_ += u_0[k][j+1][i-2]; _t_40_ += 8.0 * _t_41_; _t_38_ = mu[k][j][i-2] * _t_40_; _t_43_ = u_0[k][j-2][i-1]; _t_43_ -= u_0[k][j+2][i-1]; _t_44_ = -u_0[k][j-1][i-1]; _t_44_ += u_0[k][j+1][i-1]; _t_43_ += 8.0 * _t_44_; _t_42_ = mu[k][j][i-1] * _t_43_; _t_38_ -= 8.0 * _t_42_; _t_46_ = u_0[k][j-2][i+1]; _t_46_ -= u_0[k][j+2][i+1]; _t_47_ = -u_0[k][j-1][i+1]; _t_47_ += u_0[k][j+1][i+1]; _t_46_ += 8.0 * _t_47_; _t_45_ = mu[k][j][i+1] * _t_46_; _t_38_ += 8.0 * _t_45_; _t_48_ = u_0[k][j-2][i+2]; _t_48_ -= u_0[k][j+2][i+2]; _t_49_ = -u_0[k][j-1][i+2]; _t_49_ += u_0[k][j+1][i+2]; _t_48_ += 8.0 * _t_49_; _t_38_ -= mu[k][j][i+2] * _t_48_; _t_36_ = _t_37_ * _t_38_; _t_52_ = strx[i] * stry[j]; _t_50_ = _t_52_ * 1.0 / 144.0; _t_53_ = u_0[k][j-2][i-2]; _t_53_ -= u_0[k][j-2][i+2]; _t_54_ = -u_0[k][j-2][i-1]; _t_54_ += u_0[k][j-2][i+1]; _t_53_ += 8.0 * _t_54_; _t_51_ = la[k][j-2][i] * _t_53_; _t_56_ = u_0[k][j-1][i-2]; _t_56_ -= u_0[k][j-1][i+2]; _t_57_ = -u_0[k][j-1][i-1]; _t_57_ += u_0[k][j-1][i+1]; _t_56_ += 8.0 * _t_57_; _t_55_ = la[k][j-1][i] * _t_56_; _t_51_ -= 8.0 * _t_55_; _t_59_ = u_0[k][j+1][i-2]; _t_59_ -= u_0[k][j+1][i+2]; _t_60_ = -u_0[k][j+1][i-1]; _t_60_ += u_0[k][j+1][i+1]; _t_59_ += 8.0 * _t_60_; _t_58_ = la[k][j+1][i] * _t_59_; _t_51_ += 8.0 * _t_58_; _t_61_ = u_0[k][j+2][i-2]; _t_61_ -= u_0[k][j+2][i+2]; _t_62_ = -u_0[k][j+2][i-1]; _t_62_ += u_0[k][j+2][i+1]; _t_61_ += 8.0 * _t_62_; _t_51_ -= la[k][j+2][i] * _t_61_; _t_36_ += _t_50_ * _t_51_; _t_65_ = stry[j] * strz[k]; _t_63_ = _t_65_ * 1.0 / 144.0; _t_66_ = u_2[k-2][j-2][i]; _t_66_ -= u_2[k+2][j-2][i]; _t_67_ = -u_2[k-1][j-2][i]; _t_67_ += u_2[k+1][j-2][i]; _t_66_ += 8.0 * _t_67_; _t_64_ = 
la[k][j-2][i] * _t_66_; _t_69_ = u_2[k-2][j-1][i]; _t_69_ -= u_2[k+2][j-1][i]; _t_70_ = -u_2[k-1][j-1][i]; _t_70_ += u_2[k+1][j-1][i]; _t_69_ += 8.0 * _t_70_; _t_68_ = la[k][j-1][i] * _t_69_; _t_64_ -= 8.0 * _t_68_; _t_72_ = u_2[k-2][j+1][i]; _t_72_ -= u_2[k+2][j+1][i]; _t_73_ = -u_2[k-1][j+1][i]; _t_73_ += u_2[k+1][j+1][i]; _t_72_ += 8.0 * _t_73_; _t_71_ = la[k][j+1][i] * _t_72_; _t_64_ += 8.0 * _t_71_; _t_74_ = u_2[k-2][j+2][i]; _t_74_ -= u_2[k+2][j+2][i]; _t_75_ = -u_2[k-1][j+2][i]; _t_75_ += u_2[k+1][j+2][i]; _t_74_ += 8.0 * _t_75_; _t_64_ -= la[k][j+2][i] * _t_74_; _t_36_ += _t_63_ * _t_64_; _t_78_ = stry[j] * strz[k]; _t_76_ = _t_78_ * 1.0 / 144.0; _t_79_ = u_2[k-2][j-2][i]; _t_79_ -= u_2[k-2][j+2][i]; _t_80_ = -u_2[k-2][j-1][i]; _t_80_ += u_2[k-2][j+1][i]; _t_79_ += 8.0 * _t_80_; _t_77_ = mu[k-2][j][i] * _t_79_; _t_82_ = u_2[k-1][j-2][i]; _t_82_ -= u_2[k-1][j+2][i]; _t_83_ = -u_2[k-1][j-1][i]; _t_83_ += u_2[k-1][j+1][i]; _t_82_ += 8.0 * _t_83_; _t_81_ = mu[k-1][j][i] * _t_82_; _t_77_ -= 8.0 * _t_81_; _t_85_ = u_2[k+1][j-2][i]; _t_85_ -= u_2[k+1][j+2][i]; _t_86_ = -u_2[k+1][j-1][i]; _t_86_ += u_2[k+1][j+1][i]; _t_85_ += 8.0 * _t_86_; _t_84_ = mu[k+1][j][i] * _t_85_; _t_77_ += 8.0 * _t_84_; _t_87_ = u_2[k+2][j-2][i]; _t_87_ -= u_2[k+2][j+2][i]; _t_88_ = -u_2[k+2][j-1][i]; _t_88_ += u_2[k+2][j+1][i]; _t_87_ += 8.0 * _t_88_; _t_77_ -= mu[k+2][j][i] * _t_87_; _t_36_ += _t_76_ * _t_77_; r2 += _t_36_; uacc_1kc0jc0ic0 = a1 * uacc_1kc0jc0ic0; uacc_1kc0jc0ic0 += cof * r2; uacc_1[k*N*N+j*N+i] = uacc_1kc0jc0ic0; b_mux1 = mu[k+1][j][i-1] * strx[i-1]; _t_89_ = mu[k+1][j][i] * strx[i]; _t_89_ += mu[k+1][j][i-2] * strx[i-2]; b_mux1 -= 3.0 / 4.0 * _t_89_; b_mux2 = mu[k+1][j][i-2] * strx[i-2]; b_mux2 += mu[k+1][j][i+1] * strx[i+1]; _t_90_ = mu[k+1][j][i] * strx[i]; _t_90_ += mu[k+1][j][i-1] * strx[i-1]; b_mux2 += 3.0 * _t_90_; b_mux3 = mu[k+1][j][i-1] * strx[i-1]; b_mux3 += mu[k+1][j][i+2] * strx[i+2]; _t_91_ = mu[k+1][j][i+1] * strx[i+1]; _t_91_ += mu[k+1][j][i] * strx[i]; b_mux3 += 3.0 * _t_91_; b_mux4 = mu[k+1][j][i+1] * strx[i+1]; _t_92_ = mu[k+1][j][i] * strx[i]; _t_92_ += mu[k+1][j][i+2] * strx[i+2]; b_mux4 -= 3.0 / 4.0 * _t_92_; b_muy1 = mu[k+1][j-1][i] * stry[j-1]; _t_93_ = mu[k+1][j][i] * stry[j]; _t_93_ += mu[k+1][j-2][i] * stry[j-2]; b_muy1 -= 3.0 / 4.0 * _t_93_; b_muy2 = mu[k+1][j-2][i] * stry[j-2]; b_muy2 += mu[k+1][j+1][i] * stry[j+1]; _t_94_ = mu[k+1][j][i] * stry[j]; _t_94_ += mu[k+1][j-1][i] * stry[j-1]; b_muy2 += 3.0 * _t_94_; b_muy3 = mu[k+1][j-1][i] * stry[j-1]; b_muy3 += mu[k+1][j+2][i] * stry[j+2]; _t_95_ = mu[k+1][j+1][i] * stry[j+1]; _t_95_ += mu[k+1][j][i] * stry[j]; b_muy3 += 3.0 * _t_95_; b_muy4 = mu[k+1][j+1][i] * stry[j+1]; _t_96_ = mu[k+1][j][i] * stry[j]; _t_96_ += mu[k+1][j+2][i] * stry[j+2]; b_muy4 -= 3.0 / 4.0 * _t_96_; b_muz1 = mu[k][j][i] * strz[k]; _t_97_ = mu[k+1][j][i] * strz[k+1]; _t_97_ += mu[k-1][j][i] * strz[k-1]; b_muz1 -= 3.0 / 4.0 * _t_97_; b_muz2 = mu[k-1][j][i] * strz[k-1]; b_muz2 += mu[k+2][j][i] * strz[k+2]; _t_98_ = mu[k+1][j][i] * strz[k+1]; _t_98_ += mu[k][j][i] * strz[k]; b_muz2 += 3.0 * _t_98_; b_muz3 = mu[k][j][i] * strz[k]; b_muz3 += mu[k+3][j][i] * strz[k+3]; _t_99_ = mu[k+2][j][i] * strz[k+2]; _t_99_ += mu[k+1][j][i] * strz[k+1]; b_muz3 += 3.0 * _t_99_; b_muz4 = mu[k+2][j][i] * strz[k+2]; _t_100_ = mu[k+1][j][i] * strz[k+1]; _t_100_ += mu[k+3][j][i] * strz[k+3]; b_muz4 -= 3.0 / 4.0 * _t_100_; _t_103_ = u_1[k+1][j][i-2]; _t_103_ -= u_1[k+1][j][i]; _t_102_ = b_mux1 * _t_103_; _t_104_ = u_1[k+1][j][i-1]; _t_104_ -= u_1[k+1][j][i]; _t_102_ += 
b_mux2 * _t_104_; _t_105_ = u_1[k+1][j][i+1]; _t_105_ -= u_1[k+1][j][i]; _t_102_ += b_mux3 * _t_105_; _t_106_ = u_1[k+1][j][i+2]; _t_106_ -= u_1[k+1][j][i]; _t_102_ += b_mux4 * _t_106_; _t_101_ = strx[i] * _t_102_; _t_108_ = 2.0 * b_muy1; _t_108_ += la[k+1][j-1][i] * stry[j-1]; _t_110_ = la[k+1][j][i] * stry[j]; _t_110_ += la[k+1][j-2][i] * stry[j-2]; _t_108_ -= 3.0 / 4.0 * _t_110_; _t_109_ = u_1[k+1][j-2][i]; _t_109_ -= u_1[k+1][j][i]; _t_107_ = _t_108_ * _t_109_; _t_111_ = 2.0 * b_muy2; _t_111_ += la[k+1][j-2][i] * stry[j-2]; _t_111_ += la[k+1][j+1][i] * stry[j+1]; _t_113_ = la[k+1][j][i] * stry[j]; _t_113_ += la[k+1][j-1][i] * stry[j-1]; _t_111_ += 3.0 * _t_113_; _t_112_ = u_1[k+1][j-1][i]; _t_112_ -= u_1[k+1][j][i]; _t_107_ += _t_111_ * _t_112_; _t_114_ = 2.0 * b_muy3; _t_114_ += la[k+1][j-1][i] * stry[j-1]; _t_114_ += la[k+1][j+2][i] * stry[j+2]; _t_116_ = la[k+1][j+1][i] * stry[j+1]; _t_116_ += la[k+1][j][i] * stry[j]; _t_114_ += 3.0 * _t_116_; _t_115_ = u_1[k+1][j+1][i]; _t_115_ -= u_1[k+1][j][i]; _t_107_ += _t_114_ * _t_115_; _t_117_ = 2.0 * b_muy4; _t_117_ += la[k+1][j+1][i] * stry[j+1]; _t_119_ = la[k+1][j][i] * stry[j]; _t_119_ += la[k+1][j+2][i] * stry[j+2]; _t_117_ -= 3.0 / 4.0 * _t_119_; _t_118_ = u_1[k+1][j+2][i]; _t_118_ -= u_1[k+1][j][i]; _t_107_ += _t_117_ * _t_118_; _t_101_ += stry[j] * _t_107_; _t_121_ = u_1[k-1][j][i]; _t_121_ -= u_1[k+1][j][i]; _t_120_ = b_muz1 * _t_121_; _t_122_ = u_1[k][j][i]; _t_122_ -= u_1[k+1][j][i]; _t_120_ += b_muz2 * _t_122_; _t_123_ = u_1[k+2][j][i]; _t_123_ -= u_1[k+1][j][i]; _t_120_ += b_muz3 * _t_123_; _t_124_ = u_1[k+3][j][i]; _t_124_ -= u_1[k+1][j][i]; _t_120_ += b_muz4 * _t_124_; _t_101_ += strz[k+1] * _t_120_; r3 = 1.0 / 6.0 * _t_101_; _t_128_ = strx[i] * stry[j]; _t_126_ = _t_128_ * 1.0 / 144.0; _t_129_ = u_0[k+1][j-2][i-2]; _t_129_ -= u_0[k+1][j+2][i-2]; _t_130_ = -u_0[k+1][j-1][i-2]; _t_130_ += u_0[k+1][j+1][i-2]; _t_129_ += 8.0 * _t_130_; _t_127_ = mu[k+1][j][i-2] * _t_129_; _t_132_ = u_0[k+1][j-2][i-1]; _t_132_ -= u_0[k+1][j+2][i-1]; _t_133_ = -u_0[k+1][j-1][i-1]; _t_133_ += u_0[k+1][j+1][i-1]; _t_132_ += 8.0 * _t_133_; _t_131_ = mu[k+1][j][i-1] * _t_132_; _t_127_ -= 8.0 * _t_131_; _t_135_ = u_0[k+1][j-2][i+1]; _t_135_ -= u_0[k+1][j+2][i+1]; _t_136_ = -u_0[k+1][j-1][i+1]; _t_136_ += u_0[k+1][j+1][i+1]; _t_135_ += 8.0 * _t_136_; _t_134_ = mu[k+1][j][i+1] * _t_135_; _t_127_ += 8.0 * _t_134_; _t_137_ = u_0[k+1][j-2][i+2]; _t_137_ -= u_0[k+1][j+2][i+2]; _t_138_ = -u_0[k+1][j-1][i+2]; _t_138_ += u_0[k+1][j+1][i+2]; _t_137_ += 8.0 * _t_138_; _t_127_ -= mu[k+1][j][i+2] * _t_137_; _t_125_ = _t_126_ * _t_127_; _t_141_ = strx[i] * stry[j]; _t_139_ = _t_141_ * 1.0 / 144.0; _t_142_ = u_0[k+1][j-2][i-2]; _t_142_ -= u_0[k+1][j-2][i+2]; _t_143_ = -u_0[k+1][j-2][i-1]; _t_143_ += u_0[k+1][j-2][i+1]; _t_142_ += 8.0 * _t_143_; _t_140_ = la[k+1][j-2][i] * _t_142_; _t_145_ = u_0[k+1][j-1][i-2]; _t_145_ -= u_0[k+1][j-1][i+2]; _t_146_ = -u_0[k+1][j-1][i-1]; _t_146_ += u_0[k+1][j-1][i+1]; _t_145_ += 8.0 * _t_146_; _t_144_ = la[k+1][j-1][i] * _t_145_; _t_140_ -= 8.0 * _t_144_; _t_148_ = u_0[k+1][j+1][i-2]; _t_148_ -= u_0[k+1][j+1][i+2]; _t_149_ = -u_0[k+1][j+1][i-1]; _t_149_ += u_0[k+1][j+1][i+1]; _t_148_ += 8.0 * _t_149_; _t_147_ = la[k+1][j+1][i] * _t_148_; _t_140_ += 8.0 * _t_147_; _t_150_ = u_0[k+1][j+2][i-2]; _t_150_ -= u_0[k+1][j+2][i+2]; _t_151_ = -u_0[k+1][j+2][i-1]; _t_151_ += u_0[k+1][j+2][i+1]; _t_150_ += 8.0 * _t_151_; _t_140_ -= la[k+1][j+2][i] * _t_150_; _t_125_ += _t_139_ * _t_140_; _t_154_ = stry[j] * strz[k+1]; _t_152_ = _t_154_ * 1.0 / 
144.0; _t_155_ = u_2[k-1][j-2][i]; _t_155_ -= u_2[k+3][j-2][i]; _t_156_ = -u_2[k][j-2][i]; _t_156_ += u_2[k+2][j-2][i]; _t_155_ += 8.0 * _t_156_; _t_153_ = la[k+1][j-2][i] * _t_155_; _t_158_ = u_2[k-1][j-1][i]; _t_158_ -= u_2[k+3][j-1][i]; _t_159_ = -u_2[k][j-1][i]; _t_159_ += u_2[k+2][j-1][i]; _t_158_ += 8.0 * _t_159_; _t_157_ = la[k+1][j-1][i] * _t_158_; _t_153_ -= 8.0 * _t_157_; _t_161_ = u_2[k-1][j+1][i]; _t_161_ -= u_2[k+3][j+1][i]; _t_162_ = -u_2[k][j+1][i]; _t_162_ += u_2[k+2][j+1][i]; _t_161_ += 8.0 * _t_162_; _t_160_ = la[k+1][j+1][i] * _t_161_; _t_153_ += 8.0 * _t_160_; _t_163_ = u_2[k-1][j+2][i]; _t_163_ -= u_2[k+3][j+2][i]; _t_164_ = -u_2[k][j+2][i]; _t_164_ += u_2[k+2][j+2][i]; _t_163_ += 8.0 * _t_164_; _t_153_ -= la[k+1][j+2][i] * _t_163_; _t_125_ += _t_152_ * _t_153_; _t_167_ = stry[j] * strz[k+1]; _t_165_ = _t_167_ * 1.0 / 144.0; _t_168_ = u_2[k-1][j-2][i]; _t_168_ -= u_2[k-1][j+2][i]; _t_169_ = -u_2[k-1][j-1][i]; _t_169_ += u_2[k-1][j+1][i]; _t_168_ += 8.0 * _t_169_; _t_166_ = mu[k-1][j][i] * _t_168_; _t_171_ = u_2[k][j-2][i]; _t_171_ -= u_2[k][j+2][i]; _t_172_ = -u_2[k][j-1][i]; _t_172_ += u_2[k][j+1][i]; _t_171_ += 8.0 * _t_172_; _t_170_ = mu[k][j][i] * _t_171_; _t_166_ -= 8.0 * _t_170_; _t_174_ = u_2[k+2][j-2][i]; _t_174_ -= u_2[k+2][j+2][i]; _t_175_ = -u_2[k+2][j-1][i]; _t_175_ += u_2[k+2][j+1][i]; _t_174_ += 8.0 * _t_175_; _t_173_ = mu[k+2][j][i] * _t_174_; _t_166_ += 8.0 * _t_173_; _t_176_ = u_2[k+3][j-2][i]; _t_176_ -= u_2[k+3][j+2][i]; _t_177_ = -u_2[k+3][j-1][i]; _t_177_ += u_2[k+3][j+1][i]; _t_176_ += 8.0 * _t_177_; _t_166_ -= mu[k+3][j][i] * _t_176_; _t_125_ += _t_165_ * _t_166_; r3 += _t_125_; uacc_1kp1jc0ic0 = a1 * uacc_1kp1jc0ic0; uacc_1kp1jc0ic0 += cof * r3; uacc_1[(k+1)*N*N+j*N+i] = uacc_1kp1jc0ic0; } } } __global__ void __launch_bounds__ (128,2) sw4_c (double * uacc_2, double * __restrict__ u_0, double * __restrict__ u_1, double * __restrict__ u_2, double * __restrict__ mu, double * __restrict__ la, double * __restrict__ strx, double * __restrict__ stry, double * __restrict__ strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); /* Total 687 flops */ if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 10 for (int k=2; k<=N-3; k++) { /* 28 * 3 = 84 flops */ double mux1 = mu[k*N*N+j*N+i-1] * strx[i-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i-2] * strx[i-2]); double mux2 = mu[k*N*N+j*N+i-2] * strx[i-2] + mu[k*N*N+j*N+i+1] * strx[i+1] + 3 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i-1] * strx[i-1]); double mux3 = mu[k*N*N+j*N+i-1] * strx[i-1] + mu[k*N*N+j*N+i+2] * strx[i+2] + 3 * (mu[k*N*N+j*N+i+1] * strx[i+1] + mu[k*N*N+j*N+i] * strx[i]); double mux4 = mu[k*N*N+j*N+i+1] * strx[i+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i+2] * strx[i+2]); double muy1 = mu[k*N*N+(j-1)*N+i] * stry[j-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * stry[j] + mu[k*N*N+(j-2)*N+i] * stry[j-2]); double muy2 = mu[k*N*N+(j-2)*N+i] * stry[j-2] + mu[k*N*N+(j+1)*N+i] * stry[j+1] + 3 * (mu[k*N*N+j*N+i] * stry[j] + mu[k*N*N+(j-1)*N+i] * stry[j-1]); double muy3 = mu[k*N*N+(j-1)*N+i] * stry[j-1] + mu[k*N*N+(j+2)*N+i] * stry[j+2] + 3 * (mu[k*N*N+(j+1)*N+i] * stry[j+1] + mu[k*N*N+j*N+i] * stry[j]); double muy4 = mu[k*N*N+(j+1)*N+i] * stry[j+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * stry[j] 
+ mu[k*N*N+(j+2)*N+i] * stry[j+2]); double muz1 = mu[(k-1)*N*N+j*N+i] * strz[k-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k-2)*N*N+j*N+i] * strz[k-2]); double muz2 = mu[(k-2)*N*N+j*N+i] * strz[k-2] + mu[(k+1)*N*N+j*N+i] * strz[k+1] + 3 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k-1)*N*N+j*N+i] * strz[k-1]); double muz3 = mu[(k-1)*N*N+j*N+i] * strz[k-1] + mu[(k+2)*N*N+j*N+i] * strz[k+2] + 3 * (mu[(k+1)*N*N+j*N+i] * strz[k+1] + mu[k*N*N+j*N+i] * strz[k]); double muz4 = mu[(k+1)*N*N+j*N+i] * strz[k+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k+2)*N*N+j*N+i] * strz[k+2]); double r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k*N*N+j*N+i-2] - u_2[k*N*N+j*N+i]) + mux2 * (u_2[k*N*N+j*N+i-1] - u_2[k*N*N+j*N+i]) + mux3 * (u_2[k*N*N+j*N+i+1] - u_2[k*N*N+j*N+i]) + mux4 * (u_2[k*N*N+j*N+i+2] - u_2[k*N*N+j*N+i])) + stry[j] * (muy1 * (u_2[k*N*N+(j-2)*N+i] - u_2[k*N*N+j*N+i]) + muy2 * (u_2[k*N*N+(j-1)*N+i] - u_2[k*N*N+j*N+i]) + muy3 * (u_2[k*N*N+(j+1)*N+i] - u_2[k*N*N+j*N+i]) + muy4 * (u_2[k*N*N+(j+2)*N+i] - u_2[k*N*N+j*N+i])) + strz[k] * ((2 * muz1 + la[(k-1)*N*N+j*N+i] * strz[k-1] - 3e0 / 4 * (la[k*N*N+j*N+i] * strz[k] + la[(k-2)*N*N+j*N+i] * strz[k-2])) * (u_2[(k-2)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz2 + la[(k-2)*N*N+j*N+i] * strz[k-2] + la[(k+1)*N*N+j*N+i] * strz[k+1] + 3 * (la[k*N*N+j*N+i] * strz[k] + la[(k-1)*N*N+j*N+i] * strz[k-1])) * (u_2[(k-1)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz3 + la[(k-1)*N*N+j*N+i] * strz[k-1] + la[(k+2)*N*N+j*N+i] * strz[k+2] + 3 * (la[(k+1)*N*N+j*N+i] * strz[k+1] + la[k*N*N+j*N+i] * strz[k])) * (u_2[(k+1)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz4 + la[(k+1)*N*N+j*N+i] * strz[k+1] - 3e0 / 4 * (la[k*N*N+j*N+i] * strz[k] + la[(k+2)*N*N+j*N+i] * strz[k+2])) * (u_2[(k+2)*N*N+j*N+i] - u_2[k*N*N+j*N+i]))); r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k*N*N+j*N+i-2] * (u_0[(k-2)*N*N+j*N+i-2] - u_0[(k+2)*N*N+j*N+i-2] + 8 * (-u_0[(k-1)*N*N+j*N+i-2] + u_0[(k+1)*N*N+j*N+i-2])) - 8 * (mu[k*N*N+j*N+i-1] * (u_0[(k-2)*N*N+j*N+i-1] - u_0[(k+2)*N*N+j*N+i-1] + 8 * (-u_0[(k-1)*N*N+j*N+i-1] + u_0[(k+1)*N*N+j*N+i-1]))) + 8 * (mu[k*N*N+j*N+i+1] * (u_0[(k-2)*N*N+j*N+i+1] - u_0[(k+2)*N*N+j*N+i+1] + 8 * (-u_0[(k-1)*N*N+j*N+i+1] + u_0[(k+1)*N*N+j*N+i+1]))) - (mu[k*N*N+j*N+i+2] * (u_0[(k-2)*N*N+j*N+i+2] - u_0[(k+2)*N*N+j*N+i+2] + 8 * (-u_0[(k-1)*N*N+j*N+i+2] + u_0[(k+1)*N*N+j*N+i+2])))); r3 += strx[i] * strz[k] * (1e0 / 144) * (la[(k-2)*N*N+j*N+i] * (u_0[(k-2)*N*N+j*N+i-2] - u_0[(k-2)*N*N+j*N+i+2] + 8 * (-u_0[(k-2)*N*N+j*N+i-1] + u_0[(k-2)*N*N+j*N+i+1])) - 8 * (la[(k-1)*N*N+j*N+i] * (u_0[(k-1)*N*N+j*N+i-2] - u_0[(k-1)*N*N+j*N+i+2] + 8 * (-u_0[(k-1)*N*N+j*N+i-1] + u_0[(k-1)*N*N+j*N+i+1]))) + 8 * (la[(k+1)*N*N+j*N+i] * (u_0[(k+1)*N*N+j*N+i-2] - u_0[(k+1)*N*N+j*N+i+2] + 8 * (-u_0[(k+1)*N*N+j*N+i-1] + u_0[(k+1)*N*N+j*N+i+1]))) - (la[(k+2)*N*N+j*N+i] * (u_0[(k+2)*N*N+j*N+i-2] - u_0[(k+2)*N*N+j*N+i+2] + 8 * (-u_0[(k+2)*N*N+j*N+i-1] + u_0[(k+2)*N*N+j*N+i+1])))); r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k*N*N+(j-2)*N+i] * (u_1[(k-2)*N*N+(j-2)*N+i] - u_1[(k+2)*N*N+(j-2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-2)*N+i] + u_1[(k+1)*N*N+(j-2)*N+i])) - 8 * (mu[k*N*N+(j-1)*N+i] * (u_1[(k-2)*N*N+(j-1)*N+i] - u_1[(k+2)*N*N+(j-1)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-1)*N+i] + u_1[(k+1)*N*N+(j-1)*N+i]))) + 8 * (mu[k*N*N+(j+1)*N+i] * (u_1[(k-2)*N*N+(j+1)*N+i] - u_1[(k+2)*N*N+(j+1)*N+i] + 8 * (-u_1[(k-1)*N*N+(j+1)*N+i] + u_1[(k+1)*N*N+(j+1)*N+i]))) - (mu[k*N*N+(j+2)*N+i] * (u_1[(k-2)*N*N+(j+2)*N+i] - u_1[(k+2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j+2)*N+i] + u_1[(k+1)*N*N+(j+2)*N+i])))); r3 += stry[j] * strz[k] * (1e0 / 144) * 
(la[(k-2)*N*N+j*N+i] * (u_1[(k-2)*N*N+(j-2)*N+i] - u_1[(k-2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-2)*N*N+(j-1)*N+i] + u_1[(k-2)*N*N+(j+1)*N+i])) - 8 * (la[(k-1)*N*N+j*N+i] * (u_1[(k-1)*N*N+(j-2)*N+i] - u_1[(k-1)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-1)*N+i] + u_1[(k-1)*N*N+(j+1)*N+i]))) + 8 * (la[(k+1)*N*N+j*N+i] * (u_1[(k+1)*N*N+(j-2)*N+i] - u_1[(k+1)*N*N+(j+2)*N+i] + 8 * (-u_1[(k+1)*N*N+(j-1)*N+i] + u_1[(k+1)*N*N+(j+1)*N+i]))) - (la[(k+2)*N*N+j*N+i] * (u_1[(k+2)*N*N+(j-2)*N+i] - u_1[(k+2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k+2)*N*N+(j-1)*N+i] + u_1[(k+2)*N*N+(j+1)*N+i])))); /* 3 * 3 = 9 flops */ uacc_2[k*N*N+j*N+i] = a1 * uacc_2[k*N*N+j*N+i] + cof * r3; } } } extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) { double *uacc_0; hipMalloc (&uacc_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_0\n"); hipMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *uacc_1; hipMalloc (&uacc_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_1\n"); hipMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *uacc_2; hipMalloc (&uacc_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_2\n"); hipMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u_0; hipMalloc (&u_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_0\n"); hipMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u_1; hipMalloc (&u_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_1\n"); hipMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u_2; hipMalloc (&u_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_2\n"); hipMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *mu; hipMalloc (&mu, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for mu\n"); hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *la; hipMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *strx; hipMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice); double *stry; hipMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice); double *strz; hipMalloc (&strz, sizeof(double)*N); check_error ("Failed to allocate device memory for strz\n"); hipMemcpy (strz, h_strz, sizeof(double)*N, hipMemcpyHostToDevice); dim3 blockconfig_a (16, 8, 1); dim3 gridconfig_a (ceil(N, blockconfig_a.x), ceil(N, blockconfig_a.y), 1); hipLaunchKernelGGL(( sw4_a) , dim3(gridconfig_a), dim3(blockconfig_a), 0, 0, uacc_0, u_0, u_1, u_2, mu, la, strx, stry, strz, N); dim3 blockconfig_b (16, 8, 1); dim3 gridconfig_b (ceil(N, blockconfig_b.x), ceil(N, blockconfig_b.y), 1); hipLaunchKernelGGL(( sw4_b) , dim3(gridconfig_b), dim3(blockconfig_b), 0, 0, uacc_1, u_0, u_1, u_2, mu, la, strx, stry, strz, N); dim3 blockconfig_c (16, 8, 1); dim3 gridconfig_c (ceil(N, blockconfig_c.x), ceil(N, blockconfig_c.y), 1); hipLaunchKernelGGL(( sw4_c) , dim3(gridconfig_c), dim3(blockconfig_c), 0, 0, 
uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N); hipMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, hipMemcpyDeviceToHost); hipMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost); hipMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, hipMemcpyDeviceToHost); hipFree (uacc_0); hipFree (uacc_1); hipFree (uacc_2); hipFree (u_0); hipFree (u_1); hipFree (u_2); hipFree (mu); hipFree (la); hipFree (strx); hipFree (stry); hipFree (strz); }
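/* Hypothetical usage sketch (not part of the original sources): a minimal host-side
   driver showing how the extern "C" host_code entry point defined above could be
   called. The (double (*)[304][304]) casts inside sw4_a and sw4_b assume N == 304;
   every field array (uacc_*, u_*, mu, la) holds N*N*N doubles, while the 1-D
   stretching arrays strx/stry/strz hold N doubles each. The fill values used below
   are placeholders for illustration only, not values taken from the original code. */
#include <vector>

extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2,
                           double *h_u_0, double *h_u_1, double *h_u_2,
                           double *h_mu, double *h_la,
                           double *h_strx, double *h_stry, double *h_strz, int N);

int main () {
  const int N = 304;                         // grid size assumed by the [304][304] casts
  const size_t vol = (size_t) N * N * N;
  std::vector<double> uacc_0 (vol, 0.0), uacc_1 (vol, 0.0), uacc_2 (vol, 0.0);
  std::vector<double> u_0 (vol, 1.0), u_1 (vol, 1.0), u_2 (vol, 1.0);
  std::vector<double> mu (vol, 1.0), la (vol, 1.0);
  std::vector<double> strx (N, 1.0), stry (N, 1.0), strz (N, 1.0);

  // host_code copies the inputs to the device, launches sw4_a, sw4_b and sw4_c,
  // and copies uacc_0/uacc_1/uacc_2 back into the host buffers passed in.
  host_code (uacc_0.data(), uacc_1.data(), uacc_2.data(),
             u_0.data(), u_1.data(), u_2.data(),
             mu.data(), la.data(),
             strx.data(), stry.data(), strz.data(), N);
  return 0;
}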
b4598b95aeb01c1c3b3e53b97926db0043948652.cu
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void __launch_bounds__ (128,2) sw4_a (double * uacc_0, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * __restrict__ strx, double * __restrict__ stry, double * __restrict__ strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; /* Total 687 flops */ if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 3 for (int k=2; k<=N-3; k+=2) { double a_mux1; double _t_0_; double a_mux2; double _t_1_; double a_mux3; double _t_2_; double a_mux4; double _t_3_; double a_muy1; double _t_4_; double a_muy2; double _t_5_; double a_muy3; double _t_6_; double a_muy4; double _t_7_; double a_muz1; double _t_8_; double a_muz2; double _t_9_; double a_muz3; double _t_10_; double a_muz4; double _t_11_; double _t_14_; double _t_16_; double _t_15_; double _t_13_; double _t_17_; double _t_19_; double _t_18_; double _t_20_; double _t_22_; double _t_21_; double _t_23_; double _t_25_; double _t_24_; double _t_12_; double _t_27_; double _t_26_; double _t_28_; double _t_29_; double _t_30_; double _t_32_; double _t_31_; double _t_33_; double _t_34_; double _t_35_; double r1; double _t_39_; double _t_37_; double _t_40_; double _t_41_; double _t_38_; double _t_43_; double _t_44_; double _t_42_; double _t_46_; double _t_47_; double _t_45_; double _t_48_; double _t_49_; double _t_36_; double _t_52_; double _t_50_; double _t_53_; double _t_54_; double _t_51_; double _t_56_; double _t_57_; double _t_55_; double _t_59_; double _t_60_; double _t_58_; double _t_61_; double _t_62_; double _t_65_; double _t_63_; double _t_66_; double _t_67_; double _t_64_; double _t_69_; double _t_70_; double _t_68_; double _t_72_; double _t_73_; double _t_71_; double _t_74_; double _t_75_; double _t_78_; double _t_76_; double _t_79_; double _t_80_; double _t_77_; double _t_82_; double _t_83_; double _t_81_; double _t_85_; double _t_86_; double _t_84_; double _t_87_; double _t_88_; double uacc_0kc0jc0ic0 = uacc_0[k*N*N+j*N+i]; double b_mux1; double _t_89_; double b_mux2; double _t_90_; double b_mux3; double _t_91_; double b_mux4; double _t_92_; double b_muy1; double _t_93_; double b_muy2; double _t_94_; double b_muy3; double _t_95_; double b_muy4; double _t_96_; double b_muz1; double _t_97_; double b_muz2; double _t_98_; double b_muz3; double _t_99_; double b_muz4; double _t_100_; double _t_103_; double _t_105_; double _t_104_; double _t_102_; double _t_106_; double _t_108_; double _t_107_; double _t_109_; double _t_111_; double _t_110_; double _t_112_; double _t_114_; double _t_113_; double _t_101_; 
double _t_116_; double _t_115_; double _t_117_; double _t_118_; double _t_119_; double _t_121_; double _t_120_; double _t_122_; double _t_123_; double _t_124_; double r2; double _t_128_; double _t_126_; double _t_129_; double _t_130_; double _t_127_; double _t_132_; double _t_133_; double _t_131_; double _t_135_; double _t_136_; double _t_134_; double _t_137_; double _t_138_; double _t_125_; double _t_141_; double _t_139_; double _t_142_; double _t_143_; double _t_140_; double _t_145_; double _t_146_; double _t_144_; double _t_148_; double _t_149_; double _t_147_; double _t_150_; double _t_151_; double _t_154_; double _t_152_; double _t_155_; double _t_156_; double _t_153_; double _t_158_; double _t_159_; double _t_157_; double _t_161_; double _t_162_; double _t_160_; double _t_163_; double _t_164_; double _t_167_; double _t_165_; double _t_168_; double _t_169_; double _t_166_; double _t_171_; double _t_172_; double _t_170_; double _t_174_; double _t_175_; double _t_173_; double _t_176_; double _t_177_; double uacc_0kp1jc0ic0 = uacc_0[(k+1)*N*N+j*N+i]; a_mux1 = mu[k][j][i-1] * strx[i-1]; _t_0_ = mu[k][j][i] * strx[i]; _t_0_ += mu[k][j][i-2] * strx[i-2]; a_mux1 -= 3.0 / 4.0 * _t_0_; a_mux2 = mu[k][j][i-2] * strx[i-2]; a_mux2 += mu[k][j][i+1] * strx[i+1]; _t_1_ = mu[k][j][i] * strx[i]; _t_1_ += mu[k][j][i-1] * strx[i-1]; a_mux2 += 3.0 * _t_1_; a_mux3 = mu[k][j][i-1] * strx[i-1]; a_mux3 += mu[k][j][i+2] * strx[i+2]; _t_2_ = mu[k][j][i+1] * strx[i+1]; _t_2_ += mu[k][j][i] * strx[i]; a_mux3 += 3.0 * _t_2_; a_mux4 = mu[k][j][i+1] * strx[i+1]; _t_3_ = mu[k][j][i] * strx[i]; _t_3_ += mu[k][j][i+2] * strx[i+2]; a_mux4 -= 3.0 / 4.0 * _t_3_; a_muy1 = mu[k][j-1][i] * stry[j-1]; _t_4_ = mu[k][j][i] * stry[j]; _t_4_ += mu[k][j-2][i] * stry[j-2]; a_muy1 -= 3.0 / 4.0 * _t_4_; a_muy2 = mu[k][j-2][i] * stry[j-2]; a_muy2 += mu[k][j+1][i] * stry[j+1]; _t_5_ = mu[k][j][i] * stry[j]; _t_5_ += mu[k][j-1][i] * stry[j-1]; a_muy2 += 3.0 * _t_5_; a_muy3 = mu[k][j-1][i] * stry[j-1]; a_muy3 += mu[k][j+2][i] * stry[j+2]; _t_6_ = mu[k][j+1][i] * stry[j+1]; _t_6_ += mu[k][j][i] * stry[j]; a_muy3 += 3.0 * _t_6_; a_muy4 = mu[k][j+1][i] * stry[j+1]; _t_7_ = mu[k][j][i] * stry[j]; _t_7_ += mu[k][j+2][i] * stry[j+2]; a_muy4 -= 3.0 / 4.0 * _t_7_; a_muz1 = mu[k-1][j][i] * strz[k-1]; _t_8_ = mu[k][j][i] * strz[k]; _t_8_ += mu[k-2][j][i] * strz[k-2]; a_muz1 -= 3.0 / 4.0 * _t_8_; a_muz2 = mu[k-2][j][i] * strz[k-2]; a_muz2 += mu[k+1][j][i] * strz[k+1]; _t_9_ = mu[k][j][i] * strz[k]; _t_9_ += mu[k-1][j][i] * strz[k-1]; a_muz2 += 3.0 * _t_9_; a_muz3 = mu[k-1][j][i] * strz[k-1]; a_muz3 += mu[k+2][j][i] * strz[k+2]; _t_10_ = mu[k+1][j][i] * strz[k+1]; _t_10_ += mu[k][j][i] * strz[k]; a_muz3 += 3.0 * _t_10_; a_muz4 = mu[k+1][j][i] * strz[k+1]; _t_11_ = mu[k][j][i] * strz[k]; _t_11_ += mu[k+2][j][i] * strz[k+2]; a_muz4 -= 3.0 / 4.0 * _t_11_; _t_14_ = 2.0 * a_mux1; _t_14_ += la[k][j][i-1] * strx[i-1]; _t_16_ = la[k][j][i] * strx[i]; _t_16_ += la[k][j][i-2] * strx[i-2]; _t_14_ -= 3.0 / 4.0 * _t_16_; _t_15_ = u_0[k][j][i-2]; _t_15_ -= u_0[k][j][i]; _t_13_ = _t_14_ * _t_15_; _t_17_ = 2.0 * a_mux2; _t_17_ += la[k][j][i-2] * strx[i-2]; _t_17_ += la[k][j][i+1] * strx[i+1]; _t_19_ = la[k][j][i] * strx[i]; _t_19_ += la[k][j][i-1] * strx[i-1]; _t_17_ += 3.0 * _t_19_; _t_18_ = u_0[k][j][i-1]; _t_18_ -= u_0[k][j][i]; _t_13_ += _t_17_ * _t_18_; _t_20_ = 2.0 * a_mux3; _t_20_ += la[k][j][i-1] * strx[i-1]; _t_20_ += la[k][j][i+2] * strx[i+2]; _t_22_ = la[k][j][i+1] * strx[i+1]; _t_22_ += la[k][j][i] * strx[i]; _t_20_ += 3.0 * _t_22_; _t_21_ = 
u_0[k][j][i+1]; _t_21_ -= u_0[k][j][i]; _t_13_ += _t_20_ * _t_21_; _t_23_ = 2.0 * a_mux4; _t_23_ += la[k][j][i+1] * strx[i+1]; _t_25_ = la[k][j][i] * strx[i]; _t_25_ += la[k][j][i+2] * strx[i+2]; _t_23_ -= 3.0 / 4.0 * _t_25_; _t_24_ = u_0[k][j][i+2]; _t_24_ -= u_0[k][j][i]; _t_13_ += _t_23_ * _t_24_; _t_12_ = strx[i] * _t_13_; _t_27_ = u_0[k][j-2][i]; _t_27_ -= u_0[k][j][i]; _t_26_ = a_muy1 * _t_27_; _t_28_ = u_0[k][j-1][i]; _t_28_ -= u_0[k][j][i]; _t_26_ += a_muy2 * _t_28_; _t_29_ = u_0[k][j+1][i]; _t_29_ -= u_0[k][j][i]; _t_26_ += a_muy3 * _t_29_; _t_30_ = u_0[k][j+2][i]; _t_30_ -= u_0[k][j][i]; _t_26_ += a_muy4 * _t_30_; _t_12_ += stry[j] * _t_26_; _t_32_ = u_0[k-2][j][i]; _t_32_ -= u_0[k][j][i]; _t_31_ = a_muz1 * _t_32_; _t_33_ = u_0[k-1][j][i]; _t_33_ -= u_0[k][j][i]; _t_31_ += a_muz2 * _t_33_; _t_34_ = u_0[k+1][j][i]; _t_34_ -= u_0[k][j][i]; _t_31_ += a_muz3 * _t_34_; _t_35_ = u_0[k+2][j][i]; _t_35_ -= u_0[k][j][i]; _t_31_ += a_muz4 * _t_35_; _t_12_ += strz[k] * _t_31_; r1 = 1.0 / 6.0 * _t_12_; _t_39_ = strx[i] * stry[j]; _t_37_ = _t_39_ * 1.0 / 144.0; _t_40_ = u_1[k][j-2][i-2]; _t_40_ -= u_1[k][j+2][i-2]; _t_41_ = -u_1[k][j-1][i-2]; _t_41_ += u_1[k][j+1][i-2]; _t_40_ += 8.0 * _t_41_; _t_38_ = la[k][j][i-2] * _t_40_; _t_43_ = u_1[k][j-2][i-1]; _t_43_ -= u_1[k][j+2][i-1]; _t_44_ = -u_1[k][j-1][i-1]; _t_44_ += u_1[k][j+1][i-1]; _t_43_ += 8.0 * _t_44_; _t_42_ = la[k][j][i-1] * _t_43_; _t_38_ -= 8.0 * _t_42_; _t_46_ = u_1[k][j-2][i+1]; _t_46_ -= u_1[k][j+2][i+1]; _t_47_ = -u_1[k][j-1][i+1]; _t_47_ += u_1[k][j+1][i+1]; _t_46_ += 8.0 * _t_47_; _t_45_ = la[k][j][i+1] * _t_46_; _t_38_ += 8.0 * _t_45_; _t_48_ = u_1[k][j-2][i+2]; _t_48_ -= u_1[k][j+2][i+2]; _t_49_ = -u_1[k][j-1][i+2]; _t_49_ += u_1[k][j+1][i+2]; _t_48_ += 8.0 * _t_49_; _t_38_ -= la[k][j][i+2] * _t_48_; _t_36_ = _t_37_ * _t_38_; _t_52_ = strx[i] * strz[k]; _t_50_ = _t_52_ * 1.0 / 144.0; _t_53_ = u_2[k-2][j][i-2]; _t_53_ -= u_2[k+2][j][i-2]; _t_54_ = -u_2[k-1][j][i-2]; _t_54_ += u_2[k+1][j][i-2]; _t_53_ += 8.0 * _t_54_; _t_51_ = la[k][j][i-2] * _t_53_; _t_56_ = u_2[k-2][j][i-1]; _t_56_ -= u_2[k+2][j][i-1]; _t_57_ = -u_2[k-1][j][i-1]; _t_57_ += u_2[k+1][j][i-1]; _t_56_ += 8.0 * _t_57_; _t_55_ = la[k][j][i-1] * _t_56_; _t_51_ -= 8.0 * _t_55_; _t_59_ = u_2[k-2][j][i+1]; _t_59_ -= u_2[k+2][j][i+1]; _t_60_ = -u_2[k-1][j][i+1]; _t_60_ += u_2[k+1][j][i+1]; _t_59_ += 8.0 * _t_60_; _t_58_ = la[k][j][i+1] * _t_59_; _t_51_ += 8.0 * _t_58_; _t_61_ = u_2[k-2][j][i+2]; _t_61_ -= u_2[k+2][j][i+2]; _t_62_ = -u_2[k-1][j][i+2]; _t_62_ += u_2[k+1][j][i+2]; _t_61_ += 8.0 * _t_62_; _t_51_ -= la[k][j][i+2] * _t_61_; _t_36_ += _t_50_ * _t_51_; _t_65_ = strx[i] * stry[j]; _t_63_ = _t_65_ * 1.0 / 144.0; _t_66_ = u_1[k][j-2][i-2]; _t_66_ -= u_1[k][j-2][i+2]; _t_67_ = -u_1[k][j-2][i-1]; _t_67_ += u_1[k][j-2][i+1]; _t_66_ += 8.0 * _t_67_; _t_64_ = mu[k][j-2][i] * _t_66_; _t_69_ = u_1[k][j-1][i-2]; _t_69_ -= u_1[k][j-1][i+2]; _t_70_ = -u_1[k][j-1][i-1]; _t_70_ += u_1[k][j-1][i+1]; _t_69_ += 8.0 * _t_70_; _t_68_ = mu[k][j-1][i] * _t_69_; _t_64_ -= 8.0 * _t_68_; _t_72_ = u_1[k][j+1][i-2]; _t_72_ -= u_1[k][j+1][i+2]; _t_73_ = -u_1[k][j+1][i-1]; _t_73_ += u_1[k][j+1][i+1]; _t_72_ += 8.0 * _t_73_; _t_71_ = mu[k][j+1][i] * _t_72_; _t_64_ += 8.0 * _t_71_; _t_74_ = u_1[k][j+2][i-2]; _t_74_ -= u_1[k][j+2][i+2]; _t_75_ = -u_1[k][j+2][i-1]; _t_75_ += u_1[k][j+2][i+1]; _t_74_ += 8.0 * _t_75_; _t_64_ -= mu[k][j+2][i] * _t_74_; _t_36_ += _t_63_ * _t_64_; _t_78_ = strx[i] * strz[k]; _t_76_ = _t_78_ * 1.0 / 144.0; _t_79_ = u_2[k-2][j][i-2]; _t_79_ -= u_2[k-2][j][i+2]; 
_t_80_ = -u_2[k-2][j][i-1]; _t_80_ += u_2[k-2][j][i+1]; _t_79_ += 8.0 * _t_80_; _t_77_ = mu[k-2][j][i] * _t_79_; _t_82_ = u_2[k-1][j][i-2]; _t_82_ -= u_2[k-1][j][i+2]; _t_83_ = -u_2[k-1][j][i-1]; _t_83_ += u_2[k-1][j][i+1]; _t_82_ += 8.0 * _t_83_; _t_81_ = mu[k-1][j][i] * _t_82_; _t_77_ -= 8.0 * _t_81_; _t_85_ = u_2[k+1][j][i-2]; _t_85_ -= u_2[k+1][j][i+2]; _t_86_ = -u_2[k+1][j][i-1]; _t_86_ += u_2[k+1][j][i+1]; _t_85_ += 8.0 * _t_86_; _t_84_ = mu[k+1][j][i] * _t_85_; _t_77_ += 8.0 * _t_84_; _t_87_ = u_2[k+2][j][i-2]; _t_87_ -= u_2[k+2][j][i+2]; _t_88_ = -u_2[k+2][j][i-1]; _t_88_ += u_2[k+2][j][i+1]; _t_87_ += 8.0 * _t_88_; _t_77_ -= mu[k+2][j][i] * _t_87_; _t_36_ += _t_76_ * _t_77_; r1 += _t_36_; uacc_0kc0jc0ic0 = a1 * uacc_0kc0jc0ic0; uacc_0kc0jc0ic0 += cof * r1; uacc_0[k*N*N+j*N+i] = uacc_0kc0jc0ic0; b_mux1 = mu[k+1][j][i-1] * strx[i-1]; _t_89_ = mu[k+1][j][i] * strx[i]; _t_89_ += mu[k+1][j][i-2] * strx[i-2]; b_mux1 -= 3.0 / 4.0 * _t_89_; b_mux2 = mu[k+1][j][i-2] * strx[i-2]; b_mux2 += mu[k+1][j][i+1] * strx[i+1]; _t_90_ = mu[k+1][j][i] * strx[i]; _t_90_ += mu[k+1][j][i-1] * strx[i-1]; b_mux2 += 3.0 * _t_90_; b_mux3 = mu[k+1][j][i-1] * strx[i-1]; b_mux3 += mu[k+1][j][i+2] * strx[i+2]; _t_91_ = mu[k+1][j][i+1] * strx[i+1]; _t_91_ += mu[k+1][j][i] * strx[i]; b_mux3 += 3.0 * _t_91_; b_mux4 = mu[k+1][j][i+1] * strx[i+1]; _t_92_ = mu[k+1][j][i] * strx[i]; _t_92_ += mu[k+1][j][i+2] * strx[i+2]; b_mux4 -= 3.0 / 4.0 * _t_92_; b_muy1 = mu[k+1][j-1][i] * stry[j-1]; _t_93_ = mu[k+1][j][i] * stry[j]; _t_93_ += mu[k+1][j-2][i] * stry[j-2]; b_muy1 -= 3.0 / 4.0 * _t_93_; b_muy2 = mu[k+1][j-2][i] * stry[j-2]; b_muy2 += mu[k+1][j+1][i] * stry[j+1]; _t_94_ = mu[k+1][j][i] * stry[j]; _t_94_ += mu[k+1][j-1][i] * stry[j-1]; b_muy2 += 3.0 * _t_94_; b_muy3 = mu[k+1][j-1][i] * stry[j-1]; b_muy3 += mu[k+1][j+2][i] * stry[j+2]; _t_95_ = mu[k+1][j+1][i] * stry[j+1]; _t_95_ += mu[k+1][j][i] * stry[j]; b_muy3 += 3.0 * _t_95_; b_muy4 = mu[k+1][j+1][i] * stry[j+1]; _t_96_ = mu[k+1][j][i] * stry[j]; _t_96_ += mu[k+1][j+2][i] * stry[j+2]; b_muy4 -= 3.0 / 4.0 * _t_96_; b_muz1 = mu[k][j][i] * strz[k]; _t_97_ = mu[k+1][j][i] * strz[k+1]; _t_97_ += mu[k-1][j][i] * strz[k-1]; b_muz1 -= 3.0 / 4.0 * _t_97_; b_muz2 = mu[k-1][j][i] * strz[k-1]; b_muz2 += mu[k+2][j][i] * strz[k+2]; _t_98_ = mu[k+1][j][i] * strz[k+1]; _t_98_ += mu[k][j][i] * strz[k]; b_muz2 += 3.0 * _t_98_; b_muz3 = mu[k][j][i] * strz[k]; b_muz3 += mu[k+3][j][i] * strz[k+3]; _t_99_ = mu[k+2][j][i] * strz[k+2]; _t_99_ += mu[k+1][j][i] * strz[k+1]; b_muz3 += 3.0 * _t_99_; b_muz4 = mu[k+2][j][i] * strz[k+2]; _t_100_ = mu[k+1][j][i] * strz[k+1]; _t_100_ += mu[k+3][j][i] * strz[k+3]; b_muz4 -= 3.0 / 4.0 * _t_100_; _t_103_ = 2.0 * b_mux1; _t_103_ += la[k+1][j][i-1] * strx[i-1]; _t_105_ = la[k+1][j][i] * strx[i]; _t_105_ += la[k+1][j][i-2] * strx[i-2]; _t_103_ -= 3.0 / 4.0 * _t_105_; _t_104_ = u_0[k+1][j][i-2]; _t_104_ -= u_0[k+1][j][i]; _t_102_ = _t_103_ * _t_104_; _t_106_ = 2.0 * b_mux2; _t_106_ += la[k+1][j][i-2] * strx[i-2]; _t_106_ += la[k+1][j][i+1] * strx[i+1]; _t_108_ = la[k+1][j][i] * strx[i]; _t_108_ += la[k+1][j][i-1] * strx[i-1]; _t_106_ += 3.0 * _t_108_; _t_107_ = u_0[k+1][j][i-1]; _t_107_ -= u_0[k+1][j][i]; _t_102_ += _t_106_ * _t_107_; _t_109_ = 2.0 * b_mux3; _t_109_ += la[k+1][j][i-1] * strx[i-1]; _t_109_ += la[k+1][j][i+2] * strx[i+2]; _t_111_ = la[k+1][j][i+1] * strx[i+1]; _t_111_ += la[k+1][j][i] * strx[i]; _t_109_ += 3.0 * _t_111_; _t_110_ = u_0[k+1][j][i+1]; _t_110_ -= u_0[k+1][j][i]; _t_102_ += _t_109_ * _t_110_; _t_112_ = 2.0 * b_mux4; _t_112_ += 
la[k+1][j][i+1] * strx[i+1]; _t_114_ = la[k+1][j][i] * strx[i]; _t_114_ += la[k+1][j][i+2] * strx[i+2]; _t_112_ -= 3.0 / 4.0 * _t_114_; _t_113_ = u_0[k+1][j][i+2]; _t_113_ -= u_0[k+1][j][i]; _t_102_ += _t_112_ * _t_113_; _t_101_ = strx[i] * _t_102_; _t_116_ = u_0[k+1][j-2][i]; _t_116_ -= u_0[k+1][j][i]; _t_115_ = b_muy1 * _t_116_; _t_117_ = u_0[k+1][j-1][i]; _t_117_ -= u_0[k+1][j][i]; _t_115_ += b_muy2 * _t_117_; _t_118_ = u_0[k+1][j+1][i]; _t_118_ -= u_0[k+1][j][i]; _t_115_ += b_muy3 * _t_118_; _t_119_ = u_0[k+1][j+2][i]; _t_119_ -= u_0[k+1][j][i]; _t_115_ += b_muy4 * _t_119_; _t_101_ += stry[j] * _t_115_; _t_121_ = u_0[k-1][j][i]; _t_121_ -= u_0[k+1][j][i]; _t_120_ = b_muz1 * _t_121_; _t_122_ = u_0[k][j][i]; _t_122_ -= u_0[k+1][j][i]; _t_120_ += b_muz2 * _t_122_; _t_123_ = u_0[k+2][j][i]; _t_123_ -= u_0[k+1][j][i]; _t_120_ += b_muz3 * _t_123_; _t_124_ = u_0[k+3][j][i]; _t_124_ -= u_0[k+1][j][i]; _t_120_ += b_muz4 * _t_124_; _t_101_ += strz[k+1] * _t_120_; r2 = 1.0 / 6.0 * _t_101_; _t_128_ = strx[i] * stry[j]; _t_126_ = _t_128_ * 1.0 / 144.0; _t_129_ = u_1[k+1][j-2][i-2]; _t_129_ -= u_1[k+1][j+2][i-2]; _t_130_ = -u_1[k+1][j-1][i-2]; _t_130_ += u_1[k+1][j+1][i-2]; _t_129_ += 8.0 * _t_130_; _t_127_ = la[k+1][j][i-2] * _t_129_; _t_132_ = u_1[k+1][j-2][i-1]; _t_132_ -= u_1[k+1][j+2][i-1]; _t_133_ = -u_1[k+1][j-1][i-1]; _t_133_ += u_1[k+1][j+1][i-1]; _t_132_ += 8.0 * _t_133_; _t_131_ = la[k+1][j][i-1] * _t_132_; _t_127_ -= 8.0 * _t_131_; _t_135_ = u_1[k+1][j-2][i+1]; _t_135_ -= u_1[k+1][j+2][i+1]; _t_136_ = -u_1[k+1][j-1][i+1]; _t_136_ += u_1[k+1][j+1][i+1]; _t_135_ += 8.0 * _t_136_; _t_134_ = la[k+1][j][i+1] * _t_135_; _t_127_ += 8.0 * _t_134_; _t_137_ = u_1[k+1][j-2][i+2]; _t_137_ -= u_1[k+1][j+2][i+2]; _t_138_ = -u_1[k+1][j-1][i+2]; _t_138_ += u_1[k+1][j+1][i+2]; _t_137_ += 8.0 * _t_138_; _t_127_ -= la[k+1][j][i+2] * _t_137_; _t_125_ = _t_126_ * _t_127_; _t_141_ = strx[i] * strz[k+1]; _t_139_ = _t_141_ * 1.0 / 144.0; _t_142_ = u_2[k-1][j][i-2]; _t_142_ -= u_2[k+3][j][i-2]; _t_143_ = -u_2[k][j][i-2]; _t_143_ += u_2[k+2][j][i-2]; _t_142_ += 8.0 * _t_143_; _t_140_ = la[k+1][j][i-2] * _t_142_; _t_145_ = u_2[k-1][j][i-1]; _t_145_ -= u_2[k+3][j][i-1]; _t_146_ = -u_2[k][j][i-1]; _t_146_ += u_2[k+2][j][i-1]; _t_145_ += 8.0 * _t_146_; _t_144_ = la[k+1][j][i-1] * _t_145_; _t_140_ -= 8.0 * _t_144_; _t_148_ = u_2[k-1][j][i+1]; _t_148_ -= u_2[k+3][j][i+1]; _t_149_ = -u_2[k][j][i+1]; _t_149_ += u_2[k+2][j][i+1]; _t_148_ += 8.0 * _t_149_; _t_147_ = la[k+1][j][i+1] * _t_148_; _t_140_ += 8.0 * _t_147_; _t_150_ = u_2[k-1][j][i+2]; _t_150_ -= u_2[k+3][j][i+2]; _t_151_ = -u_2[k][j][i+2]; _t_151_ += u_2[k+2][j][i+2]; _t_150_ += 8.0 * _t_151_; _t_140_ -= la[k+1][j][i+2] * _t_150_; _t_125_ += _t_139_ * _t_140_; _t_154_ = strx[i] * stry[j]; _t_152_ = _t_154_ * 1.0 / 144.0; _t_155_ = u_1[k+1][j-2][i-2]; _t_155_ -= u_1[k+1][j-2][i+2]; _t_156_ = -u_1[k+1][j-2][i-1]; _t_156_ += u_1[k+1][j-2][i+1]; _t_155_ += 8.0 * _t_156_; _t_153_ = mu[k+1][j-2][i] * _t_155_; _t_158_ = u_1[k+1][j-1][i-2]; _t_158_ -= u_1[k+1][j-1][i+2]; _t_159_ = -u_1[k+1][j-1][i-1]; _t_159_ += u_1[k+1][j-1][i+1]; _t_158_ += 8.0 * _t_159_; _t_157_ = mu[k+1][j-1][i] * _t_158_; _t_153_ -= 8.0 * _t_157_; _t_161_ = u_1[k+1][j+1][i-2]; _t_161_ -= u_1[k+1][j+1][i+2]; _t_162_ = -u_1[k+1][j+1][i-1]; _t_162_ += u_1[k+1][j+1][i+1]; _t_161_ += 8.0 * _t_162_; _t_160_ = mu[k+1][j+1][i] * _t_161_; _t_153_ += 8.0 * _t_160_; _t_163_ = u_1[k+1][j+2][i-2]; _t_163_ -= u_1[k+1][j+2][i+2]; _t_164_ = -u_1[k+1][j+2][i-1]; _t_164_ += u_1[k+1][j+2][i+1]; _t_163_ += 8.0 * 
_t_164_; _t_153_ -= mu[k+1][j+2][i] * _t_163_; _t_125_ += _t_152_ * _t_153_; _t_167_ = strx[i] * strz[k+1]; _t_165_ = _t_167_ * 1.0 / 144.0; _t_168_ = u_2[k-1][j][i-2]; _t_168_ -= u_2[k-1][j][i+2]; _t_169_ = -u_2[k-1][j][i-1]; _t_169_ += u_2[k-1][j][i+1]; _t_168_ += 8.0 * _t_169_; _t_166_ = mu[k-1][j][i] * _t_168_; _t_171_ = u_2[k][j][i-2]; _t_171_ -= u_2[k][j][i+2]; _t_172_ = -u_2[k][j][i-1]; _t_172_ += u_2[k][j][i+1]; _t_171_ += 8.0 * _t_172_; _t_170_ = mu[k][j][i] * _t_171_; _t_166_ -= 8.0 * _t_170_; _t_174_ = u_2[k+2][j][i-2]; _t_174_ -= u_2[k+2][j][i+2]; _t_175_ = -u_2[k+2][j][i-1]; _t_175_ += u_2[k+2][j][i+1]; _t_174_ += 8.0 * _t_175_; _t_173_ = mu[k+2][j][i] * _t_174_; _t_166_ += 8.0 * _t_173_; _t_176_ = u_2[k+3][j][i-2]; _t_176_ -= u_2[k+3][j][i+2]; _t_177_ = -u_2[k+3][j][i-1]; _t_177_ += u_2[k+3][j][i+1]; _t_176_ += 8.0 * _t_177_; _t_166_ -= mu[k+3][j][i] * _t_176_; _t_125_ += _t_165_ * _t_166_; r2 += _t_125_; uacc_0kp1jc0ic0 = a1 * uacc_0kp1jc0ic0; uacc_0kp1jc0ic0 += cof * r2; uacc_0[(k+1)*N*N+j*N+i] = uacc_0kp1jc0ic0; } } } __global__ void __launch_bounds__ (128,2) sw4_b (double * uacc_1, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * __restrict__ strx, double * __restrict__ stry, double * __restrict__ strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; /* Total 687 flops */ if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 2 for (int k=2; k<=N-3; k+=2) { double a_mux1; double _t_0_; double a_mux2; double _t_1_; double a_mux3; double _t_2_; double a_mux4; double _t_3_; double a_muy1; double _t_4_; double a_muy2; double _t_5_; double a_muy3; double _t_6_; double a_muy4; double _t_7_; double a_muz1; double _t_8_; double a_muz2; double _t_9_; double a_muz3; double _t_10_; double a_muz4; double _t_11_; double _t_14_; double _t_13_; double _t_15_; double _t_16_; double _t_17_; double _t_12_; double _t_19_; double _t_21_; double _t_20_; double _t_18_; double _t_22_; double _t_24_; double _t_23_; double _t_25_; double _t_27_; double _t_26_; double _t_28_; double _t_30_; double _t_29_; double _t_32_; double _t_31_; double _t_33_; double _t_34_; double _t_35_; double r2; double _t_39_; double _t_37_; double _t_40_; double _t_41_; double _t_38_; double _t_43_; double _t_44_; double _t_42_; double _t_46_; double _t_47_; double _t_45_; double _t_48_; double _t_49_; double _t_36_; double _t_52_; double _t_50_; double _t_53_; double _t_54_; double _t_51_; double _t_56_; double _t_57_; double _t_55_; double _t_59_; double _t_60_; double _t_58_; double _t_61_; double _t_62_; double _t_65_; double _t_63_; double _t_66_; double _t_67_; double _t_64_; double _t_69_; double _t_70_; double _t_68_; double _t_72_; double _t_73_; double _t_71_; double _t_74_; double _t_75_; double _t_78_; double _t_76_; double _t_79_; double _t_80_; double _t_77_; double _t_82_; double _t_83_; double _t_81_; double _t_85_; double _t_86_; 
double _t_84_; double _t_87_; double _t_88_; double uacc_1kc0jc0ic0 = uacc_1[k*N*N+j*N+i]; double b_mux1; double _t_89_; double b_mux2; double _t_90_; double b_mux3; double _t_91_; double b_mux4; double _t_92_; double b_muy1; double _t_93_; double b_muy2; double _t_94_; double b_muy3; double _t_95_; double b_muy4; double _t_96_; double b_muz1; double _t_97_; double b_muz2; double _t_98_; double b_muz3; double _t_99_; double b_muz4; double _t_100_; double _t_103_; double _t_102_; double _t_104_; double _t_105_; double _t_106_; double _t_101_; double _t_108_; double _t_110_; double _t_109_; double _t_107_; double _t_111_; double _t_113_; double _t_112_; double _t_114_; double _t_116_; double _t_115_; double _t_117_; double _t_119_; double _t_118_; double _t_121_; double _t_120_; double _t_122_; double _t_123_; double _t_124_; double r3; double _t_128_; double _t_126_; double _t_129_; double _t_130_; double _t_127_; double _t_132_; double _t_133_; double _t_131_; double _t_135_; double _t_136_; double _t_134_; double _t_137_; double _t_138_; double _t_125_; double _t_141_; double _t_139_; double _t_142_; double _t_143_; double _t_140_; double _t_145_; double _t_146_; double _t_144_; double _t_148_; double _t_149_; double _t_147_; double _t_150_; double _t_151_; double _t_154_; double _t_152_; double _t_155_; double _t_156_; double _t_153_; double _t_158_; double _t_159_; double _t_157_; double _t_161_; double _t_162_; double _t_160_; double _t_163_; double _t_164_; double _t_167_; double _t_165_; double _t_168_; double _t_169_; double _t_166_; double _t_171_; double _t_172_; double _t_170_; double _t_174_; double _t_175_; double _t_173_; double _t_176_; double _t_177_; double uacc_1kp1jc0ic0 = uacc_1[(k+1)*N*N+j*N+i]; a_mux1 = mu[k][j][i-1] * strx[i-1]; _t_0_ = mu[k][j][i] * strx[i]; _t_0_ += mu[k][j][i-2] * strx[i-2]; a_mux1 -= 3.0 / 4.0 * _t_0_; a_mux2 = mu[k][j][i-2] * strx[i-2]; a_mux2 += mu[k][j][i+1] * strx[i+1]; _t_1_ = mu[k][j][i] * strx[i]; _t_1_ += mu[k][j][i-1] * strx[i-1]; a_mux2 += 3.0 * _t_1_; a_mux3 = mu[k][j][i-1] * strx[i-1]; a_mux3 += mu[k][j][i+2] * strx[i+2]; _t_2_ = mu[k][j][i+1] * strx[i+1]; _t_2_ += mu[k][j][i] * strx[i]; a_mux3 += 3.0 * _t_2_; a_mux4 = mu[k][j][i+1] * strx[i+1]; _t_3_ = mu[k][j][i] * strx[i]; _t_3_ += mu[k][j][i+2] * strx[i+2]; a_mux4 -= 3.0 / 4.0 * _t_3_; a_muy1 = mu[k][j-1][i] * stry[j-1]; _t_4_ = mu[k][j][i] * stry[j]; _t_4_ += mu[k][j-2][i] * stry[j-2]; a_muy1 -= 3.0 / 4.0 * _t_4_; a_muy2 = mu[k][j-2][i] * stry[j-2]; a_muy2 += mu[k][j+1][i] * stry[j+1]; _t_5_ = mu[k][j][i] * stry[j]; _t_5_ += mu[k][j-1][i] * stry[j-1]; a_muy2 += 3.0 * _t_5_; a_muy3 = mu[k][j-1][i] * stry[j-1]; a_muy3 += mu[k][j+2][i] * stry[j+2]; _t_6_ = mu[k][j+1][i] * stry[j+1]; _t_6_ += mu[k][j][i] * stry[j]; a_muy3 += 3.0 * _t_6_; a_muy4 = mu[k][j+1][i] * stry[j+1]; _t_7_ = mu[k][j][i] * stry[j]; _t_7_ += mu[k][j+2][i] * stry[j+2]; a_muy4 -= 3.0 / 4.0 * _t_7_; a_muz1 = mu[k-1][j][i] * strz[k-1]; _t_8_ = mu[k][j][i] * strz[k]; _t_8_ += mu[k-2][j][i] * strz[k-2]; a_muz1 -= 3.0 / 4.0 * _t_8_; a_muz2 = mu[k-2][j][i] * strz[k-2]; a_muz2 += mu[k+1][j][i] * strz[k+1]; _t_9_ = mu[k][j][i] * strz[k]; _t_9_ += mu[k-1][j][i] * strz[k-1]; a_muz2 += 3.0 * _t_9_; a_muz3 = mu[k-1][j][i] * strz[k-1]; a_muz3 += mu[k+2][j][i] * strz[k+2]; _t_10_ = mu[k+1][j][i] * strz[k+1]; _t_10_ += mu[k][j][i] * strz[k]; a_muz3 += 3.0 * _t_10_; a_muz4 = mu[k+1][j][i] * strz[k+1]; _t_11_ = mu[k][j][i] * strz[k]; _t_11_ += mu[k+2][j][i] * strz[k+2]; a_muz4 -= 3.0 / 4.0 * _t_11_; _t_14_ = u_1[k][j][i-2]; _t_14_ 
-= u_1[k][j][i]; _t_13_ = a_mux1 * _t_14_; _t_15_ = u_1[k][j][i-1]; _t_15_ -= u_1[k][j][i]; _t_13_ += a_mux2 * _t_15_; _t_16_ = u_1[k][j][i+1]; _t_16_ -= u_1[k][j][i]; _t_13_ += a_mux3 * _t_16_; _t_17_ = u_1[k][j][i+2]; _t_17_ -= u_1[k][j][i]; _t_13_ += a_mux4 * _t_17_; _t_12_ = strx[i] * _t_13_; _t_19_ = 2.0 * a_muy1; _t_19_ += la[k][j-1][i] * stry[j-1]; _t_21_ = la[k][j][i] * stry[j]; _t_21_ += la[k][j-2][i] * stry[j-2]; _t_19_ -= 3.0 / 4.0 * _t_21_; _t_20_ = u_1[k][j-2][i]; _t_20_ -= u_1[k][j][i]; _t_18_ = _t_19_ * _t_20_; _t_22_ = 2.0 * a_muy2; _t_22_ += la[k][j-2][i] * stry[j-2]; _t_22_ += la[k][j+1][i] * stry[j+1]; _t_24_ = la[k][j][i] * stry[j]; _t_24_ += la[k][j-1][i] * stry[j-1]; _t_22_ += 3.0 * _t_24_; _t_23_ = u_1[k][j-1][i]; _t_23_ -= u_1[k][j][i]; _t_18_ += _t_22_ * _t_23_; _t_25_ = 2.0 * a_muy3; _t_25_ += la[k][j-1][i] * stry[j-1]; _t_25_ += la[k][j+2][i] * stry[j+2]; _t_27_ = la[k][j+1][i] * stry[j+1]; _t_27_ += la[k][j][i] * stry[j]; _t_25_ += 3.0 * _t_27_; _t_26_ = u_1[k][j+1][i]; _t_26_ -= u_1[k][j][i]; _t_18_ += _t_25_ * _t_26_; _t_28_ = 2.0 * a_muy4; _t_28_ += la[k][j+1][i] * stry[j+1]; _t_30_ = la[k][j][i] * stry[j]; _t_30_ += la[k][j+2][i] * stry[j+2]; _t_28_ -= 3.0 / 4.0 * _t_30_; _t_29_ = u_1[k][j+2][i]; _t_29_ -= u_1[k][j][i]; _t_18_ += _t_28_ * _t_29_; _t_12_ += stry[j] * _t_18_; _t_32_ = u_1[k-2][j][i]; _t_32_ -= u_1[k][j][i]; _t_31_ = a_muz1 * _t_32_; _t_33_ = u_1[k-1][j][i]; _t_33_ -= u_1[k][j][i]; _t_31_ += a_muz2 * _t_33_; _t_34_ = u_1[k+1][j][i]; _t_34_ -= u_1[k][j][i]; _t_31_ += a_muz3 * _t_34_; _t_35_ = u_1[k+2][j][i]; _t_35_ -= u_1[k][j][i]; _t_31_ += a_muz4 * _t_35_; _t_12_ += strz[k] * _t_31_; r2 = 1.0 / 6.0 * _t_12_; _t_39_ = strx[i] * stry[j]; _t_37_ = _t_39_ * 1.0 / 144.0; _t_40_ = u_0[k][j-2][i-2]; _t_40_ -= u_0[k][j+2][i-2]; _t_41_ = -u_0[k][j-1][i-2]; _t_41_ += u_0[k][j+1][i-2]; _t_40_ += 8.0 * _t_41_; _t_38_ = mu[k][j][i-2] * _t_40_; _t_43_ = u_0[k][j-2][i-1]; _t_43_ -= u_0[k][j+2][i-1]; _t_44_ = -u_0[k][j-1][i-1]; _t_44_ += u_0[k][j+1][i-1]; _t_43_ += 8.0 * _t_44_; _t_42_ = mu[k][j][i-1] * _t_43_; _t_38_ -= 8.0 * _t_42_; _t_46_ = u_0[k][j-2][i+1]; _t_46_ -= u_0[k][j+2][i+1]; _t_47_ = -u_0[k][j-1][i+1]; _t_47_ += u_0[k][j+1][i+1]; _t_46_ += 8.0 * _t_47_; _t_45_ = mu[k][j][i+1] * _t_46_; _t_38_ += 8.0 * _t_45_; _t_48_ = u_0[k][j-2][i+2]; _t_48_ -= u_0[k][j+2][i+2]; _t_49_ = -u_0[k][j-1][i+2]; _t_49_ += u_0[k][j+1][i+2]; _t_48_ += 8.0 * _t_49_; _t_38_ -= mu[k][j][i+2] * _t_48_; _t_36_ = _t_37_ * _t_38_; _t_52_ = strx[i] * stry[j]; _t_50_ = _t_52_ * 1.0 / 144.0; _t_53_ = u_0[k][j-2][i-2]; _t_53_ -= u_0[k][j-2][i+2]; _t_54_ = -u_0[k][j-2][i-1]; _t_54_ += u_0[k][j-2][i+1]; _t_53_ += 8.0 * _t_54_; _t_51_ = la[k][j-2][i] * _t_53_; _t_56_ = u_0[k][j-1][i-2]; _t_56_ -= u_0[k][j-1][i+2]; _t_57_ = -u_0[k][j-1][i-1]; _t_57_ += u_0[k][j-1][i+1]; _t_56_ += 8.0 * _t_57_; _t_55_ = la[k][j-1][i] * _t_56_; _t_51_ -= 8.0 * _t_55_; _t_59_ = u_0[k][j+1][i-2]; _t_59_ -= u_0[k][j+1][i+2]; _t_60_ = -u_0[k][j+1][i-1]; _t_60_ += u_0[k][j+1][i+1]; _t_59_ += 8.0 * _t_60_; _t_58_ = la[k][j+1][i] * _t_59_; _t_51_ += 8.0 * _t_58_; _t_61_ = u_0[k][j+2][i-2]; _t_61_ -= u_0[k][j+2][i+2]; _t_62_ = -u_0[k][j+2][i-1]; _t_62_ += u_0[k][j+2][i+1]; _t_61_ += 8.0 * _t_62_; _t_51_ -= la[k][j+2][i] * _t_61_; _t_36_ += _t_50_ * _t_51_; _t_65_ = stry[j] * strz[k]; _t_63_ = _t_65_ * 1.0 / 144.0; _t_66_ = u_2[k-2][j-2][i]; _t_66_ -= u_2[k+2][j-2][i]; _t_67_ = -u_2[k-1][j-2][i]; _t_67_ += u_2[k+1][j-2][i]; _t_66_ += 8.0 * _t_67_; _t_64_ = la[k][j-2][i] * _t_66_; _t_69_ = u_2[k-2][j-1][i]; 
_t_69_ -= u_2[k+2][j-1][i]; _t_70_ = -u_2[k-1][j-1][i]; _t_70_ += u_2[k+1][j-1][i]; _t_69_ += 8.0 * _t_70_; _t_68_ = la[k][j-1][i] * _t_69_; _t_64_ -= 8.0 * _t_68_; _t_72_ = u_2[k-2][j+1][i]; _t_72_ -= u_2[k+2][j+1][i]; _t_73_ = -u_2[k-1][j+1][i]; _t_73_ += u_2[k+1][j+1][i]; _t_72_ += 8.0 * _t_73_; _t_71_ = la[k][j+1][i] * _t_72_; _t_64_ += 8.0 * _t_71_; _t_74_ = u_2[k-2][j+2][i]; _t_74_ -= u_2[k+2][j+2][i]; _t_75_ = -u_2[k-1][j+2][i]; _t_75_ += u_2[k+1][j+2][i]; _t_74_ += 8.0 * _t_75_; _t_64_ -= la[k][j+2][i] * _t_74_; _t_36_ += _t_63_ * _t_64_; _t_78_ = stry[j] * strz[k]; _t_76_ = _t_78_ * 1.0 / 144.0; _t_79_ = u_2[k-2][j-2][i]; _t_79_ -= u_2[k-2][j+2][i]; _t_80_ = -u_2[k-2][j-1][i]; _t_80_ += u_2[k-2][j+1][i]; _t_79_ += 8.0 * _t_80_; _t_77_ = mu[k-2][j][i] * _t_79_; _t_82_ = u_2[k-1][j-2][i]; _t_82_ -= u_2[k-1][j+2][i]; _t_83_ = -u_2[k-1][j-1][i]; _t_83_ += u_2[k-1][j+1][i]; _t_82_ += 8.0 * _t_83_; _t_81_ = mu[k-1][j][i] * _t_82_; _t_77_ -= 8.0 * _t_81_; _t_85_ = u_2[k+1][j-2][i]; _t_85_ -= u_2[k+1][j+2][i]; _t_86_ = -u_2[k+1][j-1][i]; _t_86_ += u_2[k+1][j+1][i]; _t_85_ += 8.0 * _t_86_; _t_84_ = mu[k+1][j][i] * _t_85_; _t_77_ += 8.0 * _t_84_; _t_87_ = u_2[k+2][j-2][i]; _t_87_ -= u_2[k+2][j+2][i]; _t_88_ = -u_2[k+2][j-1][i]; _t_88_ += u_2[k+2][j+1][i]; _t_87_ += 8.0 * _t_88_; _t_77_ -= mu[k+2][j][i] * _t_87_; _t_36_ += _t_76_ * _t_77_; r2 += _t_36_; uacc_1kc0jc0ic0 = a1 * uacc_1kc0jc0ic0; uacc_1kc0jc0ic0 += cof * r2; uacc_1[k*N*N+j*N+i] = uacc_1kc0jc0ic0; b_mux1 = mu[k+1][j][i-1] * strx[i-1]; _t_89_ = mu[k+1][j][i] * strx[i]; _t_89_ += mu[k+1][j][i-2] * strx[i-2]; b_mux1 -= 3.0 / 4.0 * _t_89_; b_mux2 = mu[k+1][j][i-2] * strx[i-2]; b_mux2 += mu[k+1][j][i+1] * strx[i+1]; _t_90_ = mu[k+1][j][i] * strx[i]; _t_90_ += mu[k+1][j][i-1] * strx[i-1]; b_mux2 += 3.0 * _t_90_; b_mux3 = mu[k+1][j][i-1] * strx[i-1]; b_mux3 += mu[k+1][j][i+2] * strx[i+2]; _t_91_ = mu[k+1][j][i+1] * strx[i+1]; _t_91_ += mu[k+1][j][i] * strx[i]; b_mux3 += 3.0 * _t_91_; b_mux4 = mu[k+1][j][i+1] * strx[i+1]; _t_92_ = mu[k+1][j][i] * strx[i]; _t_92_ += mu[k+1][j][i+2] * strx[i+2]; b_mux4 -= 3.0 / 4.0 * _t_92_; b_muy1 = mu[k+1][j-1][i] * stry[j-1]; _t_93_ = mu[k+1][j][i] * stry[j]; _t_93_ += mu[k+1][j-2][i] * stry[j-2]; b_muy1 -= 3.0 / 4.0 * _t_93_; b_muy2 = mu[k+1][j-2][i] * stry[j-2]; b_muy2 += mu[k+1][j+1][i] * stry[j+1]; _t_94_ = mu[k+1][j][i] * stry[j]; _t_94_ += mu[k+1][j-1][i] * stry[j-1]; b_muy2 += 3.0 * _t_94_; b_muy3 = mu[k+1][j-1][i] * stry[j-1]; b_muy3 += mu[k+1][j+2][i] * stry[j+2]; _t_95_ = mu[k+1][j+1][i] * stry[j+1]; _t_95_ += mu[k+1][j][i] * stry[j]; b_muy3 += 3.0 * _t_95_; b_muy4 = mu[k+1][j+1][i] * stry[j+1]; _t_96_ = mu[k+1][j][i] * stry[j]; _t_96_ += mu[k+1][j+2][i] * stry[j+2]; b_muy4 -= 3.0 / 4.0 * _t_96_; b_muz1 = mu[k][j][i] * strz[k]; _t_97_ = mu[k+1][j][i] * strz[k+1]; _t_97_ += mu[k-1][j][i] * strz[k-1]; b_muz1 -= 3.0 / 4.0 * _t_97_; b_muz2 = mu[k-1][j][i] * strz[k-1]; b_muz2 += mu[k+2][j][i] * strz[k+2]; _t_98_ = mu[k+1][j][i] * strz[k+1]; _t_98_ += mu[k][j][i] * strz[k]; b_muz2 += 3.0 * _t_98_; b_muz3 = mu[k][j][i] * strz[k]; b_muz3 += mu[k+3][j][i] * strz[k+3]; _t_99_ = mu[k+2][j][i] * strz[k+2]; _t_99_ += mu[k+1][j][i] * strz[k+1]; b_muz3 += 3.0 * _t_99_; b_muz4 = mu[k+2][j][i] * strz[k+2]; _t_100_ = mu[k+1][j][i] * strz[k+1]; _t_100_ += mu[k+3][j][i] * strz[k+3]; b_muz4 -= 3.0 / 4.0 * _t_100_; _t_103_ = u_1[k+1][j][i-2]; _t_103_ -= u_1[k+1][j][i]; _t_102_ = b_mux1 * _t_103_; _t_104_ = u_1[k+1][j][i-1]; _t_104_ -= u_1[k+1][j][i]; _t_102_ += b_mux2 * _t_104_; _t_105_ = u_1[k+1][j][i+1]; _t_105_ 
-= u_1[k+1][j][i]; _t_102_ += b_mux3 * _t_105_; _t_106_ = u_1[k+1][j][i+2]; _t_106_ -= u_1[k+1][j][i]; _t_102_ += b_mux4 * _t_106_; _t_101_ = strx[i] * _t_102_; _t_108_ = 2.0 * b_muy1; _t_108_ += la[k+1][j-1][i] * stry[j-1]; _t_110_ = la[k+1][j][i] * stry[j]; _t_110_ += la[k+1][j-2][i] * stry[j-2]; _t_108_ -= 3.0 / 4.0 * _t_110_; _t_109_ = u_1[k+1][j-2][i]; _t_109_ -= u_1[k+1][j][i]; _t_107_ = _t_108_ * _t_109_; _t_111_ = 2.0 * b_muy2; _t_111_ += la[k+1][j-2][i] * stry[j-2]; _t_111_ += la[k+1][j+1][i] * stry[j+1]; _t_113_ = la[k+1][j][i] * stry[j]; _t_113_ += la[k+1][j-1][i] * stry[j-1]; _t_111_ += 3.0 * _t_113_; _t_112_ = u_1[k+1][j-1][i]; _t_112_ -= u_1[k+1][j][i]; _t_107_ += _t_111_ * _t_112_; _t_114_ = 2.0 * b_muy3; _t_114_ += la[k+1][j-1][i] * stry[j-1]; _t_114_ += la[k+1][j+2][i] * stry[j+2]; _t_116_ = la[k+1][j+1][i] * stry[j+1]; _t_116_ += la[k+1][j][i] * stry[j]; _t_114_ += 3.0 * _t_116_; _t_115_ = u_1[k+1][j+1][i]; _t_115_ -= u_1[k+1][j][i]; _t_107_ += _t_114_ * _t_115_; _t_117_ = 2.0 * b_muy4; _t_117_ += la[k+1][j+1][i] * stry[j+1]; _t_119_ = la[k+1][j][i] * stry[j]; _t_119_ += la[k+1][j+2][i] * stry[j+2]; _t_117_ -= 3.0 / 4.0 * _t_119_; _t_118_ = u_1[k+1][j+2][i]; _t_118_ -= u_1[k+1][j][i]; _t_107_ += _t_117_ * _t_118_; _t_101_ += stry[j] * _t_107_; _t_121_ = u_1[k-1][j][i]; _t_121_ -= u_1[k+1][j][i]; _t_120_ = b_muz1 * _t_121_; _t_122_ = u_1[k][j][i]; _t_122_ -= u_1[k+1][j][i]; _t_120_ += b_muz2 * _t_122_; _t_123_ = u_1[k+2][j][i]; _t_123_ -= u_1[k+1][j][i]; _t_120_ += b_muz3 * _t_123_; _t_124_ = u_1[k+3][j][i]; _t_124_ -= u_1[k+1][j][i]; _t_120_ += b_muz4 * _t_124_; _t_101_ += strz[k+1] * _t_120_; r3 = 1.0 / 6.0 * _t_101_; _t_128_ = strx[i] * stry[j]; _t_126_ = _t_128_ * 1.0 / 144.0; _t_129_ = u_0[k+1][j-2][i-2]; _t_129_ -= u_0[k+1][j+2][i-2]; _t_130_ = -u_0[k+1][j-1][i-2]; _t_130_ += u_0[k+1][j+1][i-2]; _t_129_ += 8.0 * _t_130_; _t_127_ = mu[k+1][j][i-2] * _t_129_; _t_132_ = u_0[k+1][j-2][i-1]; _t_132_ -= u_0[k+1][j+2][i-1]; _t_133_ = -u_0[k+1][j-1][i-1]; _t_133_ += u_0[k+1][j+1][i-1]; _t_132_ += 8.0 * _t_133_; _t_131_ = mu[k+1][j][i-1] * _t_132_; _t_127_ -= 8.0 * _t_131_; _t_135_ = u_0[k+1][j-2][i+1]; _t_135_ -= u_0[k+1][j+2][i+1]; _t_136_ = -u_0[k+1][j-1][i+1]; _t_136_ += u_0[k+1][j+1][i+1]; _t_135_ += 8.0 * _t_136_; _t_134_ = mu[k+1][j][i+1] * _t_135_; _t_127_ += 8.0 * _t_134_; _t_137_ = u_0[k+1][j-2][i+2]; _t_137_ -= u_0[k+1][j+2][i+2]; _t_138_ = -u_0[k+1][j-1][i+2]; _t_138_ += u_0[k+1][j+1][i+2]; _t_137_ += 8.0 * _t_138_; _t_127_ -= mu[k+1][j][i+2] * _t_137_; _t_125_ = _t_126_ * _t_127_; _t_141_ = strx[i] * stry[j]; _t_139_ = _t_141_ * 1.0 / 144.0; _t_142_ = u_0[k+1][j-2][i-2]; _t_142_ -= u_0[k+1][j-2][i+2]; _t_143_ = -u_0[k+1][j-2][i-1]; _t_143_ += u_0[k+1][j-2][i+1]; _t_142_ += 8.0 * _t_143_; _t_140_ = la[k+1][j-2][i] * _t_142_; _t_145_ = u_0[k+1][j-1][i-2]; _t_145_ -= u_0[k+1][j-1][i+2]; _t_146_ = -u_0[k+1][j-1][i-1]; _t_146_ += u_0[k+1][j-1][i+1]; _t_145_ += 8.0 * _t_146_; _t_144_ = la[k+1][j-1][i] * _t_145_; _t_140_ -= 8.0 * _t_144_; _t_148_ = u_0[k+1][j+1][i-2]; _t_148_ -= u_0[k+1][j+1][i+2]; _t_149_ = -u_0[k+1][j+1][i-1]; _t_149_ += u_0[k+1][j+1][i+1]; _t_148_ += 8.0 * _t_149_; _t_147_ = la[k+1][j+1][i] * _t_148_; _t_140_ += 8.0 * _t_147_; _t_150_ = u_0[k+1][j+2][i-2]; _t_150_ -= u_0[k+1][j+2][i+2]; _t_151_ = -u_0[k+1][j+2][i-1]; _t_151_ += u_0[k+1][j+2][i+1]; _t_150_ += 8.0 * _t_151_; _t_140_ -= la[k+1][j+2][i] * _t_150_; _t_125_ += _t_139_ * _t_140_; _t_154_ = stry[j] * strz[k+1]; _t_152_ = _t_154_ * 1.0 / 144.0; _t_155_ = u_2[k-1][j-2][i]; _t_155_ -= 
u_2[k+3][j-2][i]; _t_156_ = -u_2[k][j-2][i]; _t_156_ += u_2[k+2][j-2][i]; _t_155_ += 8.0 * _t_156_; _t_153_ = la[k+1][j-2][i] * _t_155_; _t_158_ = u_2[k-1][j-1][i]; _t_158_ -= u_2[k+3][j-1][i]; _t_159_ = -u_2[k][j-1][i]; _t_159_ += u_2[k+2][j-1][i]; _t_158_ += 8.0 * _t_159_; _t_157_ = la[k+1][j-1][i] * _t_158_; _t_153_ -= 8.0 * _t_157_; _t_161_ = u_2[k-1][j+1][i]; _t_161_ -= u_2[k+3][j+1][i]; _t_162_ = -u_2[k][j+1][i]; _t_162_ += u_2[k+2][j+1][i]; _t_161_ += 8.0 * _t_162_; _t_160_ = la[k+1][j+1][i] * _t_161_; _t_153_ += 8.0 * _t_160_; _t_163_ = u_2[k-1][j+2][i]; _t_163_ -= u_2[k+3][j+2][i]; _t_164_ = -u_2[k][j+2][i]; _t_164_ += u_2[k+2][j+2][i]; _t_163_ += 8.0 * _t_164_; _t_153_ -= la[k+1][j+2][i] * _t_163_; _t_125_ += _t_152_ * _t_153_; _t_167_ = stry[j] * strz[k+1]; _t_165_ = _t_167_ * 1.0 / 144.0; _t_168_ = u_2[k-1][j-2][i]; _t_168_ -= u_2[k-1][j+2][i]; _t_169_ = -u_2[k-1][j-1][i]; _t_169_ += u_2[k-1][j+1][i]; _t_168_ += 8.0 * _t_169_; _t_166_ = mu[k-1][j][i] * _t_168_; _t_171_ = u_2[k][j-2][i]; _t_171_ -= u_2[k][j+2][i]; _t_172_ = -u_2[k][j-1][i]; _t_172_ += u_2[k][j+1][i]; _t_171_ += 8.0 * _t_172_; _t_170_ = mu[k][j][i] * _t_171_; _t_166_ -= 8.0 * _t_170_; _t_174_ = u_2[k+2][j-2][i]; _t_174_ -= u_2[k+2][j+2][i]; _t_175_ = -u_2[k+2][j-1][i]; _t_175_ += u_2[k+2][j+1][i]; _t_174_ += 8.0 * _t_175_; _t_173_ = mu[k+2][j][i] * _t_174_; _t_166_ += 8.0 * _t_173_; _t_176_ = u_2[k+3][j-2][i]; _t_176_ -= u_2[k+3][j+2][i]; _t_177_ = -u_2[k+3][j-1][i]; _t_177_ += u_2[k+3][j+1][i]; _t_176_ += 8.0 * _t_177_; _t_166_ -= mu[k+3][j][i] * _t_176_; _t_125_ += _t_165_ * _t_166_; r3 += _t_125_; uacc_1kp1jc0ic0 = a1 * uacc_1kp1jc0ic0; uacc_1kp1jc0ic0 += cof * r3; uacc_1[(k+1)*N*N+j*N+i] = uacc_1kp1jc0ic0; } } } __global__ void __launch_bounds__ (128,2) sw4_c (double * uacc_2, double * __restrict__ u_0, double * __restrict__ u_1, double * __restrict__ u_2, double * __restrict__ mu, double * __restrict__ la, double * __restrict__ strx, double * __restrict__ stry, double * __restrict__ strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); /* Total 687 flops */ if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 10 for (int k=2; k<=N-3; k++) { /* 28 * 3 = 84 flops */ double mux1 = mu[k*N*N+j*N+i-1] * strx[i-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i-2] * strx[i-2]); double mux2 = mu[k*N*N+j*N+i-2] * strx[i-2] + mu[k*N*N+j*N+i+1] * strx[i+1] + 3 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i-1] * strx[i-1]); double mux3 = mu[k*N*N+j*N+i-1] * strx[i-1] + mu[k*N*N+j*N+i+2] * strx[i+2] + 3 * (mu[k*N*N+j*N+i+1] * strx[i+1] + mu[k*N*N+j*N+i] * strx[i]); double mux4 = mu[k*N*N+j*N+i+1] * strx[i+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i+2] * strx[i+2]); double muy1 = mu[k*N*N+(j-1)*N+i] * stry[j-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * stry[j] + mu[k*N*N+(j-2)*N+i] * stry[j-2]); double muy2 = mu[k*N*N+(j-2)*N+i] * stry[j-2] + mu[k*N*N+(j+1)*N+i] * stry[j+1] + 3 * (mu[k*N*N+j*N+i] * stry[j] + mu[k*N*N+(j-1)*N+i] * stry[j-1]); double muy3 = mu[k*N*N+(j-1)*N+i] * stry[j-1] + mu[k*N*N+(j+2)*N+i] * stry[j+2] + 3 * (mu[k*N*N+(j+1)*N+i] * stry[j+1] + mu[k*N*N+j*N+i] * stry[j]); double muy4 = mu[k*N*N+(j+1)*N+i] * stry[j+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * stry[j] + mu[k*N*N+(j+2)*N+i] * stry[j+2]); double 
muz1 = mu[(k-1)*N*N+j*N+i] * strz[k-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k-2)*N*N+j*N+i] * strz[k-2]); double muz2 = mu[(k-2)*N*N+j*N+i] * strz[k-2] + mu[(k+1)*N*N+j*N+i] * strz[k+1] + 3 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k-1)*N*N+j*N+i] * strz[k-1]); double muz3 = mu[(k-1)*N*N+j*N+i] * strz[k-1] + mu[(k+2)*N*N+j*N+i] * strz[k+2] + 3 * (mu[(k+1)*N*N+j*N+i] * strz[k+1] + mu[k*N*N+j*N+i] * strz[k]); double muz4 = mu[(k+1)*N*N+j*N+i] * strz[k+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k+2)*N*N+j*N+i] * strz[k+2]); double r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k*N*N+j*N+i-2] - u_2[k*N*N+j*N+i]) + mux2 * (u_2[k*N*N+j*N+i-1] - u_2[k*N*N+j*N+i]) + mux3 * (u_2[k*N*N+j*N+i+1] - u_2[k*N*N+j*N+i]) + mux4 * (u_2[k*N*N+j*N+i+2] - u_2[k*N*N+j*N+i])) + stry[j] * (muy1 * (u_2[k*N*N+(j-2)*N+i] - u_2[k*N*N+j*N+i]) + muy2 * (u_2[k*N*N+(j-1)*N+i] - u_2[k*N*N+j*N+i]) + muy3 * (u_2[k*N*N+(j+1)*N+i] - u_2[k*N*N+j*N+i]) + muy4 * (u_2[k*N*N+(j+2)*N+i] - u_2[k*N*N+j*N+i])) + strz[k] * ((2 * muz1 + la[(k-1)*N*N+j*N+i] * strz[k-1] - 3e0 / 4 * (la[k*N*N+j*N+i] * strz[k] + la[(k-2)*N*N+j*N+i] * strz[k-2])) * (u_2[(k-2)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz2 + la[(k-2)*N*N+j*N+i] * strz[k-2] + la[(k+1)*N*N+j*N+i] * strz[k+1] + 3 * (la[k*N*N+j*N+i] * strz[k] + la[(k-1)*N*N+j*N+i] * strz[k-1])) * (u_2[(k-1)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz3 + la[(k-1)*N*N+j*N+i] * strz[k-1] + la[(k+2)*N*N+j*N+i] * strz[k+2] + 3 * (la[(k+1)*N*N+j*N+i] * strz[k+1] + la[k*N*N+j*N+i] * strz[k])) * (u_2[(k+1)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz4 + la[(k+1)*N*N+j*N+i] * strz[k+1] - 3e0 / 4 * (la[k*N*N+j*N+i] * strz[k] + la[(k+2)*N*N+j*N+i] * strz[k+2])) * (u_2[(k+2)*N*N+j*N+i] - u_2[k*N*N+j*N+i]))); r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k*N*N+j*N+i-2] * (u_0[(k-2)*N*N+j*N+i-2] - u_0[(k+2)*N*N+j*N+i-2] + 8 * (-u_0[(k-1)*N*N+j*N+i-2] + u_0[(k+1)*N*N+j*N+i-2])) - 8 * (mu[k*N*N+j*N+i-1] * (u_0[(k-2)*N*N+j*N+i-1] - u_0[(k+2)*N*N+j*N+i-1] + 8 * (-u_0[(k-1)*N*N+j*N+i-1] + u_0[(k+1)*N*N+j*N+i-1]))) + 8 * (mu[k*N*N+j*N+i+1] * (u_0[(k-2)*N*N+j*N+i+1] - u_0[(k+2)*N*N+j*N+i+1] + 8 * (-u_0[(k-1)*N*N+j*N+i+1] + u_0[(k+1)*N*N+j*N+i+1]))) - (mu[k*N*N+j*N+i+2] * (u_0[(k-2)*N*N+j*N+i+2] - u_0[(k+2)*N*N+j*N+i+2] + 8 * (-u_0[(k-1)*N*N+j*N+i+2] + u_0[(k+1)*N*N+j*N+i+2])))); r3 += strx[i] * strz[k] * (1e0 / 144) * (la[(k-2)*N*N+j*N+i] * (u_0[(k-2)*N*N+j*N+i-2] - u_0[(k-2)*N*N+j*N+i+2] + 8 * (-u_0[(k-2)*N*N+j*N+i-1] + u_0[(k-2)*N*N+j*N+i+1])) - 8 * (la[(k-1)*N*N+j*N+i] * (u_0[(k-1)*N*N+j*N+i-2] - u_0[(k-1)*N*N+j*N+i+2] + 8 * (-u_0[(k-1)*N*N+j*N+i-1] + u_0[(k-1)*N*N+j*N+i+1]))) + 8 * (la[(k+1)*N*N+j*N+i] * (u_0[(k+1)*N*N+j*N+i-2] - u_0[(k+1)*N*N+j*N+i+2] + 8 * (-u_0[(k+1)*N*N+j*N+i-1] + u_0[(k+1)*N*N+j*N+i+1]))) - (la[(k+2)*N*N+j*N+i] * (u_0[(k+2)*N*N+j*N+i-2] - u_0[(k+2)*N*N+j*N+i+2] + 8 * (-u_0[(k+2)*N*N+j*N+i-1] + u_0[(k+2)*N*N+j*N+i+1])))); r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k*N*N+(j-2)*N+i] * (u_1[(k-2)*N*N+(j-2)*N+i] - u_1[(k+2)*N*N+(j-2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-2)*N+i] + u_1[(k+1)*N*N+(j-2)*N+i])) - 8 * (mu[k*N*N+(j-1)*N+i] * (u_1[(k-2)*N*N+(j-1)*N+i] - u_1[(k+2)*N*N+(j-1)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-1)*N+i] + u_1[(k+1)*N*N+(j-1)*N+i]))) + 8 * (mu[k*N*N+(j+1)*N+i] * (u_1[(k-2)*N*N+(j+1)*N+i] - u_1[(k+2)*N*N+(j+1)*N+i] + 8 * (-u_1[(k-1)*N*N+(j+1)*N+i] + u_1[(k+1)*N*N+(j+1)*N+i]))) - (mu[k*N*N+(j+2)*N+i] * (u_1[(k-2)*N*N+(j+2)*N+i] - u_1[(k+2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j+2)*N+i] + u_1[(k+1)*N*N+(j+2)*N+i])))); r3 += stry[j] * strz[k] * (1e0 / 144) * (la[(k-2)*N*N+j*N+i] * 
(u_1[(k-2)*N*N+(j-2)*N+i] - u_1[(k-2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-2)*N*N+(j-1)*N+i] + u_1[(k-2)*N*N+(j+1)*N+i])) - 8 * (la[(k-1)*N*N+j*N+i] * (u_1[(k-1)*N*N+(j-2)*N+i] - u_1[(k-1)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-1)*N+i] + u_1[(k-1)*N*N+(j+1)*N+i]))) + 8 * (la[(k+1)*N*N+j*N+i] * (u_1[(k+1)*N*N+(j-2)*N+i] - u_1[(k+1)*N*N+(j+2)*N+i] + 8 * (-u_1[(k+1)*N*N+(j-1)*N+i] + u_1[(k+1)*N*N+(j+1)*N+i]))) - (la[(k+2)*N*N+j*N+i] * (u_1[(k+2)*N*N+(j-2)*N+i] - u_1[(k+2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k+2)*N*N+(j-1)*N+i] + u_1[(k+2)*N*N+(j+1)*N+i])))); /* 3 * 3 = 9 flops */ uacc_2[k*N*N+j*N+i] = a1 * uacc_2[k*N*N+j*N+i] + cof * r3; } } } extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) { double *uacc_0; cudaMalloc (&uacc_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_0\n"); cudaMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *uacc_1; cudaMalloc (&uacc_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_1\n"); cudaMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *uacc_2; cudaMalloc (&uacc_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_2\n"); cudaMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_0; cudaMalloc (&u_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_0\n"); cudaMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_1; cudaMalloc (&u_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_1\n"); cudaMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_2; cudaMalloc (&u_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_2\n"); cudaMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *mu; cudaMalloc (&mu, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for mu\n"); cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *la; cudaMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *strx; cudaMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice); double *stry; cudaMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice); double *strz; cudaMalloc (&strz, sizeof(double)*N); check_error ("Failed to allocate device memory for strz\n"); cudaMemcpy (strz, h_strz, sizeof(double)*N, cudaMemcpyHostToDevice); dim3 blockconfig_a (16, 8, 1); dim3 gridconfig_a (ceil(N, blockconfig_a.x), ceil(N, blockconfig_a.y), 1); sw4_a <<<gridconfig_a, blockconfig_a>>> (uacc_0, u_0, u_1, u_2, mu, la, strx, stry, strz, N); dim3 blockconfig_b (16, 8, 1); dim3 gridconfig_b (ceil(N, blockconfig_b.x), ceil(N, blockconfig_b.y), 1); sw4_b <<<gridconfig_b, blockconfig_b>>> (uacc_1, u_0, u_1, u_2, mu, la, strx, stry, strz, N); dim3 blockconfig_c (16, 8, 1); dim3 gridconfig_c (ceil(N, blockconfig_c.x), ceil(N, blockconfig_c.y), 1); sw4_c <<<gridconfig_c, blockconfig_c>>> (uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N); cudaMemcpy (h_uacc_0, uacc_0, 
sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaFree (uacc_0); cudaFree (uacc_1); cudaFree (uacc_2); cudaFree (u_0); cudaFree (u_1); cudaFree (u_2); cudaFree (mu); cudaFree (la); cudaFree (strx); cudaFree (stry); cudaFree (strz); }
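// The host_code above relies on a two-argument ceil helper and a check_error routine that are
// defined earlier in this file, outside the excerpt shown here. The definitions below are only a
// plausible sketch of those helpers, written for illustration; the names match the call sites
// above, but the exact original implementations may differ.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Integer ceiling division, matching the ceil(N, blockconfig.x) grid-size computations above.
#define ceil(num, den) (((num) + (den) - 1) / (den))

// Print a message and abort if the most recent CUDA API call or kernel launch failed.
static void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { fprintf (stderr, "CUDA error: %s: %s\n", message, cudaGetErrorString (error)); exit (EXIT_FAILURE); } }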
b2531477b755b95311677bc0ab9528cc9bd80658.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ---------------------------------------------------------------------------- // Numerical diagonalization of 3x3 matrcies // Copyright (C) 2006 Joachim Kopp // ---------------------------------------------------------------------------- // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA // ---------------------------------------------------------------------------- #include <stdio.h> #include <math.h> #include <float.h> // Macros #define SQR(x) ((x)*(x)) // x^2 #define SQR_ABS(x) (SQR(cfloat(x)) + SQR(cimag(x))) // |x|^2 // Constants #define M_SQRT3 1.73205080756887729352744634151 // sqrt(3) // calculates eigenvalues of 2x2 float symmetric matrix __device__ void dsyevc2(float A, float B, float C, float *rt1, float *rt2){ float sm = A + C; float df = A - C; float rt = sqrt(SQR(df) + 4.0*B*B); float t; if (sm > 0.0) { *rt1 = 0.5 * (sm + rt); t = 1.0/(*rt1); *rt2 = (A*t)*C - (B*t)*B; } else if (sm < 0.0) { *rt2 = 0.5 * (sm - rt); t = 1.0/(*rt2); *rt1 = (A*t)*C - (B*t)*B; } else // This case needs to be treated separately to avoid div by 0 { *rt1 = 0.5 * rt; *rt2 = -0.5 * rt; } } // ---------------------------------------------------------------------------- __device__ void dsyev2(float A, float B, float C, float *rt1, float *rt2, float *cs, float *sn) // ---------------------------------------------------------------------------- // Calculates the eigensystem of a float symmetric 2x2 matrix // [ A B ] // [ B C ] // in the form // [ A B ] = [ cs -sn ] [ rt1 0 ] [ cs sn ] // [ B C ] [ sn cs ] [ 0 rt2 ] [ -sn cs ] // where rt1 >= rt2. Note that this convention is different from the one used // in the LAPACK routine DLAEV2, where |rt1| >= |rt2|. 
// ---------------------------------------------------------------------------- { float sm = A + C; float df = A - C; float rt = sqrt(SQR(df) + 4.0*B*B); float t; if (sm > 0.0) { *rt1 = 0.5 * (sm + rt); t = 1.0/(*rt1); *rt2 = (A*t)*C - (B*t)*B; } else if (sm < 0.0) { *rt2 = 0.5 * (sm - rt); t = 1.0/(*rt2); *rt1 = (A*t)*C - (B*t)*B; } else // This case needs to be treated separately to avoid div by 0 { *rt1 = 0.5 * rt; *rt2 = -0.5 * rt; } // Calculate eigenvectors if (df > 0.0) *cs = df + rt; else *cs = df - rt; if (fabs(*cs) > 2.0*fabs(B)) { t = -2.0 * B / *cs; *sn = 1.0 / sqrt(1.0 + SQR(t)); *cs = t * (*sn); } else if (fabs(B) == 0.0) { *cs = 1.0; *sn = 0.0; } else { t = -0.5 * (*cs) / B; *cs = 1.0 / sqrt(1.0 + SQR(t)); *sn = t * (*cs); } if (df > 0.0) { t = *cs; *cs = -(*sn); *sn = t; } } // ---------------------------------------------------------------------------- __device__ int dsyevc3(float A[3][3], float w[3]) // ---------------------------------------------------------------------------- // Calculates the eigenvalues of a symmetric 3x3 matrix A using Cardano's // analytical algorithm. // Only the diagonal and upper triangular parts of A are accessed. The access // is read-only. // ---------------------------------------------------------------------------- // Parameters: // A: The symmetric input matrix // w: Storage buffer for eigenvalues // ---------------------------------------------------------------------------- // Return value: // 0: Success // -1: Error // ---------------------------------------------------------------------------- { float m, c1, c0; // Determine coefficients of characteristic poynomial. We write // | a d f | // A = | d* b e | // | f* e* c | float de = A[0][1] * A[1][2]; // d * e float dd = SQR(A[0][1]); // d^2 float ee = SQR(A[1][2]); // e^2 float ff = SQR(A[0][2]); // f^2 m = A[0][0] + A[1][1] + A[2][2]; c1 = (A[0][0]*A[1][1] + A[0][0]*A[2][2] + A[1][1]*A[2][2]) // a*b + a*c + b*c - d^2 - e^2 - f^2 - (dd + ee + ff); c0 = A[2][2]*dd + A[0][0]*ee + A[1][1]*ff - A[0][0]*A[1][1]*A[2][2] - 2.0 * A[0][2]*de; // c*d^2 + a*e^2 + b*f^2 - a*b*c - 2*f*d*e) float p, sqrt_p, q, c, s, phi; p = SQR(m) - 3.0*c1; q = m*(p - (3.0/2.0)*c1) - (27.0/2.0)*c0; sqrt_p = sqrt(fabs(p)); phi = 27.0 * ( 0.25*SQR(c1)*(p - c1) + c0*(q + 27.0/4.0*c0)); phi = (1.0/3.0) * atan2(sqrt(fabs(phi)), q); c = sqrt_p*cos(phi); s = (1.0/M_SQRT3)*sqrt_p*sin(phi); w[0] = (1.0/3.0)*(m - c); w[1] = w[0] + s; w[2] = w[0] + c; w[0] -= s; return 0; } // ---------------------------------------------------------------------------- __device__ int dsyevv3(float A[3][3], float Q[3][3], float w[3]) // ---------------------------------------------------------------------------- // Calculates the eigenvalues and normalized eigenvectors of a symmetric 3x3 // matrix A using Cardano's method for the eigenvalues and an analytical // method based on vector cross products for the eigenvectors. // Only the diagonal and upper triangular parts of A need to contain meaningful // values. However, all of A may be used as temporary storage and may hence be // destroyed. 
// ---------------------------------------------------------------------------- // Parameters: // A: The symmetric input matrix // Q: Storage buffer for eigenvectors // w: Storage buffer for eigenvalues // ---------------------------------------------------------------------------- // Return value: // 0: Success // -1: Error // ---------------------------------------------------------------------------- // Dependencies: // dsyevc3() // ---------------------------------------------------------------------------- // Version history: // v1.1 (12 Mar 2012): Removed access to lower triangualr part of A // (according to the documentation, only the upper triangular part needs // to be filled) // v1.0: First released version // ---------------------------------------------------------------------------- { #ifndef EVALS_ONLY float norm; // Squared norm or inverse norm of current eigenvector float n0, n1; // Norm of first and second columns of A float n0tmp, n1tmp; // "Templates" for the calculation of n0/n1 - saves a few FLOPS float thresh; // Small number used as threshold for floating point comparisons float error; // Estimated maximum roundoff error in some steps float wmax; // The eigenvalue of maximum modulus float f, t; // Intermediate storage int i, j; // Loop counters #endif // Calculate eigenvalues dsyevc3(A, w); #ifndef EVALS_ONLY wmax = fabs(w[0]); if ((t=fabs(w[1])) > wmax) wmax = t; if ((t=fabs(w[2])) > wmax) wmax = t; thresh = SQR(8.0 * DBL_EPSILON * wmax); // Prepare calculation of eigenvectors n0tmp = SQR(A[0][1]) + SQR(A[0][2]); n1tmp = SQR(A[0][1]) + SQR(A[1][2]); Q[0][1] = A[0][1]*A[1][2] - A[0][2]*A[1][1]; Q[1][1] = A[0][2]*A[0][1] - A[1][2]*A[0][0]; Q[2][1] = SQR(A[0][1]); // Calculate first eigenvector by the formula // v[0] = (A - w[0]).e1 x (A - w[0]).e2 A[0][0] -= w[0]; A[1][1] -= w[0]; Q[0][0] = Q[0][1] + A[0][2]*w[0]; Q[1][0] = Q[1][1] + A[1][2]*w[0]; Q[2][0] = A[0][0]*A[1][1] - Q[2][1]; norm = SQR(Q[0][0]) + SQR(Q[1][0]) + SQR(Q[2][0]); n0 = n0tmp + SQR(A[0][0]); n1 = n1tmp + SQR(A[1][1]); error = n0 * n1; if (n0 <= thresh) // If the first column is zero, then (1,0,0) is an eigenvector { Q[0][0] = 1.0; Q[1][0] = 0.0; Q[2][0] = 0.0; } else if (n1 <= thresh) // If the second column is zero, then (0,1,0) is an eigenvector { Q[0][0] = 0.0; Q[1][0] = 1.0; Q[2][0] = 0.0; } else if (norm < SQR(64.0 * DBL_EPSILON) * error) { // If angle between A[0] and A[1] is too small, don't use t = SQR(A[0][1]); // cross product, but calculate v ~ (1, -A0/A1, 0) f = -A[0][0] / A[0][1]; if (SQR(A[1][1]) > t) { t = SQR(A[1][1]); f = -A[0][1] / A[1][1]; } if (SQR(A[1][2]) > t) f = -A[0][2] / A[1][2]; norm = 1.0/sqrt(1 + SQR(f)); Q[0][0] = norm; Q[1][0] = f * norm; Q[2][0] = 0.0; } else // This is the standard branch { norm = sqrt(1.0 / norm); for (j=0; j < 3; j++) Q[j][0] = Q[j][0] * norm; } // Prepare calculation of second eigenvector t = w[0] - w[1]; if (fabs(t) > 8.0 * DBL_EPSILON * wmax) { // For non-degenerate eigenvalue, calculate second eigenvector by the formula // v[1] = (A - w[1]).e1 x (A - w[1]).e2 A[0][0] += t; A[1][1] += t; Q[0][1] = Q[0][1] + A[0][2]*w[1]; Q[1][1] = Q[1][1] + A[1][2]*w[1]; Q[2][1] = A[0][0]*A[1][1] - Q[2][1]; norm = SQR(Q[0][1]) + SQR(Q[1][1]) + SQR(Q[2][1]); n0 = n0tmp + SQR(A[0][0]); n1 = n1tmp + SQR(A[1][1]); error = n0 * n1; if (n0 <= thresh) // If the first column is zero, then (1,0,0) is an eigenvector { Q[0][1] = 1.0; Q[1][1] = 0.0; Q[2][1] = 0.0; } else if (n1 <= thresh) // If the second column is zero, then (0,1,0) is an eigenvector { Q[0][1] = 0.0; 
Q[1][1] = 1.0; Q[2][1] = 0.0; } else if (norm < SQR(64.0 * DBL_EPSILON) * error) { // If angle between A[0] and A[1] is too small, don't use t = SQR(A[0][1]); // cross product, but calculate v ~ (1, -A0/A1, 0) f = -A[0][0] / A[0][1]; if (SQR(A[1][1]) > t) { t = SQR(A[1][1]); f = -A[0][1] / A[1][1]; } if (SQR(A[1][2]) > t) f = -A[0][2] / A[1][2]; norm = 1.0/sqrt(1 + SQR(f)); Q[0][1] = norm; Q[1][1] = f * norm; Q[2][1] = 0.0; } else { norm = sqrt(1.0 / norm); for (j=0; j < 3; j++) Q[j][1] = Q[j][1] * norm; } } else { // For degenerate eigenvalue, calculate second eigenvector according to // v[1] = v[0] x (A - w[1]).e[i] // // This would floatly get to complicated if we could not assume all of A to // contain meaningful values. A[1][0] = A[0][1]; A[2][0] = A[0][2]; A[2][1] = A[1][2]; A[0][0] += w[0]; A[1][1] += w[0]; for (i=0; i < 3; i++) { A[i][i] -= w[1]; n0 = SQR(A[0][i]) + SQR(A[1][i]) + SQR(A[2][i]); if (n0 > thresh) { Q[0][1] = Q[1][0]*A[2][i] - Q[2][0]*A[1][i]; Q[1][1] = Q[2][0]*A[0][i] - Q[0][0]*A[2][i]; Q[2][1] = Q[0][0]*A[1][i] - Q[1][0]*A[0][i]; norm = SQR(Q[0][1]) + SQR(Q[1][1]) + SQR(Q[2][1]); if (norm > SQR(256.0 * DBL_EPSILON) * n0) // Accept cross product only if the angle between { // the two vectors was not too small norm = sqrt(1.0 / norm); for (j=0; j < 3; j++) Q[j][1] = Q[j][1] * norm; break; } } } if (i == 3) // This means that any vector orthogonal to v[0] is an EV. { for (j=0; j < 3; j++) if (Q[j][0] != 0.0) // Find nonzero element of v[0] ... { // ... and swap it with the next one norm = 1.0 / sqrt(SQR(Q[j][0]) + SQR(Q[(j+1)%3][0])); Q[j][1] = Q[(j+1)%3][0] * norm; Q[(j+1)%3][1] = -Q[j][0] * norm; Q[(j+2)%3][1] = 0.0; break; } } } // Calculate third eigenvector according to // v[2] = v[0] x v[1] Q[0][2] = Q[1][0]*Q[2][1] - Q[2][0]*Q[1][1]; Q[1][2] = Q[2][0]*Q[0][1] - Q[0][0]*Q[2][1]; Q[2][2] = Q[0][0]*Q[1][1] - Q[1][0]*Q[0][1]; #endif return 0; } // ---------------------------------------------------------------------------- __device__ void dsytrd3(float A[3][3], float Q[3][3], float d[3], float e[2]) // ---------------------------------------------------------------------------- // Reduces a symmetric 3x3 matrix to tridiagonal form by applying // (unitary) Householder transformations: // [ d[0] e[0] ] // A = Q . [ e[0] d[1] e[1] ] . Q^T // [ e[1] d[2] ] // The function accesses only the diagonal and upper triangular parts of // A. The access is read-only. 
// --------------------------------------------------------------------------- { const int n = 3; float u[n], q[n]; float omega, f; float K, h, g; // Initialize Q to the identitity matrix #ifndef EVALS_ONLY for (int i=0; i < n; i++) { Q[i][i] = 1.0; for (int j=0; j < i; j++) Q[i][j] = Q[j][i] = 0.0; } #endif // Bring first row and column to the desired form h = SQR(A[0][1]) + SQR(A[0][2]); if (A[0][1] > 0) g = -sqrt(h); else g = sqrt(h); e[0] = g; f = g * A[0][1]; u[1] = A[0][1] - g; u[2] = A[0][2]; omega = h - f; if (omega > 0.0) { omega = 1.0 / omega; K = 0.0; for (int i=1; i < n; i++) { f = A[1][i] * u[1] + A[i][2] * u[2]; q[i] = omega * f; // p K += u[i] * f; // u* A u } K *= 0.5 * SQR(omega); for (int i=1; i < n; i++) q[i] = q[i] - K * u[i]; d[0] = A[0][0]; d[1] = A[1][1] - 2.0*q[1]*u[1]; d[2] = A[2][2] - 2.0*q[2]*u[2]; // Store inverse Householder transformation in Q #ifndef EVALS_ONLY for (int j=1; j < n; j++) { f = omega * u[j]; for (int i=1; i < n; i++) Q[i][j] = Q[i][j] - f*u[i]; } #endif // Calculate updated A[1][2] and store it in e[1] e[1] = A[1][2] - q[1]*u[2] - u[1]*q[2]; } else { for (int i=0; i < n; i++) d[i] = A[i][i]; e[1] = A[1][2]; } } // ---------------------------------------------------------------------------- __device__ int dsyevq3(float A[3][3], float Q[3][3], float w[3]) // ---------------------------------------------------------------------------- // Calculates the eigenvalues and normalized eigenvectors of a symmetric 3x3 // matrix A using the QL algorithm with implicit shifts, preceded by a // Householder reduction to tridiagonal form. // The function accesses only the diagonal and upper triangular parts of A. // The access is read-only. // ---------------------------------------------------------------------------- // Parameters: // A: The symmetric input matrix // Q: Storage buffer for eigenvectors // w: Storage buffer for eigenvalues // ---------------------------------------------------------------------------- // Return value: // 0: Success // -1: Error (no convergence) // ---------------------------------------------------------------------------- // Dependencies: // dsytrd3() // ---------------------------------------------------------------------------- { const int n = 3; float e[3]; // The third element is used only as temporary workspace float g, r, p, f, b, s, c, t; // Intermediate storage int nIter; int m; // Transform A to float tridiagonal form by the Householder method dsytrd3(A, Q, w, e); // Calculate eigensystem of the remaining float symmetric tridiagonal matrix // with the QL method // // Loop over all off-diagonal elements for (int l=0; l < n-1; l++) { nIter = 0; while (1) { // Check for convergence and exit iteration loop if off-diagonal // element e(l) is zero for (m=l; m <= n-2; m++) { g = fabs(w[m])+fabs(w[m+1]); if (fabs(e[m]) + g == g) break; } if (m == l) break; if (nIter++ >= 30) return -1; // Calculate g = d_m - k g = (w[l+1] - w[l]) / (e[l] + e[l]); r = sqrt(SQR(g) + 1.0); if (g > 0) g = w[m] - w[l] + e[l]/(g + r); else g = w[m] - w[l] + e[l]/(g - r); s = c = 1.0; p = 0.0; for (int i=m-1; i >= l; i--) { f = s * e[i]; b = c * e[i]; if (fabs(f) > fabs(g)) { c = g / f; r = sqrt(SQR(c) + 1.0); e[i+1] = f * r; c *= (s = 1.0/r); } else { s = f / g; r = sqrt(SQR(s) + 1.0); e[i+1] = g * r; s *= (c = 1.0/r); } g = w[i+1] - p; r = (w[i] - g)*s + 2.0*c*b; p = s * r; w[i+1] = g + p; g = c*r - b; // Form eigenvectors #ifndef EVALS_ONLY for (int k=0; k < n; k++) { t = Q[k][i+1]; Q[k][i+1] = s*Q[k][i] + c*t; Q[k][i] = c*Q[k][i] - s*t; } #endif 
} w[l] -= p; e[l] = g; e[m] = 0.0; } } return 0; } /////// __global__ interface,column major for matlab __device__ void eig2(const float* M, float* V, float* L){ dsyev2(M[0],M[1],M[3],&L[3],&L[0],&V[1],&V[3]); V[2]=V[1]; V[0]=-V[3]; L[1]=L[2]=0; } __device__ void eig3(const float* M, float* V, float* L,bool useIterative=false){ float A[3][3] = {{M[0],M[1],M[2]}, {M[3],M[4],M[5]}, {M[6],M[7],M[8]} }; float Q[3][3] = { {0,0,0}, {0,0,0}, {0,0,0} }; float LL[3] = {0,0,0}; int conv=0; if(useIterative){ conv=dsyevq3(A,Q,LL); }else{ conv=dsyevv3(A,Q,LL); } if(conv<0){ L[0]=-1; return; } L[0]=LL[0]; L[4]=LL[1]; L[8]=LL[2]; V[0]=Q[0][0]; V[1]=Q[1][0]; V[2]=Q[2][0]; V[3]=Q[0][1]; V[4]=Q[1][1]; V[5]=Q[2][1]; V[6]=Q[0][2]; V[7]=Q[1][2]; V[8]=Q[2][2]; } __global__ void eig(const float* M, float* V, float* L, const int n, bool useIterative=false){ if(n==2){ eig2(M,V,L); }else if(n==3){ eig3(M,V,L,useIterative); } } __global__ void eigVal(const float* M, float* L, const int n){ if(n==2){ dsyevc2(M[0],M[1],M[3],&L[1],&L[0]); }else if (n==3){ float A[3][3] = {{M[0],M[1],M[2]}, {M[3],M[4],M[5]}, {M[6],M[7],M[8]} }; dsyevc3(A,L); } } #include "cutil_math.h" #include "cutil_math2.h" __global__ void estimateNormals(const float* pts1, float* nor1,const int n, const float neighRadius){ size_t x = threadIdx.x + blockDim.x * blockIdx.x; if(x>=n) return; const float3* pts = (float3*) pts1; float3* nor = (float3*) nor1; float3 m = make_float3(0,0,0); float C_2d[3][3]={{0,0,0},{0,0,0},{0,0,0}}; float* C = (float*) C_2d; //rowwise int nN=0; for(int i=0; i<n;i++){ //if(i==x) continue; float3 diff3 = pts[i]-pts[x]; float norm = sqrt(dot(diff3,diff3)); if(norm<neighRadius){ nN++; m += pts[i]; outerAdd(pts[i],C); //note: instead of pts[ind]-m, we demean afterwards } } m /= (nN+0.0f); outerAdd(m,C,-nN); float fac=1.0f/(nN-1.0f); mul(C,fac); //now C is a covariance matrix float Q[3][3]={{0,0,0},{0,0,0},{0,0,0}}; float w[3]={0,0,0}; dsyevv3(C_2d,Q,w); //the largest eigenvector is the rightmost column nor[x].x=Q[0][2]; nor[x].y=Q[1][2]; nor[x].z=Q[2][2]; }
b2531477b755b95311677bc0ab9528cc9bd80658.cu
// ---------------------------------------------------------------------------- // Numerical diagonalization of 3x3 matrcies // Copyright (C) 2006 Joachim Kopp // ---------------------------------------------------------------------------- // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA // ---------------------------------------------------------------------------- #include <stdio.h> #include <math.h> #include <float.h> // Macros #define SQR(x) ((x)*(x)) // x^2 #define SQR_ABS(x) (SQR(cfloat(x)) + SQR(cimag(x))) // |x|^2 // Constants #define M_SQRT3 1.73205080756887729352744634151 // sqrt(3) // calculates eigenvalues of 2x2 float symmetric matrix __device__ void dsyevc2(float A, float B, float C, float *rt1, float *rt2){ float sm = A + C; float df = A - C; float rt = sqrt(SQR(df) + 4.0*B*B); float t; if (sm > 0.0) { *rt1 = 0.5 * (sm + rt); t = 1.0/(*rt1); *rt2 = (A*t)*C - (B*t)*B; } else if (sm < 0.0) { *rt2 = 0.5 * (sm - rt); t = 1.0/(*rt2); *rt1 = (A*t)*C - (B*t)*B; } else // This case needs to be treated separately to avoid div by 0 { *rt1 = 0.5 * rt; *rt2 = -0.5 * rt; } } // ---------------------------------------------------------------------------- __device__ void dsyev2(float A, float B, float C, float *rt1, float *rt2, float *cs, float *sn) // ---------------------------------------------------------------------------- // Calculates the eigensystem of a float symmetric 2x2 matrix // [ A B ] // [ B C ] // in the form // [ A B ] = [ cs -sn ] [ rt1 0 ] [ cs sn ] // [ B C ] [ sn cs ] [ 0 rt2 ] [ -sn cs ] // where rt1 >= rt2. Note that this convention is different from the one used // in the LAPACK routine DLAEV2, where |rt1| >= |rt2|. // ---------------------------------------------------------------------------- { float sm = A + C; float df = A - C; float rt = sqrt(SQR(df) + 4.0*B*B); float t; if (sm > 0.0) { *rt1 = 0.5 * (sm + rt); t = 1.0/(*rt1); *rt2 = (A*t)*C - (B*t)*B; } else if (sm < 0.0) { *rt2 = 0.5 * (sm - rt); t = 1.0/(*rt2); *rt1 = (A*t)*C - (B*t)*B; } else // This case needs to be treated separately to avoid div by 0 { *rt1 = 0.5 * rt; *rt2 = -0.5 * rt; } // Calculate eigenvectors if (df > 0.0) *cs = df + rt; else *cs = df - rt; if (fabs(*cs) > 2.0*fabs(B)) { t = -2.0 * B / *cs; *sn = 1.0 / sqrt(1.0 + SQR(t)); *cs = t * (*sn); } else if (fabs(B) == 0.0) { *cs = 1.0; *sn = 0.0; } else { t = -0.5 * (*cs) / B; *cs = 1.0 / sqrt(1.0 + SQR(t)); *sn = t * (*cs); } if (df > 0.0) { t = *cs; *cs = -(*sn); *sn = t; } } // ---------------------------------------------------------------------------- __device__ int dsyevc3(float A[3][3], float w[3]) // ---------------------------------------------------------------------------- // Calculates the eigenvalues of a symmetric 3x3 matrix A using Cardano's // analytical algorithm. // Only the diagonal and upper triangular parts of A are accessed. 
The access // is read-only. // ---------------------------------------------------------------------------- // Parameters: // A: The symmetric input matrix // w: Storage buffer for eigenvalues // ---------------------------------------------------------------------------- // Return value: // 0: Success // -1: Error // ---------------------------------------------------------------------------- { float m, c1, c0; // Determine coefficients of characteristic poynomial. We write // | a d f | // A = | d* b e | // | f* e* c | float de = A[0][1] * A[1][2]; // d * e float dd = SQR(A[0][1]); // d^2 float ee = SQR(A[1][2]); // e^2 float ff = SQR(A[0][2]); // f^2 m = A[0][0] + A[1][1] + A[2][2]; c1 = (A[0][0]*A[1][1] + A[0][0]*A[2][2] + A[1][1]*A[2][2]) // a*b + a*c + b*c - d^2 - e^2 - f^2 - (dd + ee + ff); c0 = A[2][2]*dd + A[0][0]*ee + A[1][1]*ff - A[0][0]*A[1][1]*A[2][2] - 2.0 * A[0][2]*de; // c*d^2 + a*e^2 + b*f^2 - a*b*c - 2*f*d*e) float p, sqrt_p, q, c, s, phi; p = SQR(m) - 3.0*c1; q = m*(p - (3.0/2.0)*c1) - (27.0/2.0)*c0; sqrt_p = sqrt(fabs(p)); phi = 27.0 * ( 0.25*SQR(c1)*(p - c1) + c0*(q + 27.0/4.0*c0)); phi = (1.0/3.0) * atan2(sqrt(fabs(phi)), q); c = sqrt_p*cos(phi); s = (1.0/M_SQRT3)*sqrt_p*sin(phi); w[0] = (1.0/3.0)*(m - c); w[1] = w[0] + s; w[2] = w[0] + c; w[0] -= s; return 0; } // ---------------------------------------------------------------------------- __device__ int dsyevv3(float A[3][3], float Q[3][3], float w[3]) // ---------------------------------------------------------------------------- // Calculates the eigenvalues and normalized eigenvectors of a symmetric 3x3 // matrix A using Cardano's method for the eigenvalues and an analytical // method based on vector cross products for the eigenvectors. // Only the diagonal and upper triangular parts of A need to contain meaningful // values. However, all of A may be used as temporary storage and may hence be // destroyed. 
// ---------------------------------------------------------------------------- // Parameters: // A: The symmetric input matrix // Q: Storage buffer for eigenvectors // w: Storage buffer for eigenvalues // ---------------------------------------------------------------------------- // Return value: // 0: Success // -1: Error // ---------------------------------------------------------------------------- // Dependencies: // dsyevc3() // ---------------------------------------------------------------------------- // Version history: // v1.1 (12 Mar 2012): Removed access to lower triangualr part of A // (according to the documentation, only the upper triangular part needs // to be filled) // v1.0: First released version // ---------------------------------------------------------------------------- { #ifndef EVALS_ONLY float norm; // Squared norm or inverse norm of current eigenvector float n0, n1; // Norm of first and second columns of A float n0tmp, n1tmp; // "Templates" for the calculation of n0/n1 - saves a few FLOPS float thresh; // Small number used as threshold for floating point comparisons float error; // Estimated maximum roundoff error in some steps float wmax; // The eigenvalue of maximum modulus float f, t; // Intermediate storage int i, j; // Loop counters #endif // Calculate eigenvalues dsyevc3(A, w); #ifndef EVALS_ONLY wmax = fabs(w[0]); if ((t=fabs(w[1])) > wmax) wmax = t; if ((t=fabs(w[2])) > wmax) wmax = t; thresh = SQR(8.0 * DBL_EPSILON * wmax); // Prepare calculation of eigenvectors n0tmp = SQR(A[0][1]) + SQR(A[0][2]); n1tmp = SQR(A[0][1]) + SQR(A[1][2]); Q[0][1] = A[0][1]*A[1][2] - A[0][2]*A[1][1]; Q[1][1] = A[0][2]*A[0][1] - A[1][2]*A[0][0]; Q[2][1] = SQR(A[0][1]); // Calculate first eigenvector by the formula // v[0] = (A - w[0]).e1 x (A - w[0]).e2 A[0][0] -= w[0]; A[1][1] -= w[0]; Q[0][0] = Q[0][1] + A[0][2]*w[0]; Q[1][0] = Q[1][1] + A[1][2]*w[0]; Q[2][0] = A[0][0]*A[1][1] - Q[2][1]; norm = SQR(Q[0][0]) + SQR(Q[1][0]) + SQR(Q[2][0]); n0 = n0tmp + SQR(A[0][0]); n1 = n1tmp + SQR(A[1][1]); error = n0 * n1; if (n0 <= thresh) // If the first column is zero, then (1,0,0) is an eigenvector { Q[0][0] = 1.0; Q[1][0] = 0.0; Q[2][0] = 0.0; } else if (n1 <= thresh) // If the second column is zero, then (0,1,0) is an eigenvector { Q[0][0] = 0.0; Q[1][0] = 1.0; Q[2][0] = 0.0; } else if (norm < SQR(64.0 * DBL_EPSILON) * error) { // If angle between A[0] and A[1] is too small, don't use t = SQR(A[0][1]); // cross product, but calculate v ~ (1, -A0/A1, 0) f = -A[0][0] / A[0][1]; if (SQR(A[1][1]) > t) { t = SQR(A[1][1]); f = -A[0][1] / A[1][1]; } if (SQR(A[1][2]) > t) f = -A[0][2] / A[1][2]; norm = 1.0/sqrt(1 + SQR(f)); Q[0][0] = norm; Q[1][0] = f * norm; Q[2][0] = 0.0; } else // This is the standard branch { norm = sqrt(1.0 / norm); for (j=0; j < 3; j++) Q[j][0] = Q[j][0] * norm; } // Prepare calculation of second eigenvector t = w[0] - w[1]; if (fabs(t) > 8.0 * DBL_EPSILON * wmax) { // For non-degenerate eigenvalue, calculate second eigenvector by the formula // v[1] = (A - w[1]).e1 x (A - w[1]).e2 A[0][0] += t; A[1][1] += t; Q[0][1] = Q[0][1] + A[0][2]*w[1]; Q[1][1] = Q[1][1] + A[1][2]*w[1]; Q[2][1] = A[0][0]*A[1][1] - Q[2][1]; norm = SQR(Q[0][1]) + SQR(Q[1][1]) + SQR(Q[2][1]); n0 = n0tmp + SQR(A[0][0]); n1 = n1tmp + SQR(A[1][1]); error = n0 * n1; if (n0 <= thresh) // If the first column is zero, then (1,0,0) is an eigenvector { Q[0][1] = 1.0; Q[1][1] = 0.0; Q[2][1] = 0.0; } else if (n1 <= thresh) // If the second column is zero, then (0,1,0) is an eigenvector { Q[0][1] = 0.0; 
Q[1][1] = 1.0; Q[2][1] = 0.0; } else if (norm < SQR(64.0 * DBL_EPSILON) * error) { // If angle between A[0] and A[1] is too small, don't use t = SQR(A[0][1]); // cross product, but calculate v ~ (1, -A0/A1, 0) f = -A[0][0] / A[0][1]; if (SQR(A[1][1]) > t) { t = SQR(A[1][1]); f = -A[0][1] / A[1][1]; } if (SQR(A[1][2]) > t) f = -A[0][2] / A[1][2]; norm = 1.0/sqrt(1 + SQR(f)); Q[0][1] = norm; Q[1][1] = f * norm; Q[2][1] = 0.0; } else { norm = sqrt(1.0 / norm); for (j=0; j < 3; j++) Q[j][1] = Q[j][1] * norm; } } else { // For degenerate eigenvalue, calculate second eigenvector according to // v[1] = v[0] x (A - w[1]).e[i] // // This would floatly get to complicated if we could not assume all of A to // contain meaningful values. A[1][0] = A[0][1]; A[2][0] = A[0][2]; A[2][1] = A[1][2]; A[0][0] += w[0]; A[1][1] += w[0]; for (i=0; i < 3; i++) { A[i][i] -= w[1]; n0 = SQR(A[0][i]) + SQR(A[1][i]) + SQR(A[2][i]); if (n0 > thresh) { Q[0][1] = Q[1][0]*A[2][i] - Q[2][0]*A[1][i]; Q[1][1] = Q[2][0]*A[0][i] - Q[0][0]*A[2][i]; Q[2][1] = Q[0][0]*A[1][i] - Q[1][0]*A[0][i]; norm = SQR(Q[0][1]) + SQR(Q[1][1]) + SQR(Q[2][1]); if (norm > SQR(256.0 * DBL_EPSILON) * n0) // Accept cross product only if the angle between { // the two vectors was not too small norm = sqrt(1.0 / norm); for (j=0; j < 3; j++) Q[j][1] = Q[j][1] * norm; break; } } } if (i == 3) // This means that any vector orthogonal to v[0] is an EV. { for (j=0; j < 3; j++) if (Q[j][0] != 0.0) // Find nonzero element of v[0] ... { // ... and swap it with the next one norm = 1.0 / sqrt(SQR(Q[j][0]) + SQR(Q[(j+1)%3][0])); Q[j][1] = Q[(j+1)%3][0] * norm; Q[(j+1)%3][1] = -Q[j][0] * norm; Q[(j+2)%3][1] = 0.0; break; } } } // Calculate third eigenvector according to // v[2] = v[0] x v[1] Q[0][2] = Q[1][0]*Q[2][1] - Q[2][0]*Q[1][1]; Q[1][2] = Q[2][0]*Q[0][1] - Q[0][0]*Q[2][1]; Q[2][2] = Q[0][0]*Q[1][1] - Q[1][0]*Q[0][1]; #endif return 0; } // ---------------------------------------------------------------------------- __device__ void dsytrd3(float A[3][3], float Q[3][3], float d[3], float e[2]) // ---------------------------------------------------------------------------- // Reduces a symmetric 3x3 matrix to tridiagonal form by applying // (unitary) Householder transformations: // [ d[0] e[0] ] // A = Q . [ e[0] d[1] e[1] ] . Q^T // [ e[1] d[2] ] // The function accesses only the diagonal and upper triangular parts of // A. The access is read-only. 
// --------------------------------------------------------------------------- { const int n = 3; float u[n], q[n]; float omega, f; float K, h, g; // Initialize Q to the identitity matrix #ifndef EVALS_ONLY for (int i=0; i < n; i++) { Q[i][i] = 1.0; for (int j=0; j < i; j++) Q[i][j] = Q[j][i] = 0.0; } #endif // Bring first row and column to the desired form h = SQR(A[0][1]) + SQR(A[0][2]); if (A[0][1] > 0) g = -sqrt(h); else g = sqrt(h); e[0] = g; f = g * A[0][1]; u[1] = A[0][1] - g; u[2] = A[0][2]; omega = h - f; if (omega > 0.0) { omega = 1.0 / omega; K = 0.0; for (int i=1; i < n; i++) { f = A[1][i] * u[1] + A[i][2] * u[2]; q[i] = omega * f; // p K += u[i] * f; // u* A u } K *= 0.5 * SQR(omega); for (int i=1; i < n; i++) q[i] = q[i] - K * u[i]; d[0] = A[0][0]; d[1] = A[1][1] - 2.0*q[1]*u[1]; d[2] = A[2][2] - 2.0*q[2]*u[2]; // Store inverse Householder transformation in Q #ifndef EVALS_ONLY for (int j=1; j < n; j++) { f = omega * u[j]; for (int i=1; i < n; i++) Q[i][j] = Q[i][j] - f*u[i]; } #endif // Calculate updated A[1][2] and store it in e[1] e[1] = A[1][2] - q[1]*u[2] - u[1]*q[2]; } else { for (int i=0; i < n; i++) d[i] = A[i][i]; e[1] = A[1][2]; } } // ---------------------------------------------------------------------------- __device__ int dsyevq3(float A[3][3], float Q[3][3], float w[3]) // ---------------------------------------------------------------------------- // Calculates the eigenvalues and normalized eigenvectors of a symmetric 3x3 // matrix A using the QL algorithm with implicit shifts, preceded by a // Householder reduction to tridiagonal form. // The function accesses only the diagonal and upper triangular parts of A. // The access is read-only. // ---------------------------------------------------------------------------- // Parameters: // A: The symmetric input matrix // Q: Storage buffer for eigenvectors // w: Storage buffer for eigenvalues // ---------------------------------------------------------------------------- // Return value: // 0: Success // -1: Error (no convergence) // ---------------------------------------------------------------------------- // Dependencies: // dsytrd3() // ---------------------------------------------------------------------------- { const int n = 3; float e[3]; // The third element is used only as temporary workspace float g, r, p, f, b, s, c, t; // Intermediate storage int nIter; int m; // Transform A to float tridiagonal form by the Householder method dsytrd3(A, Q, w, e); // Calculate eigensystem of the remaining float symmetric tridiagonal matrix // with the QL method // // Loop over all off-diagonal elements for (int l=0; l < n-1; l++) { nIter = 0; while (1) { // Check for convergence and exit iteration loop if off-diagonal // element e(l) is zero for (m=l; m <= n-2; m++) { g = fabs(w[m])+fabs(w[m+1]); if (fabs(e[m]) + g == g) break; } if (m == l) break; if (nIter++ >= 30) return -1; // Calculate g = d_m - k g = (w[l+1] - w[l]) / (e[l] + e[l]); r = sqrt(SQR(g) + 1.0); if (g > 0) g = w[m] - w[l] + e[l]/(g + r); else g = w[m] - w[l] + e[l]/(g - r); s = c = 1.0; p = 0.0; for (int i=m-1; i >= l; i--) { f = s * e[i]; b = c * e[i]; if (fabs(f) > fabs(g)) { c = g / f; r = sqrt(SQR(c) + 1.0); e[i+1] = f * r; c *= (s = 1.0/r); } else { s = f / g; r = sqrt(SQR(s) + 1.0); e[i+1] = g * r; s *= (c = 1.0/r); } g = w[i+1] - p; r = (w[i] - g)*s + 2.0*c*b; p = s * r; w[i+1] = g + p; g = c*r - b; // Form eigenvectors #ifndef EVALS_ONLY for (int k=0; k < n; k++) { t = Q[k][i+1]; Q[k][i+1] = s*Q[k][i] + c*t; Q[k][i] = c*Q[k][i] - s*t; } #endif 
} w[l] -= p; e[l] = g; e[m] = 0.0; } } return 0; } /////// __global__ interface,column major for matlab __device__ void eig2(const float* M, float* V, float* L){ dsyev2(M[0],M[1],M[3],&L[3],&L[0],&V[1],&V[3]); V[2]=V[1]; V[0]=-V[3]; L[1]=L[2]=0; } __device__ void eig3(const float* M, float* V, float* L,bool useIterative=false){ float A[3][3] = {{M[0],M[1],M[2]}, {M[3],M[4],M[5]}, {M[6],M[7],M[8]} }; float Q[3][3] = { {0,0,0}, {0,0,0}, {0,0,0} }; float LL[3] = {0,0,0}; int conv=0; if(useIterative){ conv=dsyevq3(A,Q,LL); }else{ conv=dsyevv3(A,Q,LL); } if(conv<0){ L[0]=-1; return; } L[0]=LL[0]; L[4]=LL[1]; L[8]=LL[2]; V[0]=Q[0][0]; V[1]=Q[1][0]; V[2]=Q[2][0]; V[3]=Q[0][1]; V[4]=Q[1][1]; V[5]=Q[2][1]; V[6]=Q[0][2]; V[7]=Q[1][2]; V[8]=Q[2][2]; } __global__ void eig(const float* M, float* V, float* L, const int n, bool useIterative=false){ if(n==2){ eig2(M,V,L); }else if(n==3){ eig3(M,V,L,useIterative); } } __global__ void eigVal(const float* M, float* L, const int n){ if(n==2){ dsyevc2(M[0],M[1],M[3],&L[1],&L[0]); }else if (n==3){ float A[3][3] = {{M[0],M[1],M[2]}, {M[3],M[4],M[5]}, {M[6],M[7],M[8]} }; dsyevc3(A,L); } } #include "cutil_math.h" #include "cutil_math2.h" __global__ void estimateNormals(const float* pts1, float* nor1,const int n, const float neighRadius){ size_t x = threadIdx.x + blockDim.x * blockIdx.x; if(x>=n) return; const float3* pts = (float3*) pts1; float3* nor = (float3*) nor1; float3 m = make_float3(0,0,0); float C_2d[3][3]={{0,0,0},{0,0,0},{0,0,0}}; float* C = (float*) C_2d; //rowwise int nN=0; for(int i=0; i<n;i++){ //if(i==x) continue; float3 diff3 = pts[i]-pts[x]; float norm = sqrt(dot(diff3,diff3)); if(norm<neighRadius){ nN++; m += pts[i]; outerAdd(pts[i],C); //note: instead of pts[ind]-m, we demean afterwards } } m /= (nN+0.0f); outerAdd(m,C,-nN); float fac=1.0f/(nN-1.0f); mul(C,fac); //now C is a covariance matrix float Q[3][3]={{0,0,0},{0,0,0},{0,0,0}}; float w[3]={0,0,0}; dsyevv3(C_2d,Q,w); //the largest eigenvector is the rightmost column nor[x].x=Q[0][2]; nor[x].y=Q[1][2]; nor[x].z=Q[2][2]; }
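// A minimal host-side driver (not part of the original file) sketching how the single-matrix eig
// kernel above could be exercised for one symmetric 3x3 matrix. The buffer names, the <<<1,1>>>
// launch configuration, and the assumption that this snippet is compiled in the same translation
// unit as the kernels are all illustrative.
#include <cuda_runtime.h>
#include <cstdio>

int main () {
  // Symmetric test matrix (column major, as the kernels expect); its eigenvalues are 1, 2 and 4.
  const float h_M[9] = {2,1,0, 1,3,1, 0,1,2};
  float h_V[9] = {0}, h_L[9] = {0};
  float *d_M, *d_V, *d_L;
  cudaMalloc (&d_M, 9*sizeof(float));
  cudaMalloc (&d_V, 9*sizeof(float));
  cudaMalloc (&d_L, 9*sizeof(float));
  cudaMemcpy (d_M, h_M, 9*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemset (d_L, 0, 9*sizeof(float));     // eig3 only writes the diagonal entries of L
  eig<<<1,1>>> (d_M, d_V, d_L, 3, false);   // false selects the analytic dsyevv3 path; true uses the QL solver
  cudaMemcpy (h_V, d_V, 9*sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy (h_L, d_L, 9*sizeof(float), cudaMemcpyDeviceToHost);
  printf ("eigenvalues: %f %f %f\n", h_L[0], h_L[4], h_L[8]);
  cudaFree (d_M); cudaFree (d_V); cudaFree (d_L);
  return 0;
}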
11bbf55a5a9bee50b5810fb6cb187ec66e0d2db4.hip
// !!! This is a file automatically generated by hipify!!! // C++ headers #include <algorithm> #include <numeric> // CUDA runtime #include <hip/hip_runtime.h> // CMSSW headers #include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" #include "PixelRecHitGPUKernel.h" #include "gpuPixelRecHits.h" namespace { __global__ void setHitsLayerStart(uint32_t const* __restrict__ hitsModuleStart, pixelCPEforGPU::ParamsOnGPU const* cpeParams, uint32_t* hitsLayerStart) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto m = cpeParams->commonParams().isPhase2 ? phase2PixelTopology::numberOfLayers : phase1PixelTopology::numberOfLayers; assert(0 == hitsModuleStart[0]); if (i <= m) { hitsLayerStart[i] = hitsModuleStart[cpeParams->layerGeometry().layerStart[i]]; #ifdef GPU_DEBUG printf("LayerStart %d/%d at module %d: %d\n", i, m, cpeParams->layerGeometry().layerStart[i], hitsLayerStart[i]); #endif } } } // namespace namespace pixelgpudetails { TrackingRecHit2DGPU PixelRecHitGPUKernel::makeHitsAsync(SiPixelDigisCUDA const& digis_d, SiPixelClustersCUDA const& clusters_d, BeamSpotCUDA const& bs_d, pixelCPEforGPU::ParamsOnGPU const* cpeParams, bool isPhase2, hipStream_t stream) const { auto nHits = clusters_d.nClusters(); TrackingRecHit2DGPU hits_d( nHits, isPhase2, clusters_d.offsetBPIX2(), cpeParams, clusters_d.clusModuleStart(), stream); assert(hits_d.nMaxModules() == isPhase2 ? phase2PixelTopology::numberOfModules : phase1PixelTopology::numberOfModules); int activeModulesWithDigis = digis_d.nModules(); // protect from empty events if (activeModulesWithDigis) { int threadsPerBlock = 128; int blocks = activeModulesWithDigis; #ifdef GPU_DEBUG std::cout << "launching getHits kernel for " << blocks << " blocks" << std::endl; #endif hipLaunchKernelGGL(( gpuPixelRecHits::getHits), dim3(blocks), dim3(threadsPerBlock), 0, stream, cpeParams, bs_d.data(), digis_d.view(), digis_d.nDigis(), clusters_d.view(), hits_d.view()); cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG cudaCheck(hipDeviceSynchronize()); #endif // assuming full warp of threads is better than a smaller number... if (nHits) { hipLaunchKernelGGL(( setHitsLayerStart), dim3(1), dim3(32), 0, stream, clusters_d.clusModuleStart(), cpeParams, hits_d.hitsLayerStart()); cudaCheck(hipGetLastError()); auto nLayers = isPhase2 ? phase2PixelTopology::numberOfLayers : phase1PixelTopology::numberOfLayers; cms::cuda::fillManyFromVector(hits_d.phiBinner(), nLayers, hits_d.iphi(), hits_d.hitsLayerStart(), nHits, 256, hits_d.phiBinnerStorage(), stream); cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG cudaCheck(hipDeviceSynchronize()); #endif } } return hits_d; } } // namespace pixelgpudetails
11bbf55a5a9bee50b5810fb6cb187ec66e0d2db4.cu
// C++ headers #include <algorithm> #include <numeric> // CUDA runtime #include <cuda_runtime.h> // CMSSW headers #include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" #include "PixelRecHitGPUKernel.h" #include "gpuPixelRecHits.h" namespace { __global__ void setHitsLayerStart(uint32_t const* __restrict__ hitsModuleStart, pixelCPEforGPU::ParamsOnGPU const* cpeParams, uint32_t* hitsLayerStart) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto m = cpeParams->commonParams().isPhase2 ? phase2PixelTopology::numberOfLayers : phase1PixelTopology::numberOfLayers; assert(0 == hitsModuleStart[0]); if (i <= m) { hitsLayerStart[i] = hitsModuleStart[cpeParams->layerGeometry().layerStart[i]]; #ifdef GPU_DEBUG printf("LayerStart %d/%d at module %d: %d\n", i, m, cpeParams->layerGeometry().layerStart[i], hitsLayerStart[i]); #endif } } } // namespace namespace pixelgpudetails { TrackingRecHit2DGPU PixelRecHitGPUKernel::makeHitsAsync(SiPixelDigisCUDA const& digis_d, SiPixelClustersCUDA const& clusters_d, BeamSpotCUDA const& bs_d, pixelCPEforGPU::ParamsOnGPU const* cpeParams, bool isPhase2, cudaStream_t stream) const { auto nHits = clusters_d.nClusters(); TrackingRecHit2DGPU hits_d( nHits, isPhase2, clusters_d.offsetBPIX2(), cpeParams, clusters_d.clusModuleStart(), stream); assert(hits_d.nMaxModules() == isPhase2 ? phase2PixelTopology::numberOfModules : phase1PixelTopology::numberOfModules); int activeModulesWithDigis = digis_d.nModules(); // protect from empty events if (activeModulesWithDigis) { int threadsPerBlock = 128; int blocks = activeModulesWithDigis; #ifdef GPU_DEBUG std::cout << "launching getHits kernel for " << blocks << " blocks" << std::endl; #endif gpuPixelRecHits::getHits<<<blocks, threadsPerBlock, 0, stream>>>( cpeParams, bs_d.data(), digis_d.view(), digis_d.nDigis(), clusters_d.view(), hits_d.view()); cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaCheck(cudaDeviceSynchronize()); #endif // assuming full warp of threads is better than a smaller number... if (nHits) { setHitsLayerStart<<<1, 32, 0, stream>>>(clusters_d.clusModuleStart(), cpeParams, hits_d.hitsLayerStart()); cudaCheck(cudaGetLastError()); auto nLayers = isPhase2 ? phase2PixelTopology::numberOfLayers : phase1PixelTopology::numberOfLayers; cms::cuda::fillManyFromVector(hits_d.phiBinner(), nLayers, hits_d.iphi(), hits_d.hitsLayerStart(), nHits, 256, hits_d.phiBinnerStorage(), stream); cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaCheck(cudaDeviceSynchronize()); #endif } } return hits_d; } } // namespace pixelgpudetails
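// A standalone CPU illustration (not CMSSW code) of what setHitsLayerStart above computes:
// hitsModuleStart is the exclusive prefix sum of hits per module, layerStart[i] is the first
// module of layer i, so the first hit of layer i is hitsModuleStart[layerStart[i]], and the
// entry at index nLayers acts as an end marker holding the total hit count. All values below
// are made up for illustration only.
#include <cstdio>
#include <cstdint>

int main () {
  const uint32_t hitsModuleStart[] = {0, 4, 9, 9, 15, 20};  // prefix sum over 5 modules (last entry = total hits)
  const uint32_t layerStart[]      = {0, 2, 5};             // first module of layers 0 and 1, plus end marker
  const int nLayers = 2;
  uint32_t hitsLayerStart[3];
  for (int i = 0; i <= nLayers; ++i)        // note the <=, mirroring "if (i <= m)" in the kernel
    hitsLayerStart[i] = hitsModuleStart[layerStart[i]];
  for (int i = 0; i <= nLayers; ++i)
    printf ("layer %d starts at hit %u\n", i, hitsLayerStart[i]);
  return 0;
}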
2500d0b6f573aee6123a316223eda368c78e1b30.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2010-2011, NVIDIA Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA Corporation nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #include <nih/bvh/cuda/binned_sah_builder.h> #include <nih/basic/cuda_config.h> #define ACCESS_BINS(a,id,axis,index,stride) a[id + (axis*BINS + index)*stride] namespace nih { namespace cuda { namespace binned_sah { FORCE_INLINE NIH_DEVICE Bin operator+ (const Bin bin1, const Bin bin2) { Bin r; r.bmin = make_float4( fminf( bin1.bmin.x, bin2.bmin.x ), fminf( bin1.bmin.y, bin2.bmin.y ), fminf( bin1.bmin.z, bin2.bmin.z ), __int_as_float( __float_as_int( bin1.bmin.w ) + __float_as_int( bin2.bmin.w )) ); r.bmax = make_float4( fmaxf( bin1.bmax.x, bin2.bmax.x ), fmaxf( bin1.bmax.y, bin2.bmax.y ), fmaxf( bin1.bmax.z, bin2.bmax.z ), 0.0f ); return r; } FORCE_INLINE NIH_DEVICE Bin merge(const float3 bmin, const float3 bmax, const int32 size) { Bin r; r.bmin = make_float4( bmin.x, bmin.y, bmin.z, __int_as_float(size) ); r.bmax = make_float4( bmax.x, bmax.y, bmax.z, 0.0f ); return r; } FORCE_INLINE NIH_DEVICE Bin bin(const uint32 BINS, const Bins bins, const int id, const int axis, const int index, const int stride) { return merge( ACCESS_BINS(bins.bmin,id,axis,index,stride), ACCESS_BINS(bins.bmax,id,axis,index,stride), ACCESS_BINS(bins.size,id,axis,index,stride) ); } __global__ void init_bins_kernel(const uint32 BINS, const uint32 n_nodes, Bins bins) { const uint32 grid_size = gridDim.x * blockDim.x; // loop through all logical blocks associated to this physical one for (uint32 base_idx = blockIdx.x * blockDim.x; base_idx < n_nodes*BINS*3; base_idx += grid_size) { const uint32 id = threadIdx.x + base_idx; if (id >= n_nodes*BINS*3) continue; const float HUGE = 1.0e8f; bins.bmin[id] = make_float3( HUGE, HUGE, HUGE ); bins.bmax[id] = make_float3( -HUGE, -HUGE, -HUGE ); bins.size[id] = 0; } } void init_bins(const uint32 BINS, const uint32 n_nodes, Bins bins) { const uint32 BLOCK_SIZE = SAH_SINGLE_WARP ? 32 : 256; const size_t max_blocks = SAH_SINGLE_WARP ? 
1 : thrust::detail::backend::cuda::arch::max_active_blocks(init_bins_kernel, BLOCK_SIZE, 0); const size_t n_blocks = nih::min( max_blocks, (n_nodes*BINS*3 + BLOCK_SIZE-1) / BLOCK_SIZE ); hipLaunchKernelGGL(( init_bins_kernel), dim3(n_blocks),dim3(BLOCK_SIZE), 0, 0, BINS, n_nodes, bins ); hipDeviceSynchronize(); } /// evaluate the area of a bin FORCE_INLINE NIH_HOST_DEVICE float area(const Bin bin) { const float3 edge = make_float3( bin.bmax.x - bin.bmin.x, bin.bmax.y - bin.bmin.y, bin.bmax.z - bin.bmin.z ); return edge.x*edge.y + edge.x*edge.z + edge.y*edge.z; } /// evaluate the SAH cost of a given division in 2 bins FORCE_INLINE NIH_DEVICE float sah_cost( const Bin bin1, const Bin bin2) { return area( bin1 ) * __float_as_int(bin1.bmin.w) + area( bin2 ) * __float_as_int(bin2.bmin.w); } /// /// CUDA kernel: find the best SAH split plane for each node in the input task queue, /// and generate child tasks. /// __global__ void sah_split_kernel( const uint32 BINS, Bins bins, Queue qin, const int input_node_offset, Queue qout, uint32* n_output, int output_node_offset, Bvh_node* nodes, uint32* n_leaves, const uint32 max_leaf_size, const float max_cost) { typedef Bin Bin; const uint32 grid_size = gridDim.x * blockDim.x; __shared__ uint32 warp_broadcast[32]; const uint32 warp_tid = threadIdx.x & (CUDA_config::WARP_SIZE-1); const uint32 warp_id = threadIdx.x >> CUDA_config::log_WARP_SIZE; // loop through all logical blocks associated to this physical one for (uint32 base_idx = blockIdx.x * blockDim.x; base_idx < qin.size; base_idx += grid_size) { const uint32 id = threadIdx.x + base_idx; if (id >= qin.size) continue; int best_split = -1; Bin bestL; Bin bestR; const Bin bbox = qin.bins[id]; const int node_size = __float_as_int(bbox.bmin.w); const uint32 skip_node = binary_cast<uint32>(bbox.bmax.w); const int node_id = input_node_offset + id; // mark this node tentatively as a leaf node bool split = false; // and try to split it if necessary if (node_size > max_leaf_size) { float best_cost = max_cost * area( bbox ) * node_size; Bin bboxesR[ SAH_MAX_BINS ]; // perform a serial SAH evaluation (fast for small arrays) for (int axis = 0; axis < 3; axis++) { // right scan bboxesR[BINS - 1] = bin( BINS, bins, id, axis, BINS-1, qin.size ); for (int i = BINS - 2; i >= 0; i--) bboxesR[i] = bboxesR[i + 1] + bin( BINS, bins, id, axis, i, qin.size ); // left scan Bin bboxesL = bin( BINS, bins, id, axis, 0, qin.size ); for (int i = 0; i < BINS - 1; i++) { // skip invalid splits if (__float_as_int(bboxesL.bmin.w) != 0 && __float_as_int(bboxesR[i+1].bmin.w) != 0) { const float cost = sah_cost( bboxesL, bboxesR[i+1] ); if(cost < best_cost) { best_cost = cost; best_split = axis * BINS + i; bestL = bboxesL; bestR = bboxesR[i+1]; split = true; } } const Bin next_bin = bin( BINS, bins, id, axis, i+1, qin.size ); bboxesL = bboxesL + next_bin; } } // TODO: check whether the split failed and mark for // random-order middle split. 
} if(split) { // allocate 2 child tasks and their corresponding nodes const int new_offset = alloc<2>( split, n_output, warp_tid, warp_broadcast + warp_id ); const int new_split = new_offset; const int new_node = output_node_offset + new_offset; qin.splits[ id ] = Split( new_split, best_split ); // pack skip nodes in bmax.w bestL.bmax.w = binary_cast<float>(new_node+1); bestR.bmax.w = binary_cast<float>(skip_node); qout.bins[ new_split+0 ] = bestL; qout.bins[ new_split+1 ] = bestR; // set this node as a split nodes[ node_id ] = Bvh_node( Bvh_node::kInternal, new_node, skip_node ); #ifdef SAH_CHECKS if (__float_as_int(bestL.bmin.w) + __float_as_int(bestR.bmin.w) != __float_as_int(bbox.bmin.w)) { printf("split bbox :\n" \ " [%f, %f, %f], [%f, %f, %f] - %d\n" \ " [%f, %f, %f], [%f, %f, %f] - %d\n" \ " [%f, %f, %f], [%f, %f, %f] - %d\n %d\n", \ bbox.bmin.x,bbox.bmin.y,bbox.bmin.z, bbox.bmax.x,bbox.bmax.y,bbox.bmax.z, __float_as_int(bbox.bmin.w), bestL.bmin.x,bestL.bmin.y,bestL.bmin.z, bestL.bmax.x,bestL.bmax.y,bestL.bmax.z, __float_as_int(bestL.bmin.w), bestR.bmin.x,bestR.bmin.y,bestR.bmin.z, bestR.bmax.x,bestR.bmax.y,bestR.bmax.z, __float_as_int(bestR.bmin.w), new_split ); } #endif } else { // allocate a leaf const int leaf_id = alloc<1>( true, n_leaves, warp_tid, warp_broadcast + warp_id ); qin.splits[ id ] = Split( -1, leaf_id ); // set this node as a leaf nodes[ node_id ] = Bvh_node( Bvh_node::kLeaf, leaf_id, skip_node ); } } } /// /// Find the best SAH split plane for each node in the input task queue, and generate child tasks /// void sah_split( const uint32 BINS, Bins bins, Queue qin, const int input_node_offset, Queue qout, uint32* n_output, int output_node_offset, Bvh_node* nodes, uint32* n_leaves, const uint32 max_leaf_size, const float max_cost) { const uint32 BLOCK_SIZE = SAH_SINGLE_WARP ? 32 : 256; const size_t max_blocks = SAH_SINGLE_WARP ? 1 : thrust::detail::backend::cuda::arch::max_active_blocks(sah_split_kernel, BLOCK_SIZE, 0); const size_t n_blocks = nih::min( max_blocks, (qin.size + BLOCK_SIZE-1) / BLOCK_SIZE ); hipLaunchKernelGGL(( sah_split_kernel), dim3(n_blocks),dim3(BLOCK_SIZE), 0, 0, BINS, bins, qin, input_node_offset, qout, n_output, output_node_offset, nodes, n_leaves, max_leaf_size, max_cost ); hipDeviceSynchronize(); } /// /// CUDA kernel: assign the objects to their new task, or to a node if there was no split /// __global__ void distribute_objects_kernel( const uint32 BINS, Objects objects, const int n_objects, Queue queue, const int input_node_offset, Bins bins) { typedef Split Split; const uint32 grid_size = gridDim.x * blockDim.x; // loop through all logical blocks associated to this physical one for (uint32 base_idx = blockIdx.x * blockDim.x; base_idx < n_objects; base_idx += grid_size) { const uint32 idx = threadIdx.x + base_idx; if (idx >= n_objects) return; const uint32 id = idx; // check if the object has already been assigned to a node const int node_id = objects.node_ids[id]; if (node_id > -1) continue; const int4 bin_id = objects.bin_ids[idx]; const int split_id = objects.split_ids[id]; const Split split = queue.splits[split_id]; const int32 new_split_id = split.task_id; // if the node has not been split, we have to assign its objects if(new_split_id == -1) { const int32 leaf_id = split.best_plane; objects.node_ids[id] = leaf_id; continue; } // assign the object to its new task const int32 best_split = split.best_plane; const int32 selected_bin = best_split < BINS ? bin_id.x : (best_split < 2 * BINS ? 
bin_id.y : bin_id.z); // select the axis&bin of the best split objects.split_ids[id] = selected_bin <= (best_split & (BINS-1)) ? new_split_id : new_split_id + 1; } } /// /// Assign the objects to their new task, or to a node if there was no split /// void distribute_objects( const uint32 BINS, Objects objects, const int n_objects, Queue queue, const int input_node_offset, Bins bins) { const uint32 BLOCK_SIZE = SAH_SINGLE_WARP ? 32 : 256; const size_t max_blocks = SAH_SINGLE_WARP ? 1 : thrust::detail::backend::cuda::arch::max_active_blocks(distribute_objects_kernel, BLOCK_SIZE, 0); const size_t n_blocks = nih::min( max_blocks, (n_objects + BLOCK_SIZE-1) / BLOCK_SIZE ); hipLaunchKernelGGL(( distribute_objects_kernel), dim3(n_blocks),dim3(BLOCK_SIZE), 0, 0, BINS, objects, n_objects, queue, input_node_offset, bins ); hipDeviceSynchronize(); } /// /// CUDA kernel: setup the leaf array /// __global__ void setup_leaves_kernel( const int n_objects, const int32* leaf_ids, uint2* leaves) { const uint32 grid_size = gridDim.x * blockDim.x; // loop through all logical blocks associated to this physical one for (uint32 base_idx = blockIdx.x * blockDim.x; base_idx < n_objects; base_idx += grid_size) { const uint32 id = threadIdx.x + base_idx; if (id >= n_objects) continue; const int32 leaf_id = leaf_ids[ id ]; const int32 prev_leaf_id = id == 0 ? -1 : leaf_ids[ id-1 ]; const int32 next_leaf_id = id == n_objects-1 ? -1 : leaf_ids[ id+1 ]; if (prev_leaf_id != leaf_id) leaves[ leaf_id ].x = id; if (next_leaf_id != leaf_id) leaves[ leaf_id ].y = id+1; } } /// /// Setup the leaf array /// void setup_leaves( const int n_objects, const int32* leaf_ids, uint2* leaves) { const uint32 BLOCK_SIZE = SAH_SINGLE_WARP ? 32 : 256; const size_t max_blocks = SAH_SINGLE_WARP ? 1 : thrust::detail::backend::cuda::arch::max_active_blocks(setup_leaves_kernel, BLOCK_SIZE, 0); const size_t n_blocks = nih::min( max_blocks, (n_objects + BLOCK_SIZE-1) / BLOCK_SIZE ); hipLaunchKernelGGL(( setup_leaves_kernel), dim3(n_blocks),dim3(BLOCK_SIZE), 0, 0, n_objects, leaf_ids, leaves ); hipDeviceSynchronize(); } } // namespace binned_sah } // namespace cuda } // namespace nih
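// --- Editor's note: illustrative sketch, not part of the original nih sources. ---
// The Bin type above packs each bin's primitive count into the otherwise unused
// bmin.w component by reinterpreting the integer bits as a float
// (__int_as_float / __float_as_int), and sah_cost() evaluates
// area(L) * countL + area(R) * countR, where area() is half the box surface area.
// The small host-only sketch below mirrors that packing and cost formula with a
// hypothetical SimpleBin struct so the convention can be checked outside the
// kernels; the struct name and the numeric values are assumptions for
// illustration only.
#if 0   // illustrative only, not compiled with the builder
#include <cstring>
#include <cstdio>

struct SimpleBin { float bmin[3], bmax[3]; int count; };

static float half_area( const SimpleBin& b )
{
    const float ex = b.bmax[0] - b.bmin[0];
    const float ey = b.bmax[1] - b.bmin[1];
    const float ez = b.bmax[2] - b.bmin[2];
    return ex*ey + ex*ez + ey*ez;   // same expression as area(Bin) above
}

int main()
{
    // two candidate child bins of a split
    const SimpleBin L = { {0,0,0}, {1,1,1}, 10 };
    const SimpleBin R = { {1,0,0}, {4,1,1},  2 };

    // host equivalent of the __int_as_float / __float_as_int packing
    float packed;   std::memcpy( &packed,   &L.count, sizeof(float) );
    int   unpacked; std::memcpy( &unpacked, &packed,  sizeof(int) );

    const float cost = half_area(L) * L.count + half_area(R) * R.count;
    std::printf( "unpacked count = %d, SAH cost = %f\n", unpacked, cost );
    return 0;
}
#endif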
2500d0b6f573aee6123a316223eda368c78e1b30.cu
/* * Copyright (c) 2010-2011, NVIDIA Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA Corporation nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #include <nih/bvh/cuda/binned_sah_builder.h> #include <nih/basic/cuda_config.h> #define ACCESS_BINS(a,id,axis,index,stride) a[id + (axis*BINS + index)*stride] namespace nih { namespace cuda { namespace binned_sah { FORCE_INLINE NIH_DEVICE Bin operator+ (const Bin bin1, const Bin bin2) { Bin r; r.bmin = make_float4( fminf( bin1.bmin.x, bin2.bmin.x ), fminf( bin1.bmin.y, bin2.bmin.y ), fminf( bin1.bmin.z, bin2.bmin.z ), __int_as_float( __float_as_int( bin1.bmin.w ) + __float_as_int( bin2.bmin.w )) ); r.bmax = make_float4( fmaxf( bin1.bmax.x, bin2.bmax.x ), fmaxf( bin1.bmax.y, bin2.bmax.y ), fmaxf( bin1.bmax.z, bin2.bmax.z ), 0.0f ); return r; } FORCE_INLINE NIH_DEVICE Bin merge(const float3 bmin, const float3 bmax, const int32 size) { Bin r; r.bmin = make_float4( bmin.x, bmin.y, bmin.z, __int_as_float(size) ); r.bmax = make_float4( bmax.x, bmax.y, bmax.z, 0.0f ); return r; } FORCE_INLINE NIH_DEVICE Bin bin(const uint32 BINS, const Bins bins, const int id, const int axis, const int index, const int stride) { return merge( ACCESS_BINS(bins.bmin,id,axis,index,stride), ACCESS_BINS(bins.bmax,id,axis,index,stride), ACCESS_BINS(bins.size,id,axis,index,stride) ); } __global__ void init_bins_kernel(const uint32 BINS, const uint32 n_nodes, Bins bins) { const uint32 grid_size = gridDim.x * blockDim.x; // loop through all logical blocks associated to this physical one for (uint32 base_idx = blockIdx.x * blockDim.x; base_idx < n_nodes*BINS*3; base_idx += grid_size) { const uint32 id = threadIdx.x + base_idx; if (id >= n_nodes*BINS*3) continue; const float HUGE = 1.0e8f; bins.bmin[id] = make_float3( HUGE, HUGE, HUGE ); bins.bmax[id] = make_float3( -HUGE, -HUGE, -HUGE ); bins.size[id] = 0; } } void init_bins(const uint32 BINS, const uint32 n_nodes, Bins bins) { const uint32 BLOCK_SIZE = SAH_SINGLE_WARP ? 32 : 256; const size_t max_blocks = SAH_SINGLE_WARP ? 
1 : thrust::detail::backend::cuda::arch::max_active_blocks(init_bins_kernel, BLOCK_SIZE, 0); const size_t n_blocks = nih::min( max_blocks, (n_nodes*BINS*3 + BLOCK_SIZE-1) / BLOCK_SIZE ); init_bins_kernel<<<n_blocks,BLOCK_SIZE>>>( BINS, n_nodes, bins ); cudaThreadSynchronize(); } /// evaluate the area of a bin FORCE_INLINE NIH_HOST_DEVICE float area(const Bin bin) { const float3 edge = make_float3( bin.bmax.x - bin.bmin.x, bin.bmax.y - bin.bmin.y, bin.bmax.z - bin.bmin.z ); return edge.x*edge.y + edge.x*edge.z + edge.y*edge.z; } /// evaluate the SAH cost of a given division in 2 bins FORCE_INLINE NIH_DEVICE float sah_cost( const Bin bin1, const Bin bin2) { return area( bin1 ) * __float_as_int(bin1.bmin.w) + area( bin2 ) * __float_as_int(bin2.bmin.w); } /// /// CUDA kernel: find the best SAH split plane for each node in the input task queue, /// and generate child tasks. /// __global__ void sah_split_kernel( const uint32 BINS, Bins bins, Queue qin, const int input_node_offset, Queue qout, uint32* n_output, int output_node_offset, Bvh_node* nodes, uint32* n_leaves, const uint32 max_leaf_size, const float max_cost) { typedef Bin Bin; const uint32 grid_size = gridDim.x * blockDim.x; __shared__ uint32 warp_broadcast[32]; const uint32 warp_tid = threadIdx.x & (CUDA_config::WARP_SIZE-1); const uint32 warp_id = threadIdx.x >> CUDA_config::log_WARP_SIZE; // loop through all logical blocks associated to this physical one for (uint32 base_idx = blockIdx.x * blockDim.x; base_idx < qin.size; base_idx += grid_size) { const uint32 id = threadIdx.x + base_idx; if (id >= qin.size) continue; int best_split = -1; Bin bestL; Bin bestR; const Bin bbox = qin.bins[id]; const int node_size = __float_as_int(bbox.bmin.w); const uint32 skip_node = binary_cast<uint32>(bbox.bmax.w); const int node_id = input_node_offset + id; // mark this node tentatively as a leaf node bool split = false; // and try to split it if necessary if (node_size > max_leaf_size) { float best_cost = max_cost * area( bbox ) * node_size; Bin bboxesR[ SAH_MAX_BINS ]; // perform a serial SAH evaluation (fast for small arrays) for (int axis = 0; axis < 3; axis++) { // right scan bboxesR[BINS - 1] = bin( BINS, bins, id, axis, BINS-1, qin.size ); for (int i = BINS - 2; i >= 0; i--) bboxesR[i] = bboxesR[i + 1] + bin( BINS, bins, id, axis, i, qin.size ); // left scan Bin bboxesL = bin( BINS, bins, id, axis, 0, qin.size ); for (int i = 0; i < BINS - 1; i++) { // skip invalid splits if (__float_as_int(bboxesL.bmin.w) != 0 && __float_as_int(bboxesR[i+1].bmin.w) != 0) { const float cost = sah_cost( bboxesL, bboxesR[i+1] ); if(cost < best_cost) { best_cost = cost; best_split = axis * BINS + i; bestL = bboxesL; bestR = bboxesR[i+1]; split = true; } } const Bin next_bin = bin( BINS, bins, id, axis, i+1, qin.size ); bboxesL = bboxesL + next_bin; } } // TODO: check whether the split failed and mark for // random-order middle split. 
} if(split) { // allocate 2 child tasks and their corresponding nodes const int new_offset = alloc<2>( split, n_output, warp_tid, warp_broadcast + warp_id ); const int new_split = new_offset; const int new_node = output_node_offset + new_offset; qin.splits[ id ] = Split( new_split, best_split ); // pack skip nodes in bmax.w bestL.bmax.w = binary_cast<float>(new_node+1); bestR.bmax.w = binary_cast<float>(skip_node); qout.bins[ new_split+0 ] = bestL; qout.bins[ new_split+1 ] = bestR; // set this node as a split nodes[ node_id ] = Bvh_node( Bvh_node::kInternal, new_node, skip_node ); #ifdef SAH_CHECKS if (__float_as_int(bestL.bmin.w) + __float_as_int(bestR.bmin.w) != __float_as_int(bbox.bmin.w)) { printf("split bbox :\n" \ " [%f, %f, %f], [%f, %f, %f] - %d\n" \ " [%f, %f, %f], [%f, %f, %f] - %d\n" \ " [%f, %f, %f], [%f, %f, %f] - %d\n %d\n", \ bbox.bmin.x,bbox.bmin.y,bbox.bmin.z, bbox.bmax.x,bbox.bmax.y,bbox.bmax.z, __float_as_int(bbox.bmin.w), bestL.bmin.x,bestL.bmin.y,bestL.bmin.z, bestL.bmax.x,bestL.bmax.y,bestL.bmax.z, __float_as_int(bestL.bmin.w), bestR.bmin.x,bestR.bmin.y,bestR.bmin.z, bestR.bmax.x,bestR.bmax.y,bestR.bmax.z, __float_as_int(bestR.bmin.w), new_split ); } #endif } else { // allocate a leaf const int leaf_id = alloc<1>( true, n_leaves, warp_tid, warp_broadcast + warp_id ); qin.splits[ id ] = Split( -1, leaf_id ); // set this node as a leaf nodes[ node_id ] = Bvh_node( Bvh_node::kLeaf, leaf_id, skip_node ); } } } /// /// Find the best SAH split plane for each node in the input task queue, and generate child tasks /// void sah_split( const uint32 BINS, Bins bins, Queue qin, const int input_node_offset, Queue qout, uint32* n_output, int output_node_offset, Bvh_node* nodes, uint32* n_leaves, const uint32 max_leaf_size, const float max_cost) { const uint32 BLOCK_SIZE = SAH_SINGLE_WARP ? 32 : 256; const size_t max_blocks = SAH_SINGLE_WARP ? 1 : thrust::detail::backend::cuda::arch::max_active_blocks(sah_split_kernel, BLOCK_SIZE, 0); const size_t n_blocks = nih::min( max_blocks, (qin.size + BLOCK_SIZE-1) / BLOCK_SIZE ); sah_split_kernel<<<n_blocks,BLOCK_SIZE>>> ( BINS, bins, qin, input_node_offset, qout, n_output, output_node_offset, nodes, n_leaves, max_leaf_size, max_cost ); cudaThreadSynchronize(); } /// /// CUDA kernel: assign the objects to their new task, or to a node if there was no split /// __global__ void distribute_objects_kernel( const uint32 BINS, Objects objects, const int n_objects, Queue queue, const int input_node_offset, Bins bins) { typedef Split Split; const uint32 grid_size = gridDim.x * blockDim.x; // loop through all logical blocks associated to this physical one for (uint32 base_idx = blockIdx.x * blockDim.x; base_idx < n_objects; base_idx += grid_size) { const uint32 idx = threadIdx.x + base_idx; if (idx >= n_objects) return; const uint32 id = idx; // check if the object has already been assigned to a node const int node_id = objects.node_ids[id]; if (node_id > -1) continue; const int4 bin_id = objects.bin_ids[idx]; const int split_id = objects.split_ids[id]; const Split split = queue.splits[split_id]; const int32 new_split_id = split.task_id; // if the node has not been split, we have to assign its objects if(new_split_id == -1) { const int32 leaf_id = split.best_plane; objects.node_ids[id] = leaf_id; continue; } // assign the object to its new task const int32 best_split = split.best_plane; const int32 selected_bin = best_split < BINS ? bin_id.x : (best_split < 2 * BINS ? 
bin_id.y : bin_id.z); // select the axis&bin of the best split objects.split_ids[id] = selected_bin <= (best_split & (BINS-1)) ? new_split_id : new_split_id + 1; } } /// /// Assign the objects to their new task, or to a node if there was no split /// void distribute_objects( const uint32 BINS, Objects objects, const int n_objects, Queue queue, const int input_node_offset, Bins bins) { const uint32 BLOCK_SIZE = SAH_SINGLE_WARP ? 32 : 256; const size_t max_blocks = SAH_SINGLE_WARP ? 1 : thrust::detail::backend::cuda::arch::max_active_blocks(distribute_objects_kernel, BLOCK_SIZE, 0); const size_t n_blocks = nih::min( max_blocks, (n_objects + BLOCK_SIZE-1) / BLOCK_SIZE ); distribute_objects_kernel<<<n_blocks,BLOCK_SIZE>>> ( BINS, objects, n_objects, queue, input_node_offset, bins ); cudaThreadSynchronize(); } /// /// CUDA kernel: setup the leaf array /// __global__ void setup_leaves_kernel( const int n_objects, const int32* leaf_ids, uint2* leaves) { const uint32 grid_size = gridDim.x * blockDim.x; // loop through all logical blocks associated to this physical one for (uint32 base_idx = blockIdx.x * blockDim.x; base_idx < n_objects; base_idx += grid_size) { const uint32 id = threadIdx.x + base_idx; if (id >= n_objects) continue; const int32 leaf_id = leaf_ids[ id ]; const int32 prev_leaf_id = id == 0 ? -1 : leaf_ids[ id-1 ]; const int32 next_leaf_id = id == n_objects-1 ? -1 : leaf_ids[ id+1 ]; if (prev_leaf_id != leaf_id) leaves[ leaf_id ].x = id; if (next_leaf_id != leaf_id) leaves[ leaf_id ].y = id+1; } } /// /// Setup the leaf array /// void setup_leaves( const int n_objects, const int32* leaf_ids, uint2* leaves) { const uint32 BLOCK_SIZE = SAH_SINGLE_WARP ? 32 : 256; const size_t max_blocks = SAH_SINGLE_WARP ? 1 : thrust::detail::backend::cuda::arch::max_active_blocks(setup_leaves_kernel, BLOCK_SIZE, 0); const size_t n_blocks = nih::min( max_blocks, (n_objects + BLOCK_SIZE-1) / BLOCK_SIZE ); setup_leaves_kernel<<<n_blocks,BLOCK_SIZE>>> ( n_objects, leaf_ids, leaves ); cudaThreadSynchronize(); } } // namespace binned_sah } // namespace cuda } // namespace nih
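// --- Editor's note: illustrative sketch, not part of the original nih sources. ---
// This .cu file and the .hip file above it form a hipify pair; the functional
// difference is limited to the kernel-launch and synchronization calls. The
// minimal pair below shows that mapping in isolation (triple-chevron launch plus
// cudaThreadSynchronize(), a deprecated alias of cudaDeviceSynchronize(), on the
// CUDA side; hipLaunchKernelGGL plus hipDeviceSynchronize() on the HIP side).
// The kernel name fill_kernel and the helper launch_fill are hypothetical
// examples, not taken from the sources.
#if 0   // illustrative only
__global__ void fill_kernel( int n, int* out )
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = i;
}

void launch_fill( int n, int* d_out )
{
    const int BLOCK_SIZE = 256;
    const int n_blocks   = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;

    // CUDA form, as used throughout this .cu file:
    //   fill_kernel<<<n_blocks, BLOCK_SIZE>>>( n, d_out );
    //   cudaThreadSynchronize();

    // HIP form produced by hipify, as used in the paired .hip file:
    hipLaunchKernelGGL( fill_kernel, dim3(n_blocks), dim3(BLOCK_SIZE), 0, 0, n, d_out );
    hipDeviceSynchronize();
}
#endif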
021a7855072c5e54f2509a8b197be678cf3f9128.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include "timerc.h" #define PHI 0x9e3779b9 // int ITERATIONS = INT_MAX/16; #define ITERATIONS 1024*128 #define THRESHOLD 0.5 #define THREADSPERBLOCK 1024 #define NUMSTREAMS 16 __device__ static uint32_t Q[4096], c = 362436; __device__ void init_rand(uint32_t x) { int i; Q[0] = x; Q[1] = x + PHI; Q[2] = x + PHI + PHI; for (i = 3; i < 4096; i++) Q[i] = Q[i - 3] ^ Q[i - 2] ^ PHI ^ i; } __device__ uint32_t rand_cmwc(void) { uint64_t t, a = 18782LL; static uint32_t i = 4095; uint32_t x, r = 0xfffffffe; i = (i + 1) & 4095; t = a * Q[i] + c; c = (t >> 32); x = t + c; if (x < c) { x++; c++; } return (Q[i] = r - x); } __device__ int randInRange(int min, int max, uint32_t seed) { return min + rand_cmwc() % (max + 1 - min); } /* * RETURNS: d, distance from point p to the line Ax + By = C */ __host__ __device__ double distanceFromLine(double x, double y, double a, double b, double c) { double d = abs((a * x + b * y + c)) / (sqrt(a * a + b * b)); return d; } /* * RETURNS: [A, B, C] for a line equation */ __host__ __device__ double *lineFromPoints(double *out, double x1, double y1, double x2, double y2) { out[0] = y1 - y2; out[1] = x2 - x1; out[2] = (x1-x2)*y1 + (y2-y1)*x1; return out; } // https://blog.csdn.net/qianhen123/article/details/80785091 // clc; // clear all; // % // src=[1,4,6;3,7,11]; // p=[src;ones(1,3)]; // m=[1.23,0.67,2.5; // -3.45,1.18,-4.3; // 0, 0, 1]; // q=m*p; A=M*B // %1. // x=1; // y=2; // px01=p(x,1)-p(x,2);px12=p(x,2)-p(x,3);px02=p(x,1)-p(x,3); // py01=p(y,1)-p(y,2);py12=p(y,2)-p(y,3);py02=p(y,1)-p(y,3); // qx01=q(x,1)-q(x,2);qx12=q(x,2)-q(x,3);qx02=q(x,1)-q(x,3); // qy01=q(y,1)-q(y,2);qy12=q(y,2)-q(y,3);qy02=q(y,1)-q(y,3); // %2. // det_m=px02*py12-px12*py02; // m00=(qx01*py12-qx12*py01)/(det_m); // m01=(qx12*px01-qx01*px12)/(det_m); // m10=(qy01*py12-qy12*py01)/(det_m); // m11=(qy12*px01-qy01*px12)/(det_m); // %3. // m02=q(x,1)-m00*p(x,1)-m01*p(y,1); // m12=q(y,1)-m10*p(x,1)-m11*p(y,1); // %4. // affine_matrix=[m00,m01,m02; // m10,m11,m12; // 0, 0, 1]; __host__ __device__ double model_residual(const double * const affine_matrix, const double& A_x, const double& A_y, const double& B_x, const double& B_y) { // q=m*p; A=M*B double pre_x = B_x * affine_matrix[0] + B_y * affine_matrix[1] + affine_matrix[2]; double pre_y = B_x * affine_matrix[3] + B_y * affine_matrix[4] + affine_matrix[5]; double d = sqrt( (pre_x - A_x)*(pre_x - A_x) + (pre_y - A_y)*(pre_y - A_y) );//residual return d; } // __host__ __device__ double *AffineModelFromPoints(double *affine_matrix, const double& A_x1, const double& A_y1, const double& A_x2, const double& A_y2, const double& A_x3, const double& A_y3, const double& B_x1, const double& B_y1, const double& B_x2, const double& B_y2, const double& B_x3, const double& B_y3) { // q=m*p; A=M*B double px12 = B_x1 - B_x2; double px13 = B_x1 - B_x3; double px23 = B_x2 - B_x3; double py12 = B_y1 - B_y2; double py13 = B_y1 - B_y3; double py23 = B_y2 - B_y3; double qx12 = A_x1 - A_x2; double qx13 = A_x1 - A_x3; double qx23 = A_x2 - A_x3; double qy12 = A_y1 - A_y2; double qy13 = A_y1 - A_y3; double qy23 = A_y2 - A_y3; // %2. double det_p=px13*py23-px23*py13; double m00=(qx12*py23-qx23*py12)/(det_p); double m01=(qx23*px12-qx12*px23)/(det_p); double m10=(qy12*py23-qy23*py12)/(det_p); double m11=(qy23*px12-qy12*px23)/(det_p); // %3. 
double m02 = A_x1 - m00 * B_x1 - m01 * B_y1; double m12 = A_y1 - m10 * B_x1 - m11 * B_y1; // %4. // affine_matrix=[m00,m12,m13; // m10,m11,m12; // 0, 0, 1]; affine_matrix[0] = m00; affine_matrix[1] = m01; affine_matrix[2] = m02; affine_matrix[3] = m10; affine_matrix[4] = m11; affine_matrix[5] = m12; affine_matrix[6] = 0; affine_matrix[7] = 0; affine_matrix[8] = 1; return affine_matrix; } /* * data A set of observations. * lineArr - Container for optimal model parameters outputted by the algorithm * max_trials Maximum number of iterations allowed in the algorithm. * t threshold value to determine data points that are fit well by model. * d Number of close data points required to assert that a model fits well to data. * seed - Random seed for a RNG on device * numStreams - Number of streams running this function. Set to 1 for testing multi-thread performance * stream - Index of the current stream used to offset data and lineArr. Used for debugging [xi yi] xiyi i M[A_pt_i B_pt_i] A_pt_iB_pt_i A=MB M */ // d_A_points, d_B_points, matched_pts, scopeSize, inline_threshold, stop_sample_num, seed, d_affineModel, maxinlines_nums_PerThread __global__ void ransac_gpu_optimal(const double *A_Pts, const double *B_Pts, int matched_pts, int scopeSize, int inline_threshold, int stop_sample_num, uint32_t seed, double *d_affineModel_Arr, int* maxinlines_nums_PerThread) { init_rand(seed); maxinlines_nums_PerThread[threadIdx.x] = 0; int r, inliers; int maxInliers = 0; // int scopeSize = max_trials / THREADSPERBLOCK / numStreams; // int offset = 2 * threadIdx.x * scopeSize;//scopeSize step double bestA, bestB, bestC, A_x1, A_y1, A_x2, A_y2, A_x3, A_y3,B_x1, B_y1, B_x2, B_y2, B_x3, B_y3, residual; // double *A_shiftedData = &A_Pts[offset]; // double *B_shiftedData = &B_Pts[offset]; double *d_affineModel = &d_affineModel_Arr[threadIdx.x * 9]; // thread responsiable for data in scope for (int i=0; i < scopeSize; i++) { inliers = 0; /******************* CHOOSING RANDOM LINE *******************/ // Choosing first random point r = randInRange(0, 2*matched_pts - 1, seed); // printf("r: %d \n", r); A_x1 = A_Pts[r]; A_y1 = A_Pts[r+1]; B_x1 = B_Pts[r]; B_y1 = B_Pts[r+1]; // Choosing second random point r = randInRange(0, 2*matched_pts - 1, seed); // printf("r: %d \n", r); A_x2 = A_Pts[r]; A_y2 = A_Pts[r+1]; B_x2 = B_Pts[r]; B_y2 = B_Pts[r+1]; // Choosing second random point r = randInRange(0, 2*matched_pts - 1, seed); // printf("r: %d \n", r); A_x3 = A_Pts[r]; A_y3 = A_Pts[r+1]; B_x3 = B_Pts[r]; B_y3 = B_Pts[r+1]; // Modeling a line between those two points // line = lineFromPoints(line, x1, y1, x2, y2); // printf("start get model \n"); d_affineModel = AffineModelFromPoints(d_affineModel, A_x1, A_y1, A_x2, A_y2, A_x3, A_y3, B_x1, B_y1, B_x2, B_y2, B_x3, B_y3); /*********************** FINDING INLIERS FOR LINE ***********************/ // printf("start calculate residual \n"); for (int j=0; j < 2*matched_pts; j=j+2) { A_x1 = A_Pts[j]; A_y1 = A_Pts[j + 1]; B_x1 = B_Pts[j]; B_y1 = B_Pts[j + 1]; // dist = distanceFromLine(x1, y1, line[0], line[1], line[2]); residual = model_residual(d_affineModel, A_x1, A_y1, B_x1, B_y1); // if(threadIdx.x==56) // printf("%d residual:%f A_x1:%f B_y1:%f\n",threadIdx.x, residual, A_x1, B_y1); if (residual <= 20) { inliers++; } } if (inliers > maxInliers) { // printf("inlines %d \n", inliers); maxInliers = inliers; // bestA = line[0]; // bestB = line[1]; // bestC = line[2]; } // if (maxInliers >= ( stop_sample_num / THREADSPERBLOCK)) { // break; // } } maxinlines_nums_PerThread[threadIdx.x] = 
maxInliers; printf("maxinlines_nums_PerThread[threadIdx.x]: %d = maxInliers: %d \n", maxinlines_nums_PerThread[threadIdx.x] , maxInliers); // Some reduction // if (bestA == -bestB) { // bestA = 1; // bestB = -1; // } // lineArr[threadIdx.x * 3] = bestA; // lineArr[threadIdx.x * 3 + 1] = bestB; // lineArr[threadIdx.x * 3 + 2] = bestC; // Print out only some of the output to check correctness // if (threadIdx.x == 0 && stream % 4 == 0) { // printf("GPU w/ Streams: A=%f | B=%f | C=%f \n", bestA, bestB, bestC); // } // __syncthreads(); } void ransac_cpu(double *data, double *line, int k, int t, int d){ srand(time(NULL)); int r, inliers; int maxInliers = 0; double bestA, bestB, bestC, x1, y1, x2, y2, dist; for (int i=0; i < k; i++) { inliers = 0; /******************* CHOOSING RANDOM LINE *******************/ // Choosing first random point r = 1 + rand() % k; x1 = data[r]; y1 = data[r+1]; // Choosing second random point r = 1 + rand() % k; x2 = data[r]; y2 = data[r+1]; // Modeling a line between those two points line = lineFromPoints(line, x1, y1, x2, y2); /*********************** FINDING INLIERS FOR LINE ***********************/ for (int j=0; j < k; j=j+2) { x1 = data[j*2]; y1 = data[j*2 + 1]; dist = distanceFromLine(x1, y1, line[0], line[1], line[2]); if (dist <= t) { inliers++; } } if (inliers > maxInliers) { maxInliers = inliers; bestA = line[0]; bestB = line[1]; bestC = line[2]; } if (maxInliers >= d) break; } // Some reduction if (bestA == -bestB) { bestA = 1; bestB = -1; } line[0] = bestA; line[1] = bestB; line[2] = bestC; } int ransac_gpu(double *A_points, double *B_points, const int matched_pts, int min_samples=3, float inline_threshold=20, int max_trials=4096){ if (max_trials % 32 != 0){ return -1; } // int threads_nums = max_trials <= THREADSPERBLOCK ? 
max_trials:THREADSPERBLOCK; printf("threads_nums: %d \n", threads_nums); int scopeSize = max_trials / threads_nums ;/// numStreams printf("scopeSize: %d \n", scopeSize); double *d_A_points, *d_B_points; hipMalloc((void **) &d_A_points, (2*matched_pts*sizeof(double))); hipMalloc((void **) &d_B_points, (2*matched_pts*sizeof(double))); hipMemcpy(d_A_points, A_points, (2*matched_pts*sizeof(double)), hipMemcpyHostToDevice); hipMemcpy(d_B_points, B_points, (2*matched_pts*sizeof(double)), hipMemcpyHostToDevice); // model parameter double *affineModel; double *d_affineModel; // Each thread will need it's own line equation container affineModel = (double *) malloc(9 * threads_nums * sizeof(double)); hipMalloc((void **) &d_affineModel, (9 * threads_nums * sizeof(double))); // threadinlines int *maxinlines_nums_PerThread; int *d_maxinlines_nums_PerThread; // Each thread will need it's own line equation container maxinlines_nums_PerThread = (int *) malloc(threads_nums * sizeof(int)); hipMalloc((void **) &d_maxinlines_nums_PerThread, (threads_nums * sizeof(int))); hipMemcpy(d_maxinlines_nums_PerThread, maxinlines_nums_PerThread, (threads_nums * sizeof(int)), hipMemcpyHostToDevice); int stop_sample_num = 8*matched_pts/10; uint32_t seed = time(NULL); printf("stop_sample_num: %d \n", stop_sample_num); hipLaunchKernelGGL(( ransac_gpu_optimal), dim3(1), dim3(threads_nums), 0, 0, d_A_points, d_B_points, matched_pts, scopeSize, inline_threshold, stop_sample_num, seed, d_affineModel, d_maxinlines_nums_PerThread); hipMemcpy(maxinlines_nums_PerThread, d_maxinlines_nums_PerThread, (threads_nums * sizeof(int)), hipMemcpyDeviceToHost); hipMemcpy(affineModel, d_affineModel, (9 * threads_nums * sizeof(double)), hipMemcpyDeviceToHost); // int max_inlines_nums = 0; for (int j=0; j < threads_nums; ++j) { // x1 = data[j*2]; // y1 = data[j*2 + 1]; // dist = distanceFromLine(x1, y1, line[0], line[1], line[2]); // if (dist <= t) { // inliers++; // } if (max_inlines_nums < maxinlines_nums_PerThread[j]) max_inlines_nums = maxinlines_nums_PerThread[j]; printf("maxinlines_nums_PerThread[j]: %d \n", maxinlines_nums_PerThread[j]); } printf("max_inlines_nums: %d \n", max_inlines_nums); hipFree(d_A_points); hipFree(d_B_points); hipFree(d_affineModel); hipFree(d_maxinlines_nums_PerThread); free(affineModel); free(maxinlines_nums_PerThread); return max_inlines_nums; } #include <stdio.h> #include <stdlib.h> #include <string.h> #define MAX_LINE 1024 int read_data(const char* file_path, double *A_points, double *B_points) { char buf[MAX_LINE]; /**/ FILE *fp; /**/ int len; /**/ const char *separator = " "; if((fp = fopen(file_path,"r")) == NULL) { perror("fail to read"); exit (1) ; } int line = 0; while(fgets(buf, MAX_LINE,fp) != NULL) { len = strlen(buf); buf[len-1] = '\0'; /**/ printf("%s %d \n",buf,len - 1); char *pNext; int count = 0; if (buf == NULL || strlen(buf) == 0) //0 return 0; if (separator == NULL || strlen(separator) == 0) // return 0; pNext = (char *)strtok(buf,separator); //(char *)() // printf(" %s %d", pNext, atoi(pNext)); // while(pNext != NULL) { // *dest++ = pNext; // ++count; // pNext = (char *)strtok(NULL,separator); //(char *) // } A_points[2*line] = atof(pNext); // printf(" %f %f \n", atof(pNext), A_points[2*line]); pNext = (char *)strtok(NULL,separator); //(char *) // printf(" %s %d", pNext, atoi(pNext)); A_points[2*line+1] = atof(pNext); pNext = (char *)strtok(NULL,separator); //(char *) // printf(" %s %d", pNext, atoi(pNext)); B_points[2*line] = atof(pNext); pNext = (char *)strtok(NULL,separator); //(char *) 
// printf(" %s %d\n", pNext, atoi(pNext)); B_points[2*line+1] = atof(pNext); printf("%f %f %f %f \n",A_points[2*line],A_points[2*line+1],B_points[2*line],B_points[2*line+1]); line += 1; } return line; } int main(int argc, char **argv) { if(argc !=2){ printf("please input filename\n"); return 0; } char * filename = argv[1]; const int matched_pts=182; double *A_points = (double *)malloc(2*matched_pts*sizeof(double)); double *B_points = (double *)malloc(2*matched_pts*sizeof(double)); char *path = "/media/liesmars/67038e2e-f9b3-41a0-b779-e53a1ca1fd8a1/scene_pic/pic-web-service/src/streetView_index/utils/test_data/6.txt"; int match_pts = read_data(filename, A_points, B_points); printf("match pts: %d \n", match_pts); float gpu_multi_thread_time; gstart(); ransac_gpu(A_points, B_points, match_pts); gend(&gpu_multi_thread_time); printf("GPU w/ Multi-thread time: %f\n", gpu_multi_thread_time); free(A_points); free(B_points); // uint32_t seed = time(NULL); // int match_pts_num = 100;//1024 // srand(seed); // int r; // int pass = ITERATIONS / 2;//1/2 // /* // * Every two elements corresponds to x,y at time t. // * // */ // // double *A_points = (double *) malloc(2*match_pts_num*sizeof(double)); // // double *d_A_points; // // double *A_points = (double *) malloc(2*match_pts_num*sizeof(double)); // // double *d_A_points; // // // Move points with velocity (vx, vy) // // double vx = 100.0; // // double vy = 100.0; // // for (int j=0; j < ITERATIONS; j++) { // // if (j % 10 == 0) { // // r = 0 + rand() % ITERATIONS; // // points[j*2] = r; // // r = 0 + rand() % ITERATIONS; // // points[j*2+1] = r; // // } else { // // points[j*2] = j-1 + vx; // // points[j*2+1] = j-1 + vy; // // } // // } // // Shell to be used for outputting results in the form of line equation // double *line = (double *) malloc(3*sizeof(double)); // // Copy points to file // FILE *fp; // fp = fopen("p.txt", "w+"); // for (int i=0; i < 2*ITERATIONS; i++) { // fprintf(fp,"%f ", points[i]); // } // fclose(fp); // float cpu_time; // cstart(); // ransac_cpu(points, line, ITERATIONS, THRESHOLD, pass); // cend(&cpu_time); // printf("CPU: A=%f | B=%f | C=%f \n", line[0], line[1], line[2]); // puts("***"); // // hipMalloc((void **) &d_points, (2*ITERATIONS*sizeof(double))); // // hipMemcpy(d_points, points, (2*ITERATIONS*sizeof(double)), hipMemcpyHostToDevice); // double *affineModel; // double *d_affineModel; // // Each thread will need it's own line equation container // affineModel = (double *) malloc(9*THREADSPERBLOCK*sizeof(double)); // hipMalloc((void **) &d_affineModel, (9*THREADSPERBLOCK*sizeof(double))); // hipMemcpy(d_affineModel, affineModel, (9*THREADSPERBLOCK*sizeof(double)), hipMemcpyHostToDevice); // float gpu_multi_thread_time; // gstart(); // ransac_gpu_optimal<<<1,THREADSPERBLOCK>>>(d_points, d_affineModel, ITERATIONS, THRESHOLD, pass / THREADSPERBLOCK, seed, 1, 0); // gend(&gpu_multi_thread_time); // hipMemcpy(affineModel, d_affineModel, (9*THREADSPERBLOCK*sizeof(double)), hipMemcpyDeviceToHost); // // // double avgA = 0; // double avgB = 0; // double avgC = 0; // for (int i=0; i<3*THREADSPERBLOCK; i=i+3) { // avgA = avgA + lineArr[i]; // avgB = avgB + lineArr[i+1]; // avgC = avgC + lineArr[i+2]; // } // avgA = avgA / THREADSPERBLOCK; // avgB = avgB / THREADSPERBLOCK; // avgC = avgC / THREADSPERBLOCK; // printf("GPU w/Threads: A=%f | B=%f | C=%f \n", avgA, avgB, avgC); // puts("***"); // hipDeviceSynchronize(); // double *lineStreamArr; // double *d_lineStreamArr; // // Each thread will need it's only line equation 
container // lineStreamArr = (double *) malloc(3*NUMSTREAMS*THREADSPERBLOCK*sizeof(double)); // hipMalloc((void **) &d_lineStreamArr, (3*NUMSTREAMS*THREADSPERBLOCK*sizeof(double))); // int streamSize = (2 * ITERATIONS) / NUMSTREAMS; // int streamBytes = streamSize * sizeof(double); // hipStream_t stream[NUMSTREAMS]; // for (int i = 0; i < NUMSTREAMS; ++i) // hipStreamCreate(&stream[i]); // float gpu_stream_time; // gstart(); // for (int i=0; i < NUMSTREAMS; i++) { // int offset = i * streamSize; // int lineOffset = 3 * i * THREADSPERBLOCK; // hipMemcpyAsync(&d_points[offset], &points[offset], streamBytes, hipMemcpyHostToDevice, stream[i]); // hipMemcpyAsync(&d_lineStreamArr[lineOffset], &lineStreamArr[lineOffset], 3*THREADSPERBLOCK*sizeof(double), hipMemcpyHostToDevice, stream[i]); // ransac_gpu_optimal<<<1, THREADSPERBLOCK, 0, stream[i]>>>(&d_points[offset], &d_lineStreamArr[lineOffset], ITERATIONS, THRESHOLD, pass / THREADSPERBLOCK, seed, NUMSTREAMS, i); // } // gend(&gpu_stream_time); // hipDeviceSynchronize(); // for (int i = 0; i < NUMSTREAMS; ++i) // hipStreamDestroy(stream[i]); // hipMemcpy(lineStreamArr, d_lineStreamArr, (3*NUMSTREAMS*THREADSPERBLOCK*sizeof(double)), hipMemcpyDeviceToHost); // avgA = 0; // avgB = 0; // avgC = 0; // for (int i=0; i<3*NUMSTREAMS*THREADSPERBLOCK; i=i+3) { // avgA = avgA + lineStreamArr[i]; // avgB = avgB + lineStreamArr[i+1]; // avgC = avgC + lineStreamArr[i+2]; // } // avgA = avgA / THREADSPERBLOCK / NUMSTREAMS; // avgB = avgB / THREADSPERBLOCK / NUMSTREAMS; // avgC = avgC / THREADSPERBLOCK / NUMSTREAMS; // printf("GPU w/Streams: A=%f | B=%f | C=%f \n", avgA, avgB, avgC); // // for (int b=0; b<9; b=b+3) { // // printf("GPU w/Streams: A=%f | B=%f | C=%f \n", lineStreamArr[b], lineStreamArr[b+1], lineStreamArr[b+2]); // // } // puts("***\n"); // hipDeviceSynchronize(); // printf("CPU time: %f\n",cpu_time); // printf("GPU w/ Multi-thread time: %f\n", gpu_multi_thread_time); // printf("GPU w/ Streams time: %f\n", gpu_stream_time); // hipFree(d_points); // hipFree(d_lineArr); // hipFree(d_lineStreamArr); // free(points); // free(line); // // free(lineArr); // free(lineStreamArr); // return 0; }
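// --- Editor's note: illustrative sketch, not part of the original sources. ---
// AffineModelFromPoints() and model_residual() are declared __host__ __device__,
// so the fit can be sanity-checked on the host: build three correspondences from
// a known affine matrix, fit, and confirm that a fourth generated pair has a
// residual of ~0 (i.e. it would count as an inlier under any threshold). The
// matrix and source points are taken from the MATLAB example in the comment block
// above; the function affine_fit_self_test is a hypothetical helper.
#if 0   // illustrative only
static void affine_fit_self_test()
{
    // ground-truth matrix M (row major, A = M * B)
    const double M[9] = { 1.23, 0.67, 2.5,   -3.45, 1.18, -4.3,   0.0, 0.0, 1.0 };

    // three non-collinear B points and their images A = M * B
    const double Bx[3] = { 1.0, 4.0, 6.0 }, By[3] = { 3.0, 7.0, 11.0 };
    double Ax[3], Ay[3];
    for (int i = 0; i < 3; ++i)
    {
        Ax[i] = M[0] * Bx[i] + M[1] * By[i] + M[2];
        Ay[i] = M[3] * Bx[i] + M[4] * By[i] + M[5];
    }

    double fit[9];
    AffineModelFromPoints( fit, Ax[0], Ay[0], Ax[1], Ay[1], Ax[2], Ay[2],
                                Bx[0], By[0], Bx[1], By[1], Bx[2], By[2] );

    // residual of a fourth pair generated with the same M: expect ~0
    const double bx = 2.0, by = 5.0;
    const double ax = M[0] * bx + M[1] * by + M[2];
    const double ay = M[3] * bx + M[4] * by + M[5];
    printf( "fit residual = %g\n", model_residual( fit, ax, ay, bx, by ) );
}
#endif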
021a7855072c5e54f2509a8b197be678cf3f9128.cu
#include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include "timerc.h" #define PHI 0x9e3779b9 // int ITERATIONS = INT_MAX/16; #define ITERATIONS 1024*128 #define THRESHOLD 0.5 #define THREADSPERBLOCK 1024 #define NUMSTREAMS 16 __device__ static uint32_t Q[4096], c = 362436; __device__ void init_rand(uint32_t x) { int i; Q[0] = x; Q[1] = x + PHI; Q[2] = x + PHI + PHI; for (i = 3; i < 4096; i++) Q[i] = Q[i - 3] ^ Q[i - 2] ^ PHI ^ i; } __device__ uint32_t rand_cmwc(void) { uint64_t t, a = 18782LL; static uint32_t i = 4095; uint32_t x, r = 0xfffffffe; i = (i + 1) & 4095; t = a * Q[i] + c; c = (t >> 32); x = t + c; if (x < c) { x++; c++; } return (Q[i] = r - x); } __device__ int randInRange(int min, int max, uint32_t seed) { return min + rand_cmwc() % (max + 1 - min); } /* * RETURNS: d, distance from point p to the line Ax + By = C */ __host__ __device__ double distanceFromLine(double x, double y, double a, double b, double c) { double d = abs((a * x + b * y + c)) / (sqrt(a * a + b * b)); return d; } /* * RETURNS: [A, B, C] for a line equation */ __host__ __device__ double *lineFromPoints(double *out, double x1, double y1, double x2, double y2) { out[0] = y1 - y2; out[1] = x2 - x1; out[2] = (x1-x2)*y1 + (y2-y1)*x1; return out; } // https://blog.csdn.net/qianhen123/article/details/80785091 // clc; // clear all; // %模拟输入 // src=[1,4,6;3,7,11]; // p=[src;ones(1,3)]; // m=[1.23,0.67,2.5; // -3.45,1.18,-4.3; // 0, 0, 1]; // q=m*p; A=M*B // %1.获得临时临时变量 // x=1; // y=2; // px01=p(x,1)-p(x,2);px12=p(x,2)-p(x,3);px02=p(x,1)-p(x,3); // py01=p(y,1)-p(y,2);py12=p(y,2)-p(y,3);py02=p(y,1)-p(y,3); // qx01=q(x,1)-q(x,2);qx12=q(x,2)-q(x,3);qx02=q(x,1)-q(x,3); // qy01=q(y,1)-q(y,2);qy12=q(y,2)-q(y,3);qy02=q(y,1)-q(y,3); // %2.计算旋转放缩因子 // det_m=px02*py12-px12*py02; // m00=(qx01*py12-qx12*py01)/(det_m); // m01=(qx12*px01-qx01*px12)/(det_m); // m10=(qy01*py12-qy12*py01)/(det_m); // m11=(qy12*px01-qy01*px12)/(det_m); // %3.计算平移因子 // m02=q(x,1)-m00*p(x,1)-m01*p(y,1); // m12=q(y,1)-m10*p(x,1)-m11*p(y,1); // %4.实际输出仿射矩阵 // affine_matrix=[m00,m01,m02; // m10,m11,m12; // 0, 0, 1]; __host__ __device__ double model_residual(const double * const affine_matrix, const double& A_x, const double& A_y, const double& B_x, const double& B_y) { // q=m*p; A=M*B double pre_x = B_x * affine_matrix[0] + B_y * affine_matrix[1] + affine_matrix[2]; double pre_y = B_x * affine_matrix[3] + B_y * affine_matrix[4] + affine_matrix[5]; double d = sqrt( (pre_x - A_x)*(pre_x - A_x) + (pre_y - A_y)*(pre_y - A_y) );//residual return d; } // 计算仿射变换矩阵系数 __host__ __device__ double *AffineModelFromPoints(double *affine_matrix, const double& A_x1, const double& A_y1, const double& A_x2, const double& A_y2, const double& A_x3, const double& A_y3, const double& B_x1, const double& B_y1, const double& B_x2, const double& B_y2, const double& B_x3, const double& B_y3) { // q=m*p; A=M*B double px12 = B_x1 - B_x2; double px13 = B_x1 - B_x3; double px23 = B_x2 - B_x3; double py12 = B_y1 - B_y2; double py13 = B_y1 - B_y3; double py23 = B_y2 - B_y3; double qx12 = A_x1 - A_x2; double qx13 = A_x1 - A_x3; double qx23 = A_x2 - A_x3; double qy12 = A_y1 - A_y2; double qy13 = A_y1 - A_y3; double qy23 = A_y2 - A_y3; // %2.计算旋转放缩因子 double det_p=px13*py23-px23*py13; double m00=(qx12*py23-qx23*py12)/(det_p); double m01=(qx23*px12-qx12*px23)/(det_p); double m10=(qy12*py23-qy23*py12)/(det_p); double m11=(qy23*px12-qy12*px23)/(det_p); // %3.计算平移因子 double m02 = A_x1 - m00 * B_x1 - m01 * B_y1; double m12 = A_y1 - m10 * B_x1 - m11 * B_y1; // %4.实际输出仿射矩阵 
// affine_matrix=[m00,m12,m13; // m10,m11,m12; // 0, 0, 1]; affine_matrix[0] = m00; affine_matrix[1] = m01; affine_matrix[2] = m02; affine_matrix[3] = m10; affine_matrix[4] = m11; affine_matrix[5] = m12; affine_matrix[6] = 0; affine_matrix[7] = 0; affine_matrix[8] = 1; return affine_matrix; } /* * data – A set of observations. * lineArr - Container for optimal model parameters outputted by the algorithm * max_trials – Maximum number of iterations allowed in the algorithm. * t – threshold value to determine data points that are fit well by model. * d – Number of close data points required to assert that a model fits well to data. * seed - Random seed for a RNG on device * numStreams - Number of streams running this function. Set to 1 for testing multi-thread performance * stream - Index of the current stream used to offset data and lineArr. Used for debugging 如果是线性模型的话 就是拟合一条直线,输入数据是[xi yi] xi是源数据 yi是目标数据 i是第i条数据 如果是仿射模型的话 就是拟合一个仿射矩阵M,输入数据是[A_pt_i B_pt_i] A_pt_i是源原图像坐标B_pt_i是目标图像坐标 A=MB 求M */ // d_A_points, d_B_points, matched_pts, scopeSize, inline_threshold, stop_sample_num, seed, d_affineModel, maxinlines_nums_PerThread __global__ void ransac_gpu_optimal(const double *A_Pts, const double *B_Pts, int matched_pts, int scopeSize, int inline_threshold, int stop_sample_num, uint32_t seed, double *d_affineModel_Arr, int* maxinlines_nums_PerThread) { init_rand(seed); maxinlines_nums_PerThread[threadIdx.x] = 0; int r, inliers; int maxInliers = 0; // int scopeSize = max_trials / THREADSPERBLOCK / numStreams; // int offset = 2 * threadIdx.x * scopeSize;//scopeSize step double bestA, bestB, bestC, A_x1, A_y1, A_x2, A_y2, A_x3, A_y3,B_x1, B_y1, B_x2, B_y2, B_x3, B_y3, residual; // double *A_shiftedData = &A_Pts[offset]; // double *B_shiftedData = &B_Pts[offset]; double *d_affineModel = &d_affineModel_Arr[threadIdx.x * 9]; // 每个thread responsiable for data in scope for (int i=0; i < scopeSize; i++) { inliers = 0; /******************* CHOOSING RANDOM LINE *******************/ // Choosing first random point r = randInRange(0, 2*matched_pts - 1, seed); // printf("r: %d \n", r); A_x1 = A_Pts[r]; A_y1 = A_Pts[r+1]; B_x1 = B_Pts[r]; B_y1 = B_Pts[r+1]; // Choosing second random point r = randInRange(0, 2*matched_pts - 1, seed); // printf("r: %d \n", r); A_x2 = A_Pts[r]; A_y2 = A_Pts[r+1]; B_x2 = B_Pts[r]; B_y2 = B_Pts[r+1]; // Choosing second random point r = randInRange(0, 2*matched_pts - 1, seed); // printf("r: %d \n", r); A_x3 = A_Pts[r]; A_y3 = A_Pts[r+1]; B_x3 = B_Pts[r]; B_y3 = B_Pts[r+1]; // Modeling a line between those two points // line = lineFromPoints(line, x1, y1, x2, y2); // printf("start get model \n"); d_affineModel = AffineModelFromPoints(d_affineModel, A_x1, A_y1, A_x2, A_y2, A_x3, A_y3, B_x1, B_y1, B_x2, B_y2, B_x3, B_y3); /*********************** FINDING INLIERS FOR LINE ***********************/ // printf("start calculate residual \n"); for (int j=0; j < 2*matched_pts; j=j+2) { A_x1 = A_Pts[j]; A_y1 = A_Pts[j + 1]; B_x1 = B_Pts[j]; B_y1 = B_Pts[j + 1]; // dist = distanceFromLine(x1, y1, line[0], line[1], line[2]); residual = model_residual(d_affineModel, A_x1, A_y1, B_x1, B_y1); // if(threadIdx.x==56) // printf("%d residual:%f A_x1:%f B_y1:%f\n",threadIdx.x, residual, A_x1, B_y1); if (residual <= 20) { inliers++; } } if (inliers > maxInliers) { // printf("inlines %d \n", inliers); maxInliers = inliers; // bestA = line[0]; // bestB = line[1]; // bestC = line[2]; } // if (maxInliers >= ( stop_sample_num / THREADSPERBLOCK)) { // break; // } } maxinlines_nums_PerThread[threadIdx.x] = 
maxInliers; printf("maxinlines_nums_PerThread[threadIdx.x]: %d = maxInliers: %d \n", maxinlines_nums_PerThread[threadIdx.x] , maxInliers); // Some reduction // if (bestA == -bestB) { // bestA = 1; // bestB = -1; // } // lineArr[threadIdx.x * 3] = bestA; // lineArr[threadIdx.x * 3 + 1] = bestB; // lineArr[threadIdx.x * 3 + 2] = bestC; // Print out only some of the output to check correctness // if (threadIdx.x == 0 && stream % 4 == 0) { // printf("GPU w/ Streams: A=%f | B=%f | C=%f \n", bestA, bestB, bestC); // } // __syncthreads(); } void ransac_cpu(double *data, double *line, int k, int t, int d){ srand(time(NULL)); int r, inliers; int maxInliers = 0; double bestA, bestB, bestC, x1, y1, x2, y2, dist; for (int i=0; i < k; i++) { inliers = 0; /******************* CHOOSING RANDOM LINE *******************/ // Choosing first random point r = 1 + rand() % k; x1 = data[r]; y1 = data[r+1]; // Choosing second random point r = 1 + rand() % k; x2 = data[r]; y2 = data[r+1]; // Modeling a line between those two points line = lineFromPoints(line, x1, y1, x2, y2); /*********************** FINDING INLIERS FOR LINE ***********************/ for (int j=0; j < k; j=j+2) { x1 = data[j*2]; y1 = data[j*2 + 1]; dist = distanceFromLine(x1, y1, line[0], line[1], line[2]); if (dist <= t) { inliers++; } } if (inliers > maxInliers) { maxInliers = inliers; bestA = line[0]; bestB = line[1]; bestC = line[2]; } if (maxInliers >= d) break; } // Some reduction if (bestA == -bestB) { bestA = 1; bestB = -1; } line[0] = bestA; line[1] = bestB; line[2] = bestC; } int ransac_gpu(double *A_points, double *B_points, const int matched_pts, int min_samples=3, float inline_threshold=20, int max_trials=4096){ if (max_trials % 32 != 0){ return -1; } // 数据数量很少 int threads_nums = max_trials <= THREADSPERBLOCK ? 
max_trials:THREADSPERBLOCK; printf("threads_nums: %d \n", threads_nums); int scopeSize = max_trials / threads_nums ;/// numStreams printf("scopeSize: %d \n", scopeSize); double *d_A_points, *d_B_points; cudaMalloc((void **) &d_A_points, (2*matched_pts*sizeof(double))); cudaMalloc((void **) &d_B_points, (2*matched_pts*sizeof(double))); cudaMemcpy(d_A_points, A_points, (2*matched_pts*sizeof(double)), cudaMemcpyHostToDevice); cudaMemcpy(d_B_points, B_points, (2*matched_pts*sizeof(double)), cudaMemcpyHostToDevice); // model parameter double *affineModel; double *d_affineModel; // Each thread will need it's own line equation container affineModel = (double *) malloc(9 * threads_nums * sizeof(double)); cudaMalloc((void **) &d_affineModel, (9 * threads_nums * sizeof(double))); // 每个thread都有一个最好的inlines int *maxinlines_nums_PerThread; int *d_maxinlines_nums_PerThread; // Each thread will need it's own line equation container maxinlines_nums_PerThread = (int *) malloc(threads_nums * sizeof(int)); cudaMalloc((void **) &d_maxinlines_nums_PerThread, (threads_nums * sizeof(int))); cudaMemcpy(d_maxinlines_nums_PerThread, maxinlines_nums_PerThread, (threads_nums * sizeof(int)), cudaMemcpyHostToDevice); int stop_sample_num = 8*matched_pts/10; uint32_t seed = time(NULL); printf("stop_sample_num: %d \n", stop_sample_num); ransac_gpu_optimal<<<1, threads_nums>>>(d_A_points, d_B_points, matched_pts, scopeSize, inline_threshold, stop_sample_num, seed, d_affineModel, d_maxinlines_nums_PerThread); cudaMemcpy(maxinlines_nums_PerThread, d_maxinlines_nums_PerThread, (threads_nums * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(affineModel, d_affineModel, (9 * threads_nums * sizeof(double)), cudaMemcpyDeviceToHost); // 依据内点 找出最好的模型 int max_inlines_nums = 0; for (int j=0; j < threads_nums; ++j) { // x1 = data[j*2]; // y1 = data[j*2 + 1]; // dist = distanceFromLine(x1, y1, line[0], line[1], line[2]); // if (dist <= t) { // inliers++; // } if (max_inlines_nums < maxinlines_nums_PerThread[j]) max_inlines_nums = maxinlines_nums_PerThread[j]; printf("maxinlines_nums_PerThread[j]: %d \n", maxinlines_nums_PerThread[j]); } printf("max_inlines_nums: %d \n", max_inlines_nums); cudaFree(d_A_points); cudaFree(d_B_points); cudaFree(d_affineModel); cudaFree(d_maxinlines_nums_PerThread); free(affineModel); free(maxinlines_nums_PerThread); return max_inlines_nums; } #include <stdio.h> #include <stdlib.h> #include <string.h> #define MAX_LINE 1024 int read_data(const char* file_path, double *A_points, double *B_points) { char buf[MAX_LINE]; /*缓冲区*/ FILE *fp; /*文件指针*/ int len; /*行字符个数*/ const char *separator = " "; if((fp = fopen(file_path,"r")) == NULL) { perror("fail to read"); exit (1) ; } int line = 0; while(fgets(buf, MAX_LINE,fp) != NULL) { len = strlen(buf); buf[len-1] = '\0'; /*去掉换行符*/ printf("%s %d \n",buf,len - 1); char *pNext; int count = 0; if (buf == NULL || strlen(buf) == 0) //如果传入的地址为空或长度为0,直接终止 return 0; if (separator == NULL || strlen(separator) == 0) //如未指定分割的字符串,直接终止 return 0; pNext = (char *)strtok(buf,separator); //必须使用(char *)进行强制类型转换(虽然不写有的编译器中不会出现指针错误) // printf(" %s %d", pNext, atoi(pNext)); // while(pNext != NULL) { // *dest++ = pNext; // ++count; // pNext = (char *)strtok(NULL,separator); //必须使用(char *)进行强制类型转换 // } A_points[2*line] = atof(pNext); // printf(" %f %f \n", atof(pNext), A_points[2*line]); pNext = (char *)strtok(NULL,separator); //必须使用(char *)进行强制类型转换 // printf(" %s %d", pNext, atoi(pNext)); A_points[2*line+1] = atof(pNext); pNext = (char *)strtok(NULL,separator); //必须使用(char *)进行强制类型转换 // 
printf(" %s %d", pNext, atoi(pNext)); B_points[2*line] = atof(pNext); pNext = (char *)strtok(NULL,separator); //必须使用(char *)进行强制类型转换 // printf(" %s %d\n", pNext, atoi(pNext)); B_points[2*line+1] = atof(pNext); printf("%f %f %f %f \n",A_points[2*line],A_points[2*line+1],B_points[2*line],B_points[2*line+1]); line += 1; } return line; } int main(int argc, char **argv) { if(argc !=2){ printf("please input filename\n"); return 0; } char * filename = argv[1]; const int matched_pts=182; double *A_points = (double *)malloc(2*matched_pts*sizeof(double)); double *B_points = (double *)malloc(2*matched_pts*sizeof(double)); char *path = "/media/liesmars/67038e2e-f9b3-41a0-b779-e53a1ca1fd8a1/scene_pic/pic-web-service/src/streetView_index/utils/test_data/6.txt"; int match_pts = read_data(filename, A_points, B_points); printf("match pts: %d \n", match_pts); float gpu_multi_thread_time; gstart(); ransac_gpu(A_points, B_points, match_pts); gend(&gpu_multi_thread_time); printf("GPU w/ Multi-thread time: %f\n", gpu_multi_thread_time); free(A_points); free(B_points); // uint32_t seed = time(NULL); // int match_pts_num = 100;//1024 // srand(seed); // int r; // int pass = ITERATIONS / 2;//大于所有数据的1/2的话 // /* // * Every two elements corresponds to x,y at time t. // * 准备数据 // */ // // double *A_points = (double *) malloc(2*match_pts_num*sizeof(double)); // // double *d_A_points; // // double *A_points = (double *) malloc(2*match_pts_num*sizeof(double)); // // double *d_A_points; // // // Move points with velocity (vx, vy) // // double vx = 100.0; // // double vy = 100.0; // // for (int j=0; j < ITERATIONS; j++) { // // if (j % 10 == 0) { // // r = 0 + rand() % ITERATIONS; // // points[j*2] = r; // // r = 0 + rand() % ITERATIONS; // // points[j*2+1] = r; // // } else { // // points[j*2] = j-1 + vx; // // points[j*2+1] = j-1 + vy; // // } // // } // // Shell to be used for outputting results in the form of line equation // double *line = (double *) malloc(3*sizeof(double)); // // Copy points to file // FILE *fp; // fp = fopen("p.txt", "w+"); // for (int i=0; i < 2*ITERATIONS; i++) { // fprintf(fp,"%f ", points[i]); // } // fclose(fp); // float cpu_time; // cstart(); // ransac_cpu(points, line, ITERATIONS, THRESHOLD, pass); // cend(&cpu_time); // printf("CPU: A=%f | B=%f | C=%f \n", line[0], line[1], line[2]); // puts("***"); // // cudaMalloc((void **) &d_points, (2*ITERATIONS*sizeof(double))); // // cudaMemcpy(d_points, points, (2*ITERATIONS*sizeof(double)), cudaMemcpyHostToDevice); // double *affineModel; // double *d_affineModel; // // Each thread will need it's own line equation container // affineModel = (double *) malloc(9*THREADSPERBLOCK*sizeof(double)); // cudaMalloc((void **) &d_affineModel, (9*THREADSPERBLOCK*sizeof(double))); // cudaMemcpy(d_affineModel, affineModel, (9*THREADSPERBLOCK*sizeof(double)), cudaMemcpyHostToDevice); // float gpu_multi_thread_time; // gstart(); // ransac_gpu_optimal<<<1,THREADSPERBLOCK>>>(d_points, d_affineModel, ITERATIONS, THRESHOLD, pass / THREADSPERBLOCK, seed, 1, 0); // gend(&gpu_multi_thread_time); // cudaMemcpy(affineModel, d_affineModel, (9*THREADSPERBLOCK*sizeof(double)), cudaMemcpyDeviceToHost); // // 最后将结果汇总 平均 // double avgA = 0; // double avgB = 0; // double avgC = 0; // for (int i=0; i<3*THREADSPERBLOCK; i=i+3) { // avgA = avgA + lineArr[i]; // avgB = avgB + lineArr[i+1]; // avgC = avgC + lineArr[i+2]; // } // avgA = avgA / THREADSPERBLOCK; // avgB = avgB / THREADSPERBLOCK; // avgC = avgC / THREADSPERBLOCK; // printf("GPU w/Threads: A=%f | B=%f | C=%f \n", avgA, 
avgB, avgC); // puts("***"); // cudaDeviceSynchronize(); // double *lineStreamArr; // double *d_lineStreamArr; // // Each thread will need it's only line equation container // lineStreamArr = (double *) malloc(3*NUMSTREAMS*THREADSPERBLOCK*sizeof(double)); // cudaMalloc((void **) &d_lineStreamArr, (3*NUMSTREAMS*THREADSPERBLOCK*sizeof(double))); // int streamSize = (2 * ITERATIONS) / NUMSTREAMS; // int streamBytes = streamSize * sizeof(double); // cudaStream_t stream[NUMSTREAMS]; // for (int i = 0; i < NUMSTREAMS; ++i) // cudaStreamCreate(&stream[i]); // float gpu_stream_time; // gstart(); // for (int i=0; i < NUMSTREAMS; i++) { // int offset = i * streamSize; // int lineOffset = 3 * i * THREADSPERBLOCK; // cudaMemcpyAsync(&d_points[offset], &points[offset], streamBytes, cudaMemcpyHostToDevice, stream[i]); // cudaMemcpyAsync(&d_lineStreamArr[lineOffset], &lineStreamArr[lineOffset], 3*THREADSPERBLOCK*sizeof(double), cudaMemcpyHostToDevice, stream[i]); // ransac_gpu_optimal<<<1, THREADSPERBLOCK, 0, stream[i]>>>(&d_points[offset], &d_lineStreamArr[lineOffset], ITERATIONS, THRESHOLD, pass / THREADSPERBLOCK, seed, NUMSTREAMS, i); // } // gend(&gpu_stream_time); // cudaDeviceSynchronize(); // for (int i = 0; i < NUMSTREAMS; ++i) // cudaStreamDestroy(stream[i]); // cudaMemcpy(lineStreamArr, d_lineStreamArr, (3*NUMSTREAMS*THREADSPERBLOCK*sizeof(double)), cudaMemcpyDeviceToHost); // avgA = 0; // avgB = 0; // avgC = 0; // for (int i=0; i<3*NUMSTREAMS*THREADSPERBLOCK; i=i+3) { // avgA = avgA + lineStreamArr[i]; // avgB = avgB + lineStreamArr[i+1]; // avgC = avgC + lineStreamArr[i+2]; // } // avgA = avgA / THREADSPERBLOCK / NUMSTREAMS; // avgB = avgB / THREADSPERBLOCK / NUMSTREAMS; // avgC = avgC / THREADSPERBLOCK / NUMSTREAMS; // printf("GPU w/Streams: A=%f | B=%f | C=%f \n", avgA, avgB, avgC); // // for (int b=0; b<9; b=b+3) { // // printf("GPU w/Streams: A=%f | B=%f | C=%f \n", lineStreamArr[b], lineStreamArr[b+1], lineStreamArr[b+2]); // // } // puts("***\n"); // cudaDeviceSynchronize(); // printf("CPU time: %f\n",cpu_time); // printf("GPU w/ Multi-thread time: %f\n", gpu_multi_thread_time); // printf("GPU w/ Streams time: %f\n", gpu_stream_time); // cudaFree(d_points); // cudaFree(d_lineArr); // cudaFree(d_lineStreamArr); // free(points); // free(line); // // free(lineArr); // free(lineStreamArr); // return 0; }
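// --- Editor's note: illustrative sketch, not part of the original sources. ---
// Both point arrays are interleaved as [x0, y0, x1, y1, ...] (read_data fills
// slots 2*line and 2*line+1, and the inlier loops step j by 2), yet the random
// sampling in ransac_gpu_optimal draws r = randInRange(0, 2*matched_pts - 1, ...)
// and reads element r as an x and r+1 as a y. An odd r therefore pairs a
// y-coordinate with the next point's x, and r == 2*matched_pts - 1 reads one
// element past the end of the array. A possible fix is to draw a point index
// first and scale it, as sketched below; sample_point_index is a hypothetical
// helper, not part of the original code.
#if 0   // illustrative only
__device__ int sample_point_index( int matched_pts, uint32_t seed )
{
    // draw a point index, then convert: x lives at slot 2*i, y at slot 2*i + 1
    const int i = randInRange( 0, matched_pts - 1, seed );
    return 2 * i;
}

// usage inside the sampling step of ransac_gpu_optimal:
//   const int r = sample_point_index( matched_pts, seed );
//   A_x1 = A_Pts[r];  A_y1 = A_Pts[r + 1];
//   B_x1 = B_Pts[r];  B_y1 = B_Pts[r + 1];
#endif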
b489622efa3e9489916680e9fa6a25d0ec3aee10.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* .cuda.cu - Copyright 2019/2020 Utrecht University
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/
#include ".cuda.h"
namespace lh2core {

// path tracing buffers and global variables
__constant__ CoreInstanceDesc* instanceDescriptors;
__constant__ CUDAMaterial* materials;
__constant__ CoreLightTri* triLights;
__constant__ CorePointLight* pointLights;
__constant__ CoreSpotLight* spotLights;
__constant__ CoreDirectionalLight* directionalLights;
__constant__ int4 lightCounts; // area, point, spot, directional
__constant__ uchar4* argb32;
__constant__ float4* argb128;
__constant__ uchar4* nrm32;
__constant__ float4* skyPixels;
__constant__ int skywidth;
__constant__ int skyheight;
__constant__ PathState* pathStates;
__constant__ float4* debugData;
__constant__ LightCluster* lightTree;
__constant__ mat4 worldToSky;

// path tracer settings
__constant__ __device__ float geometryEpsilon;
__constant__ __device__ float clampValue;

// staging: copies will be batched and carried out after rendering completes,
// to allow the CPU to update the scene concurrently with GPU rendering.
enum { INSTS = 0, MATS, TLGHTS, PLGHTS, SLGHTS, DLGHTS, LCNTS, RGB32, RGBH, NRMLS, SKYPIX, SKYW, SKYH, SMAT, DBGDAT, GEPS, CLMPV, LTREE };

// device pointers are not real pointers for nvcc, so we need a bit of a hack.
struct StagedPtr { void* p; int id; };
struct StagedInt { int v; int id; };
struct StagedInt4 { int4 v; int id; };
struct StagedFloat3 { float3 v; int id; };
struct StagedMat { mat4 v; int id; };
struct StagedF32 { float v; int id; };
struct StagedCpy { void* d; void* s; int n; };
static std::vector<StagedPtr> stagedPtr;
static std::vector<StagedInt> stagedInt;
static std::vector<StagedInt4> stagedInt4;
static std::vector<StagedFloat3> stagedFloat3;
static std::vector<StagedMat> stagedMat;
static std::vector<StagedF32> stagedF32;
static std::vector<StagedCpy> stagedCpy;
__host__ static void pushPtrCpy( int id, void* p )
{
	if (id == INSTS) hipMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) );
	if (id == MATS) hipMemcpyToSymbol( materials, &p, sizeof( void* ) );
	if (id == TLGHTS) hipMemcpyToSymbol( triLights, &p, sizeof( void* ) );
	if (id == PLGHTS) hipMemcpyToSymbol( pointLights, &p, sizeof( void* ) );
	if (id == SLGHTS) hipMemcpyToSymbol( spotLights, &p, sizeof( void* ) );
	if (id == DLGHTS) hipMemcpyToSymbol( directionalLights, &p, sizeof( void* ) );
	if (id == RGB32) hipMemcpyToSymbol( argb32, &p, sizeof( void* ) );
	if (id == RGBH) hipMemcpyToSymbol( argb128, &p, sizeof( void* ) );
	if (id == NRMLS) hipMemcpyToSymbol( nrm32, &p, sizeof( void* ) );
	if (id == SKYPIX) hipMemcpyToSymbol( skyPixels, &p, sizeof( void* ) );
	if (id == DBGDAT) hipMemcpyToSymbol( debugData, &p, sizeof( void* ) );
	if (id == LTREE) hipMemcpyToSymbol( lightTree, &p, sizeof( void* ) );
}
__host__ static void pushIntCpy( int id, const int v )
{
	if (id == SKYW) hipMemcpyToSymbol( skywidth, &v, sizeof( int ) );
	if (id == SKYH) hipMemcpyToSymbol( skyheight, &v, sizeof( int ) );
}
__host__ static void pushF32Cpy( int id, const float v )
{
	if (id == GEPS) hipMemcpyToSymbol( geometryEpsilon, &v, sizeof( float ) );
	if (id == CLMPV) hipMemcpyToSymbol( clampValue, &v, sizeof( float ) ); // clampValue is a float; was sizeof( int )
}
__host__ static void pushMatCpy( int id, const mat4& m )
{
	if (id == SMAT) hipMemcpyToSymbol( worldToSky, &m, sizeof( mat4 ) );
}
__host__ static void pushInt4Cpy( int id, const int4& v )
{
	if (id == LCNTS) hipMemcpyToSymbol( lightCounts, &v, sizeof( int4 ) );
}
__host__ static void pushFloat3Cpy( int id, const float3& v )
{
	// nothing here yet
}
#define MAXVARS 32
static void* prevPtr[MAXVARS] = {};
static int prevInt[MAXVARS] = {};
static float prevFloat[MAXVARS] = {};
static int4 prevInt4[MAXVARS] = {};
// static float3 prevFloat3[MAXVARS] = {};
static bool prevValSet[MAXVARS] = {};
__host__ static void stagePtrCpy( int id, void* p )
{
	if (prevPtr[id] == p) return; // not changed
	StagedPtr n = { p, id };
	stagedPtr.push_back( n );
	prevPtr[id] = p;
}
__host__ static void stageIntCpy( int id, const int v )
{
	if (prevValSet[id] == true && prevInt[id] == v) return;
	StagedInt n = { v, id };
	stagedInt.push_back( n );
	prevValSet[id] = true;
	prevInt[id] = v;
}
__host__ static void stageF32Cpy( int id, const float v )
{
	if (prevValSet[id] == true && prevFloat[id] == v) return;
	StagedF32 n = { v, id };
	stagedF32.push_back( n );
	prevValSet[id] = true;
	prevFloat[id] = v;
}
__host__ static void stageMatCpy( int id, const mat4& m )
{
	StagedMat n = { m, id };
	stagedMat.push_back( n );
}
__host__ static void stageInt4Cpy( int id, const int4& v )
{
	if (prevValSet[id] == true && prevInt4[id].x == v.x && prevInt4[id].y == v.y && prevInt4[id].z == v.z && prevInt4[id].w == v.w) return;
	StagedInt4 n = { v, id };
	stagedInt4.push_back( n );
	prevValSet[id] = true;
	prevInt4[id] = v;
}
/* __host__ static void stageFloat3Cpy( int id, const float3& v )
{
	if (prevValSet[id] == true && prevFloat3[id].x == v.x && prevFloat3[id].y == v.y && prevFloat3[id].z == v.z) return;
	StagedFloat3 n = { v, id };
	stagedFloat3.push_back( n );
	prevValSet[id] = true;
	prevFloat3[id] = v;
} */
__host__ void stageMemcpy( void* d, void* s, int n ) { StagedCpy c = { d, s, n }; stagedCpy.push_back( c ); }
__host__ void stageInstanceDescriptors( CoreInstanceDesc* p ) { stagePtrCpy( INSTS /* instanceDescriptors */, p ); }
__host__ void stageMaterialList( CUDAMaterial* p ) { stagePtrCpy( MATS /* materials */, p ); }
__host__ void stageTriLights( CoreLightTri* p ) { stagePtrCpy( TLGHTS /* triLights */, p ); }
__host__ void stagePointLights( CorePointLight* p ) { stagePtrCpy( PLGHTS /* pointLights */, p ); }
__host__ void stageSpotLights( CoreSpotLight* p ) { stagePtrCpy( SLGHTS /* spotLights */, p ); }
__host__ void stageDirectionalLights( CoreDirectionalLight* p ) { stagePtrCpy( DLGHTS /* directionalLights */, p ); }
__host__ void stageARGB32Pixels( uint* p ) { stagePtrCpy( RGB32 /* argb32 */, p ); }
__host__ void stageARGB128Pixels( float4* p ) { stagePtrCpy( RGBH /* argb128 */, p ); }
__host__ void stageNRM32Pixels( uint* p ) { stagePtrCpy( NRMLS /* nrm32 */, p ); }
__host__ void stageSkyPixels( float4* p ) { stagePtrCpy( SKYPIX /* skyPixels */, p ); }
__host__ void stageSkySize( int w, int h ) { stageIntCpy( SKYW /* skywidth */, w ); stageIntCpy( SKYH /* skyheight */, h ); }
__host__ void stageWorldToSky( const mat4& worldToLight ) { stageMatCpy( SMAT /* worldToSky */, worldToLight ); }
__host__ void stageDebugData( float4* p ) { stagePtrCpy( DBGDAT /* debugData */, p ); }
__host__ void stageGeometryEpsilon( float e ) { stageF32Cpy( GEPS /* geometryEpsilon */, e ); }
__host__ void stageClampValue( float c ) { stageF32Cpy( CLMPV /* clampValue */, c ); }
__host__ void stageLightTree( LightCluster* t ) { stagePtrCpy( LTREE /* light tree */, t ); }
__host__ void stageLightCounts( int tri, int point, int spot, int directional )
{
	const int4 counts = make_int4( tri, point, spot, directional );
	stageInt4Cpy( LCNTS /* lightCounts */, counts );
}
__host__ void pushStagedCopies()
{
	for (auto c : stagedCpy) hipMemcpy( c.d, c.s, c.n, hipMemcpyHostToDevice ); stagedCpy.clear();
	for (auto n : stagedPtr) pushPtrCpy( n.id, n.p ); stagedPtr.clear();
	for (auto n : stagedInt) pushIntCpy( n.id, n.v ); stagedInt.clear();
	for (auto n : stagedInt4) pushInt4Cpy( n.id, n.v ); stagedInt4.clear();
	for (auto n : stagedFloat3) pushFloat3Cpy( n.id, n.v ); stagedFloat3.clear();
	for (auto n : stagedF32) pushF32Cpy( n.id, n.v ); stagedF32.clear();
	for (auto n : stagedMat) pushMatCpy( n.id, n.v ); stagedMat.clear();
}
// counters for persistent threads
static __device__ Counters* counters;
__global__ void InitCountersForExtend_Kernel( int pathCount )
{
	if (threadIdx.x != 0) return;
	counters->activePaths = pathCount;        // remaining active paths
	counters->shaded = 0;                     // persistent thread atomic for shade kernel
	counters->generated = 0;                  // persistent thread atomic for generate in .optix.cu
	counters->extensionRays = 0;              // compaction counter for extension rays
	counters->shadowRays = 0;                 // compaction counter for connections
	counters->connected = 0;
	counters->totalExtensionRays = pathCount;
	counters->totalShadowRays = 0;
}
__host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel<<<1, 32>>>( pathCount ); }
__global__ void InitCountersSubsequent_Kernel()
{
	if (threadIdx.x != 0) return;
	counters->totalExtensionRays += counters->extensionRays;
	counters->activePaths = counters->extensionRays; // remaining active paths
	counters->extended = 0;                   // persistent thread atomic for genSecond in .optix.cu
	counters->shaded = 0;                     // persistent thread atomic for shade kernel
	counters->extensionRays = 0;              // compaction counter for extension rays
}
__host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel<<<1, 32>>>(); }
__host__ void SetCounters( Counters* p ) { hipMemcpyToSymbol( counters, &p, sizeof( void* ) ); }
// functional blocks
#include "tools_shared.h"
#include "sampling_shared.h"
#include "material_shared.h"
#include "lights_shared.h"
#include "bsdf.h"
#include "pathtracer.h"
#include "finalize_shared.h"
} // namespace lh2core
// EOF
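The staging layer above exists so that scene updates made on the CPU while a frame is still rendering only touch host-side shadow copies; the actual hipMemcpyToSymbol/hipMemcpy traffic happens in one batch when pushStagedCopies runs between frames. Below is a minimal self-contained sketch of that pattern against the plain CUDA runtime; the names clampValueDemo, stageClamp and flushStaged are invented for illustration and are not the renderer's own API.

#include <cstdio>
#include <cuda_runtime.h>

__constant__ float clampValueDemo;   // device-side constant read by kernels
static float prevClamp;              // host-side shadow copy of the last staged value
static bool  prevClampSet = false;
static bool  clampDirty = false;

void stageClamp( float v )           // cheap: record the change, no GPU traffic yet
{
	if (prevClampSet && prevClamp == v) return;   // unchanged: nothing to copy later
	prevClamp = v; prevClampSet = true; clampDirty = true;
}

void flushStaged()                   // called once per frame, when the GPU is idle
{
	if (clampDirty) cudaMemcpyToSymbol( clampValueDemo, &prevClamp, sizeof( float ) );
	clampDirty = false;
}

__global__ void readClamp( float* out ) { *out = clampValueDemo; }

int main()
{
	float* out;
	cudaMalloc( &out, sizeof( float ) );
	stageClamp( 10.0f );             // may be called while the previous frame still renders
	flushStaged();                   // one batched update at a safe point
	readClamp<<<1, 1>>>( out );
	float h = 0;
	cudaMemcpy( &h, out, sizeof( float ), cudaMemcpyDeviceToHost );
	printf( "clamp on device: %f\n", h );
	cudaFree( out );
	return 0;
}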
b489622efa3e9489916680e9fa6a25d0ec3aee10.cu
/* .cuda.cu - Copyright 2019/2020 Utrecht University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include ".cuda.h" namespace lh2core { // path tracing buffers and global variables __constant__ CoreInstanceDesc* instanceDescriptors; __constant__ CUDAMaterial* materials; __constant__ CoreLightTri* triLights; __constant__ CorePointLight* pointLights; __constant__ CoreSpotLight* spotLights; __constant__ CoreDirectionalLight* directionalLights; __constant__ int4 lightCounts; // area, point, spot, directional __constant__ uchar4* argb32; __constant__ float4* argb128; __constant__ uchar4* nrm32; __constant__ float4* skyPixels; __constant__ int skywidth; __constant__ int skyheight; __constant__ PathState* pathStates; __constant__ float4* debugData; __constant__ LightCluster* lightTree; __constant__ mat4 worldToSky; // path tracer settings __constant__ __device__ float geometryEpsilon; __constant__ __device__ float clampValue; // staging: copies will be batched and carried out after rendering completes, // to allow the CPU to update the scene concurrently with GPU rendering. enum { INSTS = 0, MATS, TLGHTS, PLGHTS, SLGHTS, DLGHTS, LCNTS, RGB32, RGBH, NRMLS, SKYPIX, SKYW, SKYH, SMAT, DBGDAT, GEPS, CLMPV, LTREE }; // device pointers are not real pointers for nvcc, so we need a bit of a hack. 
struct StagedPtr { void* p; int id; }; struct StagedInt { int v; int id; }; struct StagedInt4 { int4 v; int id; }; struct StagedFloat3 { float3 v; int id; }; struct StagedMat { mat4 v; int id; }; struct StagedF32 { float v; int id; }; struct StagedCpy { void* d; void* s; int n; }; static std::vector<StagedPtr> stagedPtr; static std::vector<StagedInt> stagedInt; static std::vector<StagedInt4> stagedInt4; static std::vector<StagedFloat3> stagedFloat3; static std::vector<StagedMat> stagedMat; static std::vector<StagedF32> stagedF32; static std::vector<StagedCpy> stagedCpy; __host__ static void pushPtrCpy( int id, void* p ) { if (id == INSTS) cudaMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) ); if (id == MATS) cudaMemcpyToSymbol( materials, &p, sizeof( void* ) ); if (id == TLGHTS) cudaMemcpyToSymbol( triLights, &p, sizeof( void* ) ); if (id == PLGHTS) cudaMemcpyToSymbol( pointLights, &p, sizeof( void* ) ); if (id == SLGHTS) cudaMemcpyToSymbol( spotLights, &p, sizeof( void* ) ); if (id == DLGHTS) cudaMemcpyToSymbol( directionalLights, &p, sizeof( void* ) ); if (id == RGB32) cudaMemcpyToSymbol( argb32, &p, sizeof( void* ) ); if (id == RGBH) cudaMemcpyToSymbol( argb128, &p, sizeof( void* ) ); if (id == NRMLS) cudaMemcpyToSymbol( nrm32, &p, sizeof( void* ) ); if (id == SKYPIX) cudaMemcpyToSymbol( skyPixels, &p, sizeof( void* ) ); if (id == DBGDAT) cudaMemcpyToSymbol( debugData, &p, sizeof( void* ) ); if (id == LTREE) cudaMemcpyToSymbol( lightTree, &p, sizeof( void* ) ); } __host__ static void pushIntCpy( int id, const int v ) { if (id == SKYW) cudaMemcpyToSymbol( skywidth, &v, sizeof( int ) ); if (id == SKYH) cudaMemcpyToSymbol( skyheight, &v, sizeof( int ) ); } __host__ static void pushF32Cpy( int id, const float v ) { if (id == GEPS) cudaMemcpyToSymbol( geometryEpsilon, &v, sizeof( float ) ); if (id == CLMPV) cudaMemcpyToSymbol( clampValue, &v, sizeof( int ) ); } __host__ static void pushMatCpy( int id, const mat4& m ) { if (id == SMAT) cudaMemcpyToSymbol( worldToSky, &m, sizeof( mat4 ) ); } __host__ static void pushInt4Cpy( int id, const int4& v ) { if (id == LCNTS) cudaMemcpyToSymbol( lightCounts, &v, sizeof( int4 ) ); } __host__ static void pushFloat3Cpy( int id, const float3& v ) { // nothing here yet } #define MAXVARS 32 static void* prevPtr[MAXVARS] = {}; static int prevInt[MAXVARS] = {}; static float prevFloat[MAXVARS] = {}; static int4 prevInt4[MAXVARS] = {}; // static float3 prevFloat3[MAXVARS] = {}; static bool prevValSet[MAXVARS] = {}; __host__ static void stagePtrCpy( int id, void* p ) { if (prevPtr[id] == p) return; // not changed StagedPtr n = { p, id }; stagedPtr.push_back( n ); prevPtr[id] = p; } __host__ static void stageIntCpy( int id, const int v ) { if (prevValSet[id] == true && prevInt[id] == v) return; StagedInt n = { v, id }; stagedInt.push_back( n ); prevValSet[id] = true; prevInt[id] = v; } __host__ static void stageF32Cpy( int id, const float v ) { if (prevValSet[id] == true && prevFloat[id] == v) return; StagedF32 n = { v, id }; stagedF32.push_back( n ); prevValSet[id] = true; prevFloat[id] = v; } __host__ static void stageMatCpy( int id, const mat4& m ) { StagedMat n = { m, id }; stagedMat.push_back( n ); } __host__ static void stageInt4Cpy( int id, const int4& v ) { if (prevValSet[id] == true && prevInt4[id].x == v.x && prevInt4[id].y == v.y && prevInt4[id].z == v.z && prevInt4[id].w == v.w) return; StagedInt4 n = { v, id }; stagedInt4.push_back( n ); prevValSet[id] = true; prevInt4[id] = v; } /* __host__ static void stageFloat3Cpy( int id, const float3& v 
) { if (prevValSet[id] == true && prevFloat3[id].x == v.x && prevFloat3[id].y == v.y && prevFloat3[id].z == v.z) return; StagedFloat3 n = { v, id }; stagedFloat3.push_back( n ); prevValSet[id] = true; prevFloat3[id] = v; } */ __host__ void stageMemcpy( void* d, void* s, int n ) { StagedCpy c = { d, s, n }; stagedCpy.push_back( c ); } __host__ void stageInstanceDescriptors( CoreInstanceDesc* p ) { stagePtrCpy( INSTS /* instanceDescriptors */, p ); } __host__ void stageMaterialList( CUDAMaterial* p ) { stagePtrCpy( MATS /* materials */, p ); } __host__ void stageTriLights( CoreLightTri* p ) { stagePtrCpy( TLGHTS /* triLights */, p ); } __host__ void stagePointLights( CorePointLight* p ) { stagePtrCpy( PLGHTS /* pointLights */, p ); } __host__ void stageSpotLights( CoreSpotLight* p ) { stagePtrCpy( SLGHTS /* spotLights */, p ); } __host__ void stageDirectionalLights( CoreDirectionalLight* p ) { stagePtrCpy( DLGHTS /* directionalLights */, p ); } __host__ void stageARGB32Pixels( uint* p ) { stagePtrCpy( RGB32 /* argb32 */, p ); } __host__ void stageARGB128Pixels( float4* p ) { stagePtrCpy( RGBH /* argb128 */, p ); } __host__ void stageNRM32Pixels( uint* p ) { stagePtrCpy( NRMLS /* nrm32 */, p ); } __host__ void stageSkyPixels( float4* p ) { stagePtrCpy( SKYPIX /* skyPixels */, p ); } __host__ void stageSkySize( int w, int h ) { stageIntCpy( SKYW /* skywidth */, w ); stageIntCpy( SKYH /* skyheight */, h ); } __host__ void stageWorldToSky( const mat4& worldToLight ) { stageMatCpy( SMAT /* worldToSky */, worldToLight ); } __host__ void stageDebugData( float4* p ) { stagePtrCpy( DBGDAT /* debugData */, p ); } __host__ void stageGeometryEpsilon( float e ) { stageF32Cpy( GEPS /* geometryEpsilon */, e ); } __host__ void stageClampValue( float c ) { stageF32Cpy( CLMPV /* clampValue */, c ); } __host__ void stageLightTree( LightCluster* t ) { stagePtrCpy( LTREE /* light tree */, t ); } __host__ void stageLightCounts( int tri, int point, int spot, int directional ) { const int4 counts = make_int4( tri, point, spot, directional ); stageInt4Cpy( LCNTS /* lightCounts */, counts ); } __host__ void pushStagedCopies() { for (auto c : stagedCpy) cudaMemcpy( c.d, c.s, c.n, cudaMemcpyHostToDevice ); stagedCpy.clear(); for (auto n : stagedPtr) pushPtrCpy( n.id, n.p ); stagedPtr.clear(); for (auto n : stagedInt) pushIntCpy( n.id, n.v ); stagedInt.clear(); for (auto n : stagedInt4) pushInt4Cpy( n.id, n.v ); stagedInt4.clear(); for (auto n : stagedFloat3) pushFloat3Cpy( n.id, n.v ); stagedFloat3.clear(); for (auto n : stagedF32) pushF32Cpy( n.id, n.v ); stagedF32.clear(); for (auto n : stagedMat) pushMatCpy( n.id, n.v ); stagedMat.clear(); } // counters for persistent threads static __device__ Counters* counters; __global__ void InitCountersForExtend_Kernel( int pathCount ) { if (threadIdx.x != 0) return; counters->activePaths = pathCount; // remaining active paths counters->shaded = 0; // persistent thread atomic for shade kernel counters->generated = 0; // persistent thread atomic for generate in .optix.cu counters->extensionRays = 0; // compaction counter for extension rays counters->shadowRays = 0; // compaction counter for connections counters->connected = 0; counters->totalExtensionRays = pathCount; counters->totalShadowRays = 0; } __host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); } __global__ void InitCountersSubsequent_Kernel() { if (threadIdx.x != 0) return; counters->totalExtensionRays += counters->extensionRays; counters->activePaths = 
counters->extensionRays; // remaining active paths counters->extended = 0; // persistent thread atomic for genSecond in .optix.cu counters->shaded = 0; // persistent thread atomic for shade kernel counters->extensionRays = 0; // compaction counter for extension rays } __host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel << <1, 32 >> > (); } __host__ void SetCounters( Counters* p ) { cudaMemcpyToSymbol( counters, &p, sizeof( void* ) ); } // functional blocks #include "tools_shared.h" #include "sampling_shared.h" #include "material_shared.h" #include "lights_shared.h" #include "bsdf.h" #include "pathtracer.h" #include "finalize_shared.h" } // namespace lh2core // EOF
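The InitCountersForExtend/InitCountersSubsequent wrappers in both variants above reset the wavefront counters with a <<<1, 32>>> launch in which only thread 0 writes, so the counter block stays resident on the device instead of round-tripping through the host. The following small, self-contained illustration of that reset pattern uses an invented DemoCounters struct as a stand-in for the real Counters type.

#include <cstdio>
#include <cuda_runtime.h>

struct DemoCounters { int activePaths; int extensionRays; int shadowRays; };

__device__ DemoCounters* demoCounters;   // device-global pointer, set once from the host

// A single warp is launched, but only thread 0 writes, so no atomics or
// synchronization are required and the data never leaves the GPU.
__global__ void ResetDemoCounters( int pathCount )
{
	if (threadIdx.x != 0) return;
	demoCounters->activePaths = pathCount;
	demoCounters->extensionRays = 0;
	demoCounters->shadowRays = 0;
}

int main()
{
	DemoCounters* d;
	cudaMalloc( &d, sizeof( DemoCounters ) );
	cudaMemcpyToSymbol( demoCounters, &d, sizeof( DemoCounters* ) );  // like SetCounters above
	ResetDemoCounters<<<1, 32>>>( 1920 * 1080 );
	DemoCounters h;
	cudaMemcpy( &h, d, sizeof( DemoCounters ), cudaMemcpyDeviceToHost );
	printf( "activePaths = %d\n", h.activePaths );
	cudaFree( d );
	return 0;
}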
2abe3d6eb294c350c74540e158f9b2e8bb5ec2fa.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "../inc/helper_cuda.h" #include "common_ccl.h" //For CUDA error checking #define cudaErrorCheck(t) { \ t; \ hipError_t e=hipGetLastError(); \ if(e!=hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \ exit(0); \ } \ } //Defines a default thread block size (overwritten by args) int regionWidth = 8; int regionHeight = 8; int total_index; __global__ void gpu_label(int width, int height, int* globalImage) { // STEP 1 - Initial Labelling int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int idx = width*y+x+1; // +1 to avoid 0 labels int temp; if ((x<width) && (y<height)) { temp = globalImage[x+y*width]; if(temp != 0) { globalImage[x+y*width] = idx; } //printf("x = %d, y = %d, i = %d\n",x,y,idx); } } __device__ int getMinNeighbourScan(int x, int y, int width, int height, int label, int* globalImage) { int minLabel = label, curr = -1; // south-west if(x > 0 && y < (height-1)) curr = globalImage[x-1+(y+1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // west if(x > 0) curr = globalImage[x-1+(y)*width]; if(curr > 0) minLabel = min(minLabel,curr); // north-west if(x > 0 && y > 0) curr = globalImage[x-1+(y-1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // north if(y > 0) curr = globalImage[x+(y-1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // north-east if(x < (width-1) && y > 0) curr = globalImage[x+1+(y-1)*width]; if(curr > 0) minLabel = min(minLabel,curr); return minLabel; } __global__ void gpu_scan(int width, int height, int* globalImage) { // STEP 2 - Scanning int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int label; if ((x<width) && (y<height)) { label = globalImage[x+y*width]; if(label != 0) { int minLabelScanned = getMinNeighbourScan(x,y,width,height,label,globalImage); globalImage[x+y*width] = minLabelScanned; } } } __global__ void gpu_analysis(int width, int height, int* globalImage) { // STEP 3 - Analysis int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; //int i = y*width + x; int label; if ((x<width) && (y<height)) { label = globalImage[x+y*width]; if(label != 0) { // propagate labels // "recursively" get the final label // - if first referred pixel index refers to another label // - stop when the label refers to itself int idx = -1; int lx,ly; while(label != (idx+1)) { idx = label-1; // -1 since labels start from 1 and we want 1D pixel index lx = idx%width; ly = idx/width; label = globalImage[lx+ly*width]; } globalImage[x+y*width] = label; } } } __device__ int getMinNeighbourLink(int x, int y, int width, int height, int label, int* globalImage) { int minLabel = label; int curr = -1; // CHANGED FROM PAPER // Need to check south-east, north, and north-west as well for the algorithm to work // south-west if(x > 0 && y < (height-1)) curr = globalImage[x-1+(y+1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // south-east if(x < (width-1) && y < (height-1)) curr = globalImage[x+1+(y+1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // west if(x > 0) curr = globalImage[x-1+(y)*width]; if(curr > 0) minLabel = min(minLabel,curr); // east if(x < (width-1)) curr = globalImage[x+1+(y)*width]; if(curr > 0) minLabel = min(minLabel,curr); // north-east if(x < (width-1) && y > 0) curr = globalImage[x+1+(y-1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // north if(y > 0) curr = 
globalImage[x+(y-1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // north-west if(x > 0 && y > 0) curr = globalImage[x-1+(y-1)*width]; if(curr > 0) minLabel = min(minLabel,curr); return minLabel; } __global__ void gpu_link(int width, int height, int* globalImage) { // STEP 4 - Link int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; //int i = y*width + x; int label; if ((x<width) && (y<height)) { label = globalImage[x+y*width]; if(label != 0) { // scan neighbours int minLabel = getMinNeighbourLink(x,y,width, height, label, globalImage); // update pixel of REFERENCE label (not current pixel) // this is so that all other pixels can simply reference that pixel // in the next step if(minLabel < label) { int refIdx = label-1; // -1 since labels start from 1 and we want 1D pixel index int refx = refIdx%width; int refy = refIdx/width; // reduces contention - makes it faster than surface atomicMin(&globalImage[refx+refy*width],minLabel); } } } } __global__ void gpu_relabel(int width, int height, int* globalImage) { // STEP 5 - Re-label int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; //int i = y*width + x; int label; if ((x<width) && (y<height)) { label = globalImage[x+y*width]; if(label != 0) { // resolve label equivalences (after previous step) int refIdx = label-1; // -1 since labels start from 1 and we want 1D pixel index int refx = refIdx%width; int refy = refIdx/width; int refLabel; refLabel = globalImage[refx+refy*width]; globalImage[x+y*width] = refLabel; } } } __device__ bool done; __global__ void gpu_rescan(int width, int height, int* globalImage) { // STEP 5 - Re-Scan int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; //int i = y*width + x; int label; if ((x<width) && (y<height)) { label = globalImage[x+y*width]; if(label != 0) { // check if all regions are connected int minNeighbour = getMinNeighbourScan(x,y,width,height,label,globalImage); if(minNeighbour != label) { done = false; } } } } //Get ready to do work on the GPU void gpu_label(int* image, CPUBitmap* output, int width, int height, float* gpuTime) { //Copy image over int* globalImage; hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindSigned); cudaErrorCheck(hipMalloc((void**)&globalImage,width*height*sizeof(int))); cudaErrorCheck(hipMemcpy(globalImage,image,width*height*sizeof(int), hipMemcpyHostToDevice)); //Define grid dim3 block_dim(regionWidth, regionHeight); int gridWidth = width/block_dim.x; int gridHeight = height/block_dim.y; if (width%block_dim.x != 0) gridWidth++; if (height%block_dim.y != 0) gridHeight++; bool result = false; dim3 grid_dim(gridWidth, gridHeight); //Start timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); //Our main algorithm - reference the "Parallel Connected-Component //Labeling Algorithm for GPGPU Applications" in ./documents for more //understanding hipLaunchKernelGGL(( gpu_label), dim3(grid_dim), dim3(block_dim), 0, 0, width, height, globalImage); hipDeviceSynchronize(); hipLaunchKernelGGL(( gpu_scan), dim3(grid_dim), dim3(block_dim), 0, 0, width, height, globalImage); hipDeviceSynchronize(); hipLaunchKernelGGL(( gpu_analysis), dim3(grid_dim), dim3(block_dim), 0, 0, width, height, globalImage); hipDeviceSynchronize(); while(result == false) { hipLaunchKernelGGL(( gpu_link), dim3(grid_dim), dim3(block_dim), 0, 0, width, height, globalImage); hipDeviceSynchronize(); hipLaunchKernelGGL(( 
gpu_relabel), dim3(grid_dim), dim3(block_dim), 0, 0, width, height, globalImage); hipDeviceSynchronize(); result = true; cudaErrorCheck(hipMemcpyToSymbol(done, &result, sizeof(bool))); hipLaunchKernelGGL(( gpu_rescan), dim3(grid_dim), dim3(block_dim), 0, 0, width, height, globalImage); hipDeviceSynchronize(); cudaErrorCheck(hipMemcpyFromSymbol(&result, done, sizeof(bool))); } //Stop timing hipEventRecord(stop); hipEventSynchronize(stop); *gpuTime = 0; hipEventElapsedTime(gpuTime, start, stop); //Get result cudaErrorCheck(hipMemcpy(image, globalImage,width*height*sizeof(int), hipMemcpyDeviceToHost)); cudaErrorCheck(hipFree(globalImage)); } int main(int argc, char **argv) { int width, height; int* dims = new int[2]; int* binaryImage; CPUBitmap *bitmap; DataBlock data; BMP output; BMP input; struct arguments parsed_args; //Parse args, load image if (!start(argc, argv, width, height, input, parsed_args)) exit(EXIT_FAILURE); regionWidth = parsed_args.region_width; regionHeight = parsed_args.region_width; //Binarize, initialize output bitmap = new CPUBitmap( width, height, &data ); data.bitmap = bitmap; copyBMPtoBitmap(&input,bitmap); binaryImage = new int[(width)*(height)]; bitmapToBinary(bitmap,binaryImage); output.SetSize(width,height); output.SetBitDepth(32); // RGBA fprintf(stderr,"LABELLING...\n"); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); //Main algorithm float gpuTime = 0; gpu_label(binaryImage,bitmap,width,height,&gpuTime); //Stop timing hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); fprintf(stderr,"FINISHED...\n"); //Print times if (!parsed_args.bench) { printf("Time elapsed (gpu): %.6f ms\n",gpuTime); printf("Time elapsed (total): %.6f ms\n",milliseconds); } else { printf("%s,%d,%d,%f,%f\n",parsed_args.mode==NORMAL_MODE?"normal":"random", width*height,regionWidth*regionHeight, gpuTime,milliseconds); } //Colourise, display, and save finish(width, height, output, bitmap, binaryImage, parsed_args, "ccl_gpu_global"); delete[] binaryImage; }
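The gpu_analysis step above resolves label equivalences by treating each non-zero label as a 1-based pixel index and chasing references until a label points at itself. A tiny host-side reference of that resolution step follows; it is purely illustrative (not part of the original project) but handy for checking the GPU result on small images.

#include <cstdio>

// Host-side reference of the "analysis" step: follow label references until a
// root label (one that points at itself) is reached, then write it back.
void resolveLabelsHost( int* image, int width, int height )
{
	for (int y = 0; y < height; y++) for (int x = 0; x < width; x++)
	{
		int label = image[x + y * width];
		if (label == 0) continue;                 // background stays 0
		int idx = -1;
		while (label != (idx + 1))                // stop when the label refers to itself
		{
			idx = label - 1;                      // convert 1-based label to pixel index
			label = image[(idx % width) + (idx / width) * width];
		}
		image[x + y * width] = label;
	}
}

int main()
{
	// 1x4 image: pixel 3 points to pixel 2, which points to pixel 1 (the root, label 1).
	int img[4] = { 1, 1, 2, 3 };
	resolveLabelsHost( img, 4, 1 );
	printf( "%d %d %d %d\n", img[0], img[1], img[2], img[3] );  // expected: 1 1 1 1
	return 0;
}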
2abe3d6eb294c350c74540e158f9b2e8bb5ec2fa.cu
#include <cuda_runtime.h> #include <cuda.h> #include "../inc/helper_cuda.h" #include "common_ccl.h" //For CUDA error checking #define cudaErrorCheck(t) { \ t; \ cudaError_t e=cudaGetLastError(); \ if(e!=cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \ exit(0); \ } \ } //Defines a default thread block size (overwritten by args) int regionWidth = 8; int regionHeight = 8; int total_index; __global__ void gpu_label(int width, int height, int* globalImage) { // STEP 1 - Initial Labelling int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int idx = width*y+x+1; // +1 to avoid 0 labels int temp; if ((x<width) && (y<height)) { temp = globalImage[x+y*width]; if(temp != 0) { globalImage[x+y*width] = idx; } //printf("x = %d, y = %d, i = %d\n",x,y,idx); } } __device__ int getMinNeighbourScan(int x, int y, int width, int height, int label, int* globalImage) { int minLabel = label, curr = -1; // south-west if(x > 0 && y < (height-1)) curr = globalImage[x-1+(y+1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // west if(x > 0) curr = globalImage[x-1+(y)*width]; if(curr > 0) minLabel = min(minLabel,curr); // north-west if(x > 0 && y > 0) curr = globalImage[x-1+(y-1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // north if(y > 0) curr = globalImage[x+(y-1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // north-east if(x < (width-1) && y > 0) curr = globalImage[x+1+(y-1)*width]; if(curr > 0) minLabel = min(minLabel,curr); return minLabel; } __global__ void gpu_scan(int width, int height, int* globalImage) { // STEP 2 - Scanning int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int label; if ((x<width) && (y<height)) { label = globalImage[x+y*width]; if(label != 0) { int minLabelScanned = getMinNeighbourScan(x,y,width,height,label,globalImage); globalImage[x+y*width] = minLabelScanned; } } } __global__ void gpu_analysis(int width, int height, int* globalImage) { // STEP 3 - Analysis int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; //int i = y*width + x; int label; if ((x<width) && (y<height)) { label = globalImage[x+y*width]; if(label != 0) { // propagate labels // "recursively" get the final label // - if first referred pixel index refers to another label // - stop when the label refers to itself int idx = -1; int lx,ly; while(label != (idx+1)) { idx = label-1; // -1 since labels start from 1 and we want 1D pixel index lx = idx%width; ly = idx/width; label = globalImage[lx+ly*width]; } globalImage[x+y*width] = label; } } } __device__ int getMinNeighbourLink(int x, int y, int width, int height, int label, int* globalImage) { int minLabel = label; int curr = -1; // CHANGED FROM PAPER // Need to check south-east, north, and north-west as well for the algorithm to work // south-west if(x > 0 && y < (height-1)) curr = globalImage[x-1+(y+1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // south-east if(x < (width-1) && y < (height-1)) curr = globalImage[x+1+(y+1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // west if(x > 0) curr = globalImage[x-1+(y)*width]; if(curr > 0) minLabel = min(minLabel,curr); // east if(x < (width-1)) curr = globalImage[x+1+(y)*width]; if(curr > 0) minLabel = min(minLabel,curr); // north-east if(x < (width-1) && y > 0) curr = globalImage[x+1+(y-1)*width]; if(curr > 0) minLabel = min(minLabel,curr); // north if(y > 0) curr = globalImage[x+(y-1)*width]; if(curr > 0) minLabel = 
min(minLabel,curr); // north-west if(x > 0 && y > 0) curr = globalImage[x-1+(y-1)*width]; if(curr > 0) minLabel = min(minLabel,curr); return minLabel; } __global__ void gpu_link(int width, int height, int* globalImage) { // STEP 4 - Link int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; //int i = y*width + x; int label; if ((x<width) && (y<height)) { label = globalImage[x+y*width]; if(label != 0) { // scan neighbours int minLabel = getMinNeighbourLink(x,y,width, height, label, globalImage); // update pixel of REFERENCE label (not current pixel) // this is so that all other pixels can simply reference that pixel // in the next step if(minLabel < label) { int refIdx = label-1; // -1 since labels start from 1 and we want 1D pixel index int refx = refIdx%width; int refy = refIdx/width; // reduces contention - makes it faster than surface atomicMin(&globalImage[refx+refy*width],minLabel); } } } } __global__ void gpu_relabel(int width, int height, int* globalImage) { // STEP 5 - Re-label int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; //int i = y*width + x; int label; if ((x<width) && (y<height)) { label = globalImage[x+y*width]; if(label != 0) { // resolve label equivalences (after previous step) int refIdx = label-1; // -1 since labels start from 1 and we want 1D pixel index int refx = refIdx%width; int refy = refIdx/width; int refLabel; refLabel = globalImage[refx+refy*width]; globalImage[x+y*width] = refLabel; } } } __device__ bool done; __global__ void gpu_rescan(int width, int height, int* globalImage) { // STEP 5 - Re-Scan int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; //int i = y*width + x; int label; if ((x<width) && (y<height)) { label = globalImage[x+y*width]; if(label != 0) { // check if all regions are connected int minNeighbour = getMinNeighbourScan(x,y,width,height,label,globalImage); if(minNeighbour != label) { done = false; } } } } //Get ready to do work on the GPU void gpu_label(int* image, CPUBitmap* output, int width, int height, float* gpuTime) { //Copy image over int* globalImage; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindSigned); cudaErrorCheck(cudaMalloc((void**)&globalImage,width*height*sizeof(int))); cudaErrorCheck(cudaMemcpy(globalImage,image,width*height*sizeof(int), cudaMemcpyHostToDevice)); //Define grid dim3 block_dim(regionWidth, regionHeight); int gridWidth = width/block_dim.x; int gridHeight = height/block_dim.y; if (width%block_dim.x != 0) gridWidth++; if (height%block_dim.y != 0) gridHeight++; bool result = false; dim3 grid_dim(gridWidth, gridHeight); //Start timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); //Our main algorithm - reference the "Parallel Connected-Component //Labeling Algorithm for GPGPU Applications" in ./documents for more //understanding gpu_label<<<grid_dim, block_dim>>>(width, height, globalImage); cudaDeviceSynchronize(); gpu_scan<<<grid_dim, block_dim>>>(width, height, globalImage); cudaDeviceSynchronize(); gpu_analysis<<<grid_dim, block_dim>>>(width, height, globalImage); cudaDeviceSynchronize(); while(result == false) { gpu_link<<<grid_dim, block_dim>>>(width, height, globalImage); cudaDeviceSynchronize(); gpu_relabel<<<grid_dim, block_dim>>>(width, height, globalImage); cudaDeviceSynchronize(); result = true; cudaErrorCheck(cudaMemcpyToSymbol(done, &result, sizeof(bool))); gpu_rescan<<<grid_dim, 
block_dim>>>(width, height, globalImage); cudaDeviceSynchronize(); cudaErrorCheck(cudaMemcpyFromSymbol(&result, done, sizeof(bool))); } //Stop timing cudaEventRecord(stop); cudaEventSynchronize(stop); *gpuTime = 0; cudaEventElapsedTime(gpuTime, start, stop); //Get result cudaErrorCheck(cudaMemcpy(image, globalImage,width*height*sizeof(int), cudaMemcpyDeviceToHost)); cudaErrorCheck(cudaFree(globalImage)); } int main(int argc, char **argv) { int width, height; int* dims = new int[2]; int* binaryImage; CPUBitmap *bitmap; DataBlock data; BMP output; BMP input; struct arguments parsed_args; //Parse args, load image if (!start(argc, argv, width, height, input, parsed_args)) exit(EXIT_FAILURE); regionWidth = parsed_args.region_width; regionHeight = parsed_args.region_width; //Binarize, initialize output bitmap = new CPUBitmap( width, height, &data ); data.bitmap = bitmap; copyBMPtoBitmap(&input,bitmap); binaryImage = new int[(width)*(height)]; bitmapToBinary(bitmap,binaryImage); output.SetSize(width,height); output.SetBitDepth(32); // RGBA fprintf(stderr,"LABELLING...\n"); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); //Main algorithm float gpuTime = 0; gpu_label(binaryImage,bitmap,width,height,&gpuTime); //Stop timing cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); fprintf(stderr,"FINISHED...\n"); //Print times if (!parsed_args.bench) { printf("Time elapsed (gpu): %.6f ms\n",gpuTime); printf("Time elapsed (total): %.6f ms\n",milliseconds); } else { printf("%s,%d,%d,%f,%f\n",parsed_args.mode==NORMAL_MODE?"normal":"random", width*height,regionWidth*regionHeight, gpuTime,milliseconds); } //Colourise, display, and save finish(width, height, output, bitmap, binaryImage, parsed_args, "ccl_gpu_global"); delete[] binaryImage; }
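Both variants of gpu_label above drive the link/relabel/rescan passes from a host loop that assumes convergence, lets gpu_rescan clear a __device__ bool done flag if anything still changes, and reads the flag back each iteration. The sketch below isolates that convergence-flag handshake with a trivial stand-in kernel; convergedDemo and stepKernel are invented names and the decrement kernel merely stands in for the real relabel/rescan work.

#include <cstdio>
#include <cuda_runtime.h>

__device__ bool convergedDemo;

__global__ void stepKernel( int* data, int n )
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	if (data[i] > 0) { data[i]--; convergedDemo = false; }  // any change vetoes convergence
}

int main()
{
	const int n = 1 << 16;
	int* d;
	cudaMalloc( &d, n * sizeof( int ) );
	cudaMemset( d, 0, n * sizeof( int ) );
	const int seed = 5;
	cudaMemcpy( d, &seed, sizeof( int ), cudaMemcpyHostToDevice );  // one cell needs 5 steps
	bool converged = false;
	int iterations = 0;
	while (!converged)
	{
		converged = true;                                           // assume we are done...
		cudaMemcpyToSymbol( convergedDemo, &converged, sizeof( bool ) );
		stepKernel<<<(n + 255) / 256, 256>>>( d, n );
		cudaMemcpyFromSymbol( &converged, convergedDemo, sizeof( bool ) );  // ...unless a thread objected
		iterations++;
	}
	printf( "converged after %d iterations\n", iterations );        // expected: 6
	cudaFree( d );
	return 0;
}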
019cc43fbe083704586826aa1c59537d44792c20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ******************** * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ // modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu #include <ATen/ATen.h> #include <THH/THHAtomics.cuh> #include <stdio.h> #include <math.h> #include <float.h> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if 
(bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const scalar_t map_h = i * dilation_h + offset_h; //const scalar_t map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should 
be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_im2col: %s\n", hipGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t *data_col, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, 
const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *grad_im_ = grad_im.data<scalar_t>(); hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_col2im: %s\n", hipGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * 
kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == 
argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void modulated_deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const 
scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const float map_h = i * dilation_h + offset_h; //const float map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; //data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; 
const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset, scalar_t *grad_mask) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, 
width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_im_ = grad_im.data<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in 
modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err)); } }
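The host wrapper above expects the caller to have already sized and allocated the column buffer. As a minimal, hypothetical sketch (not part of the source), the driver below shows one way to do that with ATen before calling modulated_deformable_im2col_cuda; the [C*kh*kw, N, H_out, W_out] column layout and the offset/mask channel counts are inferred from the kernel's pointer arithmetic rather than any documented API, the output-size formula is the usual convolution rule, and the name run_modulated_im2col is my own.

#include <ATen/ATen.h>

// Hypothetical driver around the wrapper defined above; tensor shapes are
// inferred from the kernel indexing, not taken from a documented API.
at::Tensor run_modulated_im2col(const at::Tensor& im,      // [N, C, H, W]
                                const at::Tensor& offset,  // [N, G*2*kh*kw, H_out, W_out]
                                const at::Tensor& mask,    // [N, G*kh*kw, H_out, W_out]
                                int kh, int kw, int pad_h, int pad_w,
                                int stride_h, int stride_w,
                                int dilation_h, int dilation_w, int group) {
  const int N = im.size(0), C = im.size(1), H = im.size(2), W = im.size(3);
  const int H_out = (H + 2 * pad_h - (dilation_h * (kh - 1) + 1)) / stride_h + 1;
  const int W_out = (W + 2 * pad_w - (dilation_w * (kw - 1) + 1)) / stride_w + 1;
  // Column buffer laid out as [C*kh*kw, N, H_out, W_out], matching
  // data_col_ptr = ((c_col * batch + b) * H_out + h) * W_out + w in the kernel.
  at::Tensor col = at::zeros({C * kh * kw, N, H_out, W_out}, im.options());
  modulated_deformable_im2col_cuda(im, offset, mask, N, C, H, W, H_out, W_out,
                                   kh, kw, pad_h, pad_w, stride_h, stride_w,
                                   dilation_h, dilation_w, group, col);
  return col;
}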
019cc43fbe083704586826aa1c59537d44792c20.cu
/*! ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ******************** * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ // modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu #include <ATen/ATen.h> #include <THC/THCAtomics.cuh> #include <stdio.h> #include <math.h> #include <float.h> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else 
if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const scalar_t map_h = i * dilation_h + offset_h; //const scalar_t map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes 
should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t *data_col, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const 
int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *grad_im_ = grad_im.data<scalar_t>(); deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + 
w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h 
== argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void modulated_deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * 
kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const float map_h = i * dilation_h + offset_h; //const float map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; //data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx 
<= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset, scalar_t *grad_mask) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, 
width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_im_ = grad_im.data<scalar_t>(); modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const 
at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data<scalar_t>(); modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); } }
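Because the bilinear samplers above silently drop out-of-range taps, the easiest way to validate the GPU path is to compare it against a scalar CPU reference. The snippet below is such a reference sketch; the name bilinear_ref and the float-only signature are my own choices, and it guards all four taps even though the calling kernels already clamp the sample point to the image.

#include <cmath>

// CPU mirror of the bilinear sampling used by (dmcn_)im2col_bilinear:
// each of the four neighbouring taps contributes zero when it falls
// outside the image.
float bilinear_ref(const float* data, int height, int width, float h, float w) {
    int h_low = static_cast<int>(std::floor(h));
    int w_low = static_cast<int>(std::floor(w));
    int h_high = h_low + 1, w_high = w_low + 1;
    float lh = h - h_low, lw = w - w_low;   // fractional offsets
    float hh = 1.f - lh, hw = 1.f - lw;
    auto fetch = [&](int r, int c) -> float {
        return (r >= 0 && r < height && c >= 0 && c < width) ? data[r * width + c] : 0.f;
    };
    return hh * hw * fetch(h_low, w_low)  + hh * lw * fetch(h_low, w_high) +
           lh * hw * fetch(h_high, w_low) + lh * lw * fetch(h_high, w_high);
}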
4d54ec2979d2e6d56ce4957fe6dc34834e27c8cf.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "euclidean_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *vg_a = NULL; hipMalloc(&vg_a, XSIZE*YSIZE); size_t pitch_a = 2; size_t n_a = XSIZE; const float *vg_b = NULL; hipMalloc(&vg_b, XSIZE*YSIZE); size_t pitch_b = 2; size_t n_b = YSIZE; size_t k = 1; float *d = NULL; hipMalloc(&d, XSIZE*YSIZE); size_t pitch_d = 2; float p = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( euclidean_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( euclidean_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( euclidean_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
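The benchmark above pads XSIZE/YSIZE with while-loops until they divide the block dimensions. An equivalent and more common idiom is ceiling division, sketched below; either way the kernel still has to guard threads that fall past the real problem size, and ceil_div is just an illustrative helper name.

// Ceiling division for grid sizing; gives the same block count as the
// while-loop padding without mutating XSIZE/YSIZE.
static inline unsigned int ceil_div(unsigned int n, unsigned int block) {
    return (n + block - 1) / block;
}
// Usage with the benchmark's variables:
//   dim3 threadBlock(BLOCKX, BLOCKY);
//   dim3 gridBlock(ceil_div(XSIZE, BLOCKX), ceil_div(YSIZE, BLOCKY));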
4d54ec2979d2e6d56ce4957fe6dc34834e27c8cf.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "euclidean_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *vg_a = NULL; cudaMalloc(&vg_a, XSIZE*YSIZE); size_t pitch_a = 2; size_t n_a = XSIZE; const float *vg_b = NULL; cudaMalloc(&vg_b, XSIZE*YSIZE); size_t pitch_b = 2; size_t n_b = YSIZE; size_t k = 1; float *d = NULL; cudaMalloc(&d, XSIZE*YSIZE); size_t pitch_d = 2; float p = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); euclidean_kernel<<<gridBlock,threadBlock>>>(vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { euclidean_kernel<<<gridBlock,threadBlock>>>(vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { euclidean_kernel<<<gridBlock,threadBlock>>>(vg_a,pitch_a,n_a,vg_b,pitch_b,n_b,k,d,pitch_d,p); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
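One caveat with the chrono loop in this benchmark: kernel launches return to the host asynchronously, so reading steady_clock::now() immediately after the loop can time launch submission rather than execution. A synchronized variant, sketched as a drop-in that assumes the benchmark's own variables and kernel, looks like this:

// Drain outstanding work, time 1000 launches, then wait for them to finish
// so the measured interval covers execution, not just asynchronous submission.
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int iter = 0; iter < 1000; ++iter) {
    euclidean_kernel<<<gridBlock, threadBlock>>>(vg_a, pitch_a, n_a,
                                                 vg_b, pitch_b, n_b,
                                                 k, d, pitch_d, p);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();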
f0c8f0ff57dd939009dbef57cb236ce07451c817.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "book.h" #define SIZE (10*1024*1024) float cuda_malloc_test(int size, bool up); float cuda_host_alloc_test(int size, bool up); int main(void) { float elapsedTime; float MB = (float)100 * SIZE * sizeof(int) / 1024 / 1024; elapsedTime = cuda_malloc_test(SIZE, true); printf("Time using hipMalloc: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy up: %3.1f\n", MB/(elapsedTime/1000)); elapsedTime = cuda_malloc_test(SIZE, false); printf("Time using hipMalloc: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy down: %3.1f\n", MB / (elapsedTime / 1000)); elapsedTime = cuda_host_alloc_test(SIZE, true); printf("Time using cudaHostalloc: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy up: %3.1f\n", MB / (elapsedTime / 1000)); elapsedTime = cuda_host_alloc_test(SIZE, false); printf("Time using cudaHostalloc: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy down: %3.1f\n", MB / (elapsedTime / 1000)); } float cuda_malloc_test(int size, bool up) { hipEvent_t start, stop; int* a, * dev_a; float elapsedTime; HANDLE_ERROR(hipEventCreate(&start)); HANDLE_ERROR(hipEventCreate(&stop)); a = (int*)malloc(size * sizeof(*a)); HANDLE_NULL(a); HANDLE_ERROR(hipMalloc((void**)&dev_a, size * sizeof(*dev_a))); HANDLE_ERROR(hipEventRecord(start, 0)); for (int i = 0; i < 100; ++i) { if (up) HANDLE_ERROR(hipMemcpy(dev_a, a, size * sizeof(*dev_a), hipMemcpyHostToDevice)); else HANDLE_ERROR(hipMemcpy(a, dev_a, size * sizeof(*dev_a), hipMemcpyDeviceToHost)); } HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop)); free(a); HANDLE_ERROR(hipFree(dev_a)); HANDLE_ERROR(hipEventDestroy(start)); HANDLE_ERROR(hipEventDestroy(stop)); return elapsedTime; } float cuda_host_alloc_test(int size, bool up) { hipEvent_t start, stop; int* a, * dev_a; float elapsedTime; HANDLE_ERROR(hipEventCreate(&start)); HANDLE_ERROR(hipEventCreate(&stop)); HANDLE_ERROR(hipHostMalloc((void**)&a, size * sizeof(*a), hipHostMallocDefault)); HANDLE_ERROR(hipMalloc((void**)&dev_a, size * sizeof(*dev_a))); HANDLE_ERROR(hipEventRecord(start, 0)); for (int i = 0; i < 100; ++i) { if (up) HANDLE_ERROR(hipMemcpy(dev_a, a, size * sizeof(*dev_a), hipMemcpyHostToDevice)); else HANDLE_ERROR(hipMemcpy(a, dev_a, size * sizeof(*dev_a), hipMemcpyDeviceToHost)); } HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop)); HANDLE_ERROR(hipHostFree(a)); HANDLE_ERROR(hipFree(dev_a)); HANDLE_ERROR(hipEventDestroy(start)); HANDLE_ERROR(hipEventDestroy(stop)); return elapsedTime; }
f0c8f0ff57dd939009dbef57cb236ce07451c817.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "book.h" #define SIZE (10*1024*1024) float cuda_malloc_test(int size, bool up); float cuda_host_alloc_test(int size, bool up); int main(void) { float elapsedTime; float MB = (float)100 * SIZE * sizeof(int) / 1024 / 1024; elapsedTime = cuda_malloc_test(SIZE, true); printf("Time using cudaMalloc: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy up: %3.1f\n", MB/(elapsedTime/1000)); elapsedTime = cuda_malloc_test(SIZE, false); printf("Time using cudaMalloc: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy down: %3.1f\n", MB / (elapsedTime / 1000)); elapsedTime = cuda_host_alloc_test(SIZE, true); printf("Time using cudaHostalloc: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy up: %3.1f\n", MB / (elapsedTime / 1000)); elapsedTime = cuda_host_alloc_test(SIZE, false); printf("Time using cudaHostalloc: %3.1f ms\n", elapsedTime); printf("\tMB/s during copy down: %3.1f\n", MB / (elapsedTime / 1000)); } float cuda_malloc_test(int size, bool up) { cudaEvent_t start, stop; int* a, * dev_a; float elapsedTime; HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); a = (int*)malloc(size * sizeof(*a)); HANDLE_NULL(a); HANDLE_ERROR(cudaMalloc((void**)&dev_a, size * sizeof(*dev_a))); HANDLE_ERROR(cudaEventRecord(start, 0)); for (int i = 0; i < 100; ++i) { if (up) HANDLE_ERROR(cudaMemcpy(dev_a, a, size * sizeof(*dev_a), cudaMemcpyHostToDevice)); else HANDLE_ERROR(cudaMemcpy(a, dev_a, size * sizeof(*dev_a), cudaMemcpyDeviceToHost)); } HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop)); free(a); HANDLE_ERROR(cudaFree(dev_a)); HANDLE_ERROR(cudaEventDestroy(start)); HANDLE_ERROR(cudaEventDestroy(stop)); return elapsedTime; } float cuda_host_alloc_test(int size, bool up) { cudaEvent_t start, stop; int* a, * dev_a; float elapsedTime; HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); HANDLE_ERROR(cudaHostAlloc((void**)&a, size * sizeof(*a), cudaHostAllocDefault)); HANDLE_ERROR(cudaMalloc((void**)&dev_a, size * sizeof(*dev_a))); HANDLE_ERROR(cudaEventRecord(start, 0)); for (int i = 0; i < 100; ++i) { if (up) HANDLE_ERROR(cudaMemcpy(dev_a, a, size * sizeof(*dev_a), cudaMemcpyHostToDevice)); else HANDLE_ERROR(cudaMemcpy(a, dev_a, size * sizeof(*dev_a), cudaMemcpyDeviceToHost)); } HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop)); HANDLE_ERROR(cudaFreeHost(a)); HANDLE_ERROR(cudaFree(dev_a)); HANDLE_ERROR(cudaEventDestroy(start)); HANDLE_ERROR(cudaEventDestroy(stop)); return elapsedTime; }
71dfb1cac2bbfe003026f0ba2ea12968936f8a80.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * @Description:
 * @Author: Tianling Lyu
 * @Date: 2021-01-09 08:47:49
 * @LastEditors: Tianling Lyu
 * @LastEditTime: 2021-03-11 10:28:59
 */
#include "include/fan_weighting.h"

#include <cstdio>

#include "cuda/cuda_common.h"

namespace ct_recon {
#ifdef USE_ROCM

// Weighting kernel for a flat (equidistant) detector: the fan angle at
// channel offset s is atan2(s, dsd).
template <typename T>
__global__ void FlatWeightingKernel(const T* in, T* out,
    const FanWeightingParam param, const int n_elements)
{
    double cents = static_cast<double>(param.ns-1) / 2;
    for (int thread_id : CudaGridRangeX<int>(n_elements)) {
        int is = thread_id % param.ns;
        double s = param.ds * (static_cast<double>(is) - cents);
        double w = param.dso * fabs(cos(atan2(s, param.dsd))) / param.dsd;
        out[thread_id] = w * in[thread_id];
    }
    return;
}

// Weighting kernel for a curved (equiangular) detector: the fan angle at
// channel offset s is s / dsd.
template <typename T>
__global__ void FanWeightingKernel(const T* in, T* out,
    const FanWeightingParam param, const int n_elements)
{
    double cents = static_cast<double>(param.ns-1) / 2;
    for (int thread_id : CudaGridRangeX<int>(n_elements)) {
        int is = thread_id % param.ns;
        double s = param.ds * (static_cast<double>(is) - cents);
        double w = param.dso * fabs(cos(s / param.dsd)) / param.dsd;
        out[thread_id] = w * in[thread_id];
    }
    return;
}

// Gradient counterpart of FlatWeightingKernel.
template <typename T>
__global__ void FlatWeightingGradKernel(const T* in, T* out,
    const FanWeightingParam param, const int n_elements)
{
    double cents = static_cast<double>(param.ns-1) / 2;
    for (int thread_id : CudaGridRangeX<int>(n_elements)) {
        int is = thread_id % param.ns;
        double s = param.ds * (static_cast<double>(is) - cents);
        double w = param.dsd * (param.dso * fabs(cos(atan2(s, param.dsd))));
        out[thread_id] = in[thread_id] * w;
    }
    return;
}

// Gradient counterpart of FanWeightingKernel.
template <typename T>
__global__ void FanWeightingGradKernel(const T* in, T* out,
    const FanWeightingParam param, const int n_elements)
{
    double cents = static_cast<double>(param.ns-1) / 2;
    for (int thread_id : CudaGridRangeX<int>(n_elements)) {
        int is = thread_id % param.ns;
        double s = param.ds * (static_cast<double>(is) - cents);
        double w = param.dsd * (param.dso * fabs(cos(s / param.dsd)));
        out[thread_id] = in[thread_id] * w;
    }
    return;
}

// param_.type selects the geometry: 1 dispatches the equiangular kernel,
// 2 the flat-detector kernel.
template <>
bool FanWeighting<float>::calculate_on_gpu(const float* in, float* out,
    hipStream_t stream) const
{
    int n_elements = param_.ns*param_.nrow;
    CudaLaunchConfig config = GetCudaLaunchConfig(n_elements);
    if (param_.type == 1) {
        hipLaunchKernelGGL((FanWeightingKernel<float>), dim3(config.block_count),
            dim3(config.thread_per_block), 0, stream, in, out, param_, n_elements);
    } else if (param_.type == 2) {
        hipLaunchKernelGGL((FlatWeightingKernel<float>), dim3(config.block_count),
            dim3(config.thread_per_block), 0, stream, in, out, param_, n_elements);
    } else {
        return false;
    }
    hipError_t err = hipDeviceSynchronize();
    return err==hipSuccess;
}

template <>
bool FanWeighting<double>::calculate_on_gpu(const double* in, double* out,
    hipStream_t stream) const
{
    int n_elements = param_.ns*param_.nrow;
    CudaLaunchConfig config = GetCudaLaunchConfig(n_elements);
    if (param_.type == 1) {
        hipLaunchKernelGGL((FanWeightingKernel<double>), dim3(config.block_count),
            dim3(config.thread_per_block), 0, stream, in, out, param_, n_elements);
    } else if (param_.type == 2) {
        hipLaunchKernelGGL((FlatWeightingKernel<double>), dim3(config.block_count),
            dim3(config.thread_per_block), 0, stream, in, out, param_, n_elements);
    } else {
        return false;
    }
    hipError_t err = hipDeviceSynchronize();
    return err==hipSuccess;
}

template <>
bool FanWeightingGrad<float>::calculate_on_gpu(const float* in, float* out,
    hipStream_t stream) const
{
    int n_elements = param_.ns*param_.nrow;
    CudaLaunchConfig config = GetCudaLaunchConfig(n_elements);
    if (param_.type == 1) {
        hipLaunchKernelGGL((FanWeightingGradKernel<float>), dim3(config.block_count),
            dim3(config.thread_per_block), 0, stream, in, out, param_, n_elements);
    } else if (param_.type == 2) {
        hipLaunchKernelGGL((FlatWeightingGradKernel<float>), dim3(config.block_count),
            dim3(config.thread_per_block), 0, stream, in, out, param_, n_elements);
    } else {
        return false;
    }
    hipError_t err = hipDeviceSynchronize();
    return err==hipSuccess;
}

template <>
bool FanWeightingGrad<double>::calculate_on_gpu(const double* in, double* out,
    hipStream_t stream) const
{
    int n_elements = param_.ns*param_.nrow;
    CudaLaunchConfig config = GetCudaLaunchConfig(n_elements);
    if (param_.type == 1) {
        hipLaunchKernelGGL((FanWeightingGradKernel<double>), dim3(config.block_count),
            dim3(config.thread_per_block), 0, stream, in, out, param_, n_elements);
    } else if (param_.type == 2) {
        hipLaunchKernelGGL((FlatWeightingGradKernel<double>), dim3(config.block_count),
            dim3(config.thread_per_block), 0, stream, in, out, param_, n_elements);
    } else {
        return false;
    }
    hipError_t err = hipDeviceSynchronize();
    return err==hipSuccess;
}

#endif // USE_ROCM
} // namespace ct_recon
71dfb1cac2bbfe003026f0ba2ea12968936f8a80.cu
/*
 * @Description:
 * @Author: Tianling Lyu
 * @Date: 2021-01-09 08:47:49
 * @LastEditors: Tianling Lyu
 * @LastEditTime: 2021-03-11 10:28:59
 */
#include "include/fan_weighting.h"

#include <cstdio>

#include "cuda/cuda_common.h"

namespace ct_recon {
#ifdef USE_CUDA

// Weighting kernel for a flat (equidistant) detector: the fan angle at
// channel offset s is atan2(s, dsd).
template <typename T>
__global__ void FlatWeightingKernel(const T* in, T* out,
    const FanWeightingParam param, const int n_elements)
{
    double cents = static_cast<double>(param.ns-1) / 2;
    for (int thread_id : CudaGridRangeX<int>(n_elements)) {
        int is = thread_id % param.ns;
        double s = param.ds * (static_cast<double>(is) - cents);
        double w = param.dso * fabs(cos(atan2(s, param.dsd))) / param.dsd;
        out[thread_id] = w * in[thread_id];
    }
    return;
}

// Weighting kernel for a curved (equiangular) detector: the fan angle at
// channel offset s is s / dsd.
template <typename T>
__global__ void FanWeightingKernel(const T* in, T* out,
    const FanWeightingParam param, const int n_elements)
{
    double cents = static_cast<double>(param.ns-1) / 2;
    for (int thread_id : CudaGridRangeX<int>(n_elements)) {
        int is = thread_id % param.ns;
        double s = param.ds * (static_cast<double>(is) - cents);
        double w = param.dso * fabs(cos(s / param.dsd)) / param.dsd;
        out[thread_id] = w * in[thread_id];
    }
    return;
}

// Gradient counterpart of FlatWeightingKernel.
template <typename T>
__global__ void FlatWeightingGradKernel(const T* in, T* out,
    const FanWeightingParam param, const int n_elements)
{
    double cents = static_cast<double>(param.ns-1) / 2;
    for (int thread_id : CudaGridRangeX<int>(n_elements)) {
        int is = thread_id % param.ns;
        double s = param.ds * (static_cast<double>(is) - cents);
        double w = param.dsd * (param.dso * fabs(cos(atan2(s, param.dsd))));
        out[thread_id] = in[thread_id] * w;
    }
    return;
}

// Gradient counterpart of FanWeightingKernel.
template <typename T>
__global__ void FanWeightingGradKernel(const T* in, T* out,
    const FanWeightingParam param, const int n_elements)
{
    double cents = static_cast<double>(param.ns-1) / 2;
    for (int thread_id : CudaGridRangeX<int>(n_elements)) {
        int is = thread_id % param.ns;
        double s = param.ds * (static_cast<double>(is) - cents);
        double w = param.dsd * (param.dso * fabs(cos(s / param.dsd)));
        out[thread_id] = in[thread_id] * w;
    }
    return;
}

// param_.type selects the geometry: 1 dispatches the equiangular kernel,
// 2 the flat-detector kernel.
template <>
bool FanWeighting<float>::calculate_on_gpu(const float* in, float* out,
    cudaStream_t stream) const
{
    int n_elements = param_.ns*param_.nrow;
    CudaLaunchConfig config = GetCudaLaunchConfig(n_elements);
    if (param_.type == 1) {
        FanWeightingKernel<float>
            <<<config.block_count, config.thread_per_block, 0, stream>>>
            (in, out, param_, n_elements);
    } else if (param_.type == 2) {
        FlatWeightingKernel<float>
            <<<config.block_count, config.thread_per_block, 0, stream>>>
            (in, out, param_, n_elements);
    } else {
        return false;
    }
    cudaError_t err = cudaDeviceSynchronize();
    return err==cudaSuccess;
}

template <>
bool FanWeighting<double>::calculate_on_gpu(const double* in, double* out,
    cudaStream_t stream) const
{
    int n_elements = param_.ns*param_.nrow;
    CudaLaunchConfig config = GetCudaLaunchConfig(n_elements);
    if (param_.type == 1) {
        FanWeightingKernel<double>
            <<<config.block_count, config.thread_per_block, 0, stream>>>
            (in, out, param_, n_elements);
    } else if (param_.type == 2) {
        FlatWeightingKernel<double>
            <<<config.block_count, config.thread_per_block, 0, stream>>>
            (in, out, param_, n_elements);
    } else {
        return false;
    }
    cudaError_t err = cudaDeviceSynchronize();
    return err==cudaSuccess;
}

template <>
bool FanWeightingGrad<float>::calculate_on_gpu(const float* in, float* out,
    cudaStream_t stream) const
{
    int n_elements = param_.ns*param_.nrow;
    CudaLaunchConfig config = GetCudaLaunchConfig(n_elements);
    if (param_.type == 1) {
        FanWeightingGradKernel<float>
            <<<config.block_count, config.thread_per_block, 0, stream>>>
            (in, out, param_, n_elements);
    } else if (param_.type == 2) {
        FlatWeightingGradKernel<float>
            <<<config.block_count, config.thread_per_block, 0, stream>>>
            (in, out, param_, n_elements);
    } else {
        return false;
    }
    cudaError_t err = cudaDeviceSynchronize();
    return err==cudaSuccess;
}

template <>
bool FanWeightingGrad<double>::calculate_on_gpu(const double* in, double* out,
    cudaStream_t stream) const
{
    int n_elements = param_.ns*param_.nrow;
    CudaLaunchConfig config = GetCudaLaunchConfig(n_elements);
    if (param_.type == 1) {
        FanWeightingGradKernel<double>
            <<<config.block_count, config.thread_per_block, 0, stream>>>
            (in, out, param_, n_elements);
    } else if (param_.type == 2) {
        FlatWeightingGradKernel<double>
            <<<config.block_count, config.thread_per_block, 0, stream>>>
            (in, out, param_, n_elements);
    } else {
        return false;
    }
    cudaError_t err = cudaDeviceSynchronize();
    return err==cudaSuccess;
}

#endif // USE_CUDA
} // namespace ct_recon
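For reference, a minimal host-side sketch of how these specializations might be driven. It assumes FanWeightingParam is a plain aggregate exposing the fields the kernels read (ns, nrow, ds, dso, dsd, type) and that FanWeighting<T> has a constructor taking that struct; the actual declarations live in include/fan_weighting.h and may differ.

// Hypothetical usage sketch only -- the FanWeightingParam initialization and
// the FanWeighting constructor shown here are assumptions, not part of the
// original files; consult include/fan_weighting.h for the real API.
#include "include/fan_weighting.h"
#include <cuda_runtime.h>

bool weight_projection(const float* d_in, float* d_out, int ns, int nrow)
{
    ct_recon::FanWeightingParam param{};  // assumed to be an aggregate
    param.ns   = ns;       // detector channels per row
    param.nrow = nrow;     // number of projection rows (views)
    param.ds   = 1.0;      // detector sampling interval (assumed units)
    param.dso  = 500.0;    // source-to-isocenter distance (assumed value)
    param.dsd  = 1000.0;   // source-to-detector distance (assumed value)
    param.type = 2;        // 2 -> flat-detector kernel, 1 -> equiangular

    ct_recon::FanWeighting<float> op(param);       // constructor assumed
    // d_in and d_out must already be device pointers of size ns * nrow.
    return op.calculate_on_gpu(d_in, d_out, /*stream=*/0);
}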