Columns (all strings; minimum and maximum lengths as listed):

  hip_filename    5 - 84
  hip_content     79 - 9.69M
  cuda_filename   4 - 83
  cuda_content    19 - 9.69M
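Each row pairs one hipify-generated HIP translation unit with the CUDA file it was produced from. A minimal sketch of how a single row could be held on the host side, assuming only the four columns listed above (the struct name and comments are illustrative, not part of the dataset):

#include <string>

// Hypothetical container for one dataset row; fields mirror the columns above.
struct HipCudaPair {
    std::string hip_filename;   // e.g. "a460dea8...e20.hip" (5-84 chars)
    std::string hip_content;    // hipify-generated HIP source (79 chars to 9.69M)
    std::string cuda_filename;  // e.g. "a460dea8...e20.cu" (4-83 chars)
    std::string cuda_content;   // original CUDA source (19 chars to 9.69M)
};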
a460dea88ea1580bb1b50851d2216c8e6c025e20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // nvcc -O3 -std=c++11 -use_fast_math -ccbin g++ -arch=compute_75 -code=sm_75 -expt-relaxed-constexpr // Performs matrix mutliplication using shared memory tiles where ewach thread // may need to calculate and move more than one data element. Assumes matrices // stored in row major order. The loop structure followed is as(one level // tiling) // // for(int i = 0; i < M; i += Mtile){ //Inter-Tile // for(int j = 0; j < N; j += Ntile){ //Inter-Tile // for(int k = 0; k < K; k += Ktile){ //Inter-Tile // for(int ii = i; ii < i + Mtile; ++ii){ //Intra-Tile // for(int jj = j; jj < j + Ntile; ++jj){ //Intra-Tile // for(int kk = k; kk < k + Ktile; ++kk){ //Intra-Tile // //body // } // } // } // } // } // } #include<iostream> #include<cuda_runtime.h> #include<cuda.h> #include<device_launch_parameters.h> #include "common.h" #define DTYPE float #define M 1024 #define N 1024 #define K 1024 #define MBLOCK 32 #define NBLOCK 32 #define Mtile 128 // This will actually be the loop step of `i` loop. #define Ntile 128 // This will actually be the loop step of `j` loop. #define Ktile 32 // This will actually be the loop step of `k` loop. using namespace std; __global__ void GEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){ // Reserve shared memory tiles if to put in the operands. __shared__ DTYPE asmem[Mtile * Ktile]; __shared__ DTYPE bsmem[Ktile * Ntile]; // Since the actual computation tile size is greater than than the thread // block tile size, therefore we want to find out what size of the output tile // is a register calculating. // Now each thread will compute an output tile of size (Mchunk, Nchunk). // Calculate the chunk of data each thread has to copy from the global memroy // to shared memeory. It is equal to the total number of data elements in a // Tile / total number of threads in a thread block. // TODO: Insert checks here to see if the if the tile size in elements is less than // the number of threads in a thread block. constexpr int Achunktocopy = (Mtile * Ktile) / (MBLOCK * NBLOCK); constexpr int Bchunktocopy = (Ktile * Ntile) / (MBLOCK * NBLOCK); // Find the iteration of the original loop nest that maps to this thread // block here. // It is more elegant to map the iterations instead of row or col. At the end // it doesn't matter becuase the iterations actually determine which row or // col is it. // In this particular launch setup with thread block sizes of (32, 32) and each // thread calculating one outptut element, the globalthreadId.x and // globalthreadId.y is actually the iterations we are looking for. // The Outer loops iteration beginning that this thread block tile // is responsible for. These coordinates also marks the beginning of the // address a thread block needs to copy form the global memory to shared // memory. This represents the coordinates in the data space not in the GPU // (processor) space. int i_iter_tile_base = blockIdx.y * Mtile; // Maps to inter-tile `i`. int j_iter_tile_base = blockIdx.x * Ntile; // Maps to inter-tile `j`. // The Global index start that this thread is responsible for computing. It // will caluclate (Mchunk, Nchunk) starting from these indexes. // int i = i_iter_tile_base + i_iter_thread_base; // int j = j_iter_tile_base + j_iter_thread_base; // Linear thread id in the thread block. int linear_tid = (threadIdx.z * blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; // Number of threads required to copy one row of A. 
int num_threads_to_copy_one_Arow = Ktile / Achunktocopy; // Number of threads required to copy one row of B. int num_threads_to_copy_one_Brow = Ntile / Bchunktocopy; DTYPE *c_tb_tile_base = c; DTYPE *a_tb_tile_base = a; DTYPE *b_tb_tile_base = b; DTYPE *c_tb_tile_offset = c_tb_tile_base + i_iter_tile_base * n + j_iter_tile_base; DTYPE *a_tb_tile_offset; DTYPE *b_tb_tile_offset; DTYPE *c_thread_tile_base = c_tb_tile_offset; DTYPE *a_thread_tile_base_copy; DTYPE *b_thread_tile_base_copy; DTYPE *a_thread_tile_base_compute; DTYPE *b_thread_tile_base_compute; DTYPE *c_thread_tile_offset; DTYPE *a_thread_tile_offset_copy; DTYPE *b_thread_tile_offset_copy; DTYPE *a_thread_tile_offset_compute; DTYPE *b_thread_tile_offset_compute; // Allocate a Ctile in registers of dimensions (Mchunk, Nchunk). // Dont know if this actually goes into the resgisters as register file cannot // be indexed. DTYPE Cout; Cout = 0.0f; // K dimension is sequential so this is not mapped to the gpu compute // heirarchy. Inter tile K-loop for(int kk = 0; kk < k; kk += Ktile){ // Base address in global tile of A & B operand thread block tile. a_tb_tile_offset = a_tb_tile_base + i_iter_tile_base * k + kk; b_tb_tile_offset = b_tb_tile_base + kk * n + j_iter_tile_base; a_thread_tile_base_copy = a_tb_tile_offset; b_thread_tile_base_copy = b_tb_tile_offset; a_thread_tile_base_compute = &asmem[0]; b_thread_tile_base_compute = &bsmem[0]; // Represents the row and col to copy by the corresponding thread in the thread block. It is not // the global row/col to copy, it is the row/col to copy relative to the thread block tile. int A_row_to_copy_in_global = linear_tid / num_threads_to_copy_one_Arow; int A_col_to_copy_in_global = linear_tid % num_threads_to_copy_one_Arow * Achunktocopy; int B_row_to_copy_in_global = linear_tid / num_threads_to_copy_one_Brow; int B_col_to_copy_in_global = linear_tid % num_threads_to_copy_one_Brow * Bchunktocopy; a_thread_tile_offset_copy = a_thread_tile_base_copy + A_row_to_copy_in_global * k + A_col_to_copy_in_global; b_thread_tile_offset_copy = b_thread_tile_base_copy + B_row_to_copy_in_global * n + B_col_to_copy_in_global; // Copy the operands from global to shared memory. Each thread copies the // `chunktocopy` elements from global to sharedm memory. The thread Id's // inside a thread block need to be linearized. Each thread copies it's // contiguous chunk form global memory to the shared memroy. #pragma unroll for(int cpi = 0; cpi < Achunktocopy; ++cpi){ asmem[linear_tid * Achunktocopy + cpi] = a_thread_tile_offset_copy[cpi]; } #pragma unroll for(int cpi = 0; cpi < Bchunktocopy; ++cpi){ bsmem[linear_tid * Bchunktocopy + cpi] = b_thread_tile_offset_copy[cpi]; } __syncthreads(); // Start the computation using fast memory buffers. // This is the amount of work done by one thread i.e., computaion of one // (Mchunk, Nchunk) tile in output. // Do not use the global thread indices `i` and `j` here. We only need the // thread info inside a thread block and hence we need to start it with. // Evertyhing changes here `i_iter`, `j_iter`, `k_iter` were being used to // denote the global iteration that was taking place but now since things // have to be indexed in the shared memory now we cannot use `i_iter`, // `j_iter` and `k_iter` to index them. Now `i_iter` and `j_iter` is set to // use the thread identifier within the thread block. `k_iter` is set to // start from zero and then go upto `ktile`. // The Inner loop iteration beginning that this thread block tile is // responsible for. 
#pragma unroll for(int i_iter_thread_base = threadIdx.y; i_iter_thread_base < Mtile; i_iter_thread_base+=MBLOCK){ #pragma unroll for(int j_iter_thread_base = threadIdx.x; j_iter_thread_base < Ntile; j_iter_thread_base+=NBLOCK){ c_thread_tile_offset = c_thread_tile_base + i_iter_thread_base * n + j_iter_thread_base; a_thread_tile_offset_compute = a_thread_tile_base_compute + i_iter_thread_base * Ktile; b_thread_tile_offset_compute = b_thread_tile_base_compute + j_iter_thread_base; // Intra-tile K-loop. #pragma unroll for(int k_iter = 0; k_iter < Ktile; ++k_iter){ // This statement now uses the shared memory fast buffers. Cout += a_thread_tile_offset_compute[k_iter] * b_thread_tile_offset_compute[k_iter * Ntile]; } // Write back the result to the output matrix. *c_thread_tile_offset = Cout; } } __syncthreads(); } } void hostGEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j ){ DTYPE temp = 0; for(int kk = 0; kk < k ; ++kk){ temp += a[i * k + kk] * b[kk * n + j]; } c[i * n + j] = temp; } } } bool compareGEMM(DTYPE * h_c, DTYPE * h_c_gpu_res, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j ){ if(abs(h_c[i * n + j] - h_c_gpu_res[i * n + j]) > 1e-4) return false; } } return true; } void initMatrix(DTYPE * matrix, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j){ matrix[i * n + j] = static_cast <DTYPE> (rand()) / static_cast <DTYPE> (RAND_MAX); } } } void printMatrix(DTYPE * matrix, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j){ cout<<matrix[i * n + j]<<" "; } cout<<endl; } cout<<endl; } int main(){ DTYPE *d_a, *d_b, *d_c, *h_a, *h_b, *h_c, *h_c_gpu_res; int m ,n, k; m = M; n = N; k = K; h_a = (DTYPE*) malloc(m * k * sizeof(DTYPE)); h_b = (DTYPE*) malloc(k * n * sizeof(DTYPE)); h_c = (DTYPE*) malloc(m * n * sizeof(DTYPE)); h_c_gpu_res = (DTYPE*) malloc(m * n * sizeof(DTYPE)); check_cuda_error(hipMalloc(&d_a, m * k * sizeof(DTYPE))); check_cuda_error(hipMalloc(&d_b, k * n * sizeof(DTYPE))); check_cuda_error(hipMalloc(&d_c, m * n * sizeof(DTYPE))); initMatrix(h_a, m , k); initMatrix(h_b, k , n); initMatrix(h_c_gpu_res, m , n); check_cuda_error(hipMemcpy(d_a, h_a, m * k * sizeof(DTYPE), hipMemcpyHostToDevice)); check_cuda_error(hipMemcpy(d_b, h_b, k * n * sizeof(DTYPE), hipMemcpyHostToDevice)); dim3 block(NBLOCK, MBLOCK, 1); dim3 grid((n + Ntile - 1) / Ntile, (m + Mtile - 1) / Mtile, 1); //printf("%d, %d, %d\n", block.x, block.y, block.z); //printf("%d, %d, %d\n", grid.x, grid.y, grid.z); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, NULL); hipLaunchKernelGGL(( GEMM), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, m , n, k); hipEventRecord(stop, NULL); hipEventSynchronize(stop); float msecTotal = 0.0f; hipEventElapsedTime(&msecTotal, start, stop); double flopsPerMatrixMul = 2.0 * (double) m * (double) n * (double) k; double teraFlops = (flopsPerMatrixMul * 1.0e-12f) / (msecTotal / 1000.0f); cout<<"PERF: "<<teraFlops<<"Tflops \n"; check_cuda_error(hipPeekAtLastError()); check_cuda_error(hipDeviceSynchronize()); hipMemcpy(h_c_gpu_res, d_c, m * n * sizeof(DTYPE), hipMemcpyDeviceToHost); hostGEMM(h_a, h_b, h_c, m, n, k); if(compareGEMM(h_c, h_c_gpu_res, m, n)) cout<<"Success!\n"; else cout<<"Output does not match!\n"; //printMatrix(h_c, m, n); //cout<<"output gpu\n"; //printMatrix(h_c_gpu_res, m, n); free(h_a); free(h_b); free(h_c); free(h_c_gpu_res); // The Global index start that this thread is responsible for.hipFree(d_a); 
hipFree(d_b); hipFree(d_c); return 0; }
a460dea88ea1580bb1b50851d2216c8e6c025e20.cu
// nvcc -O3 -std=c++11 -use_fast_math -ccbin g++ -arch=compute_75 -code=sm_75 -expt-relaxed-constexpr // Performs matrix mutliplication using shared memory tiles where ewach thread // may need to calculate and move more than one data element. Assumes matrices // stored in row major order. The loop structure followed is as(one level // tiling) // // for(int i = 0; i < M; i += Mtile){ //Inter-Tile // for(int j = 0; j < N; j += Ntile){ //Inter-Tile // for(int k = 0; k < K; k += Ktile){ //Inter-Tile // for(int ii = i; ii < i + Mtile; ++ii){ //Intra-Tile // for(int jj = j; jj < j + Ntile; ++jj){ //Intra-Tile // for(int kk = k; kk < k + Ktile; ++kk){ //Intra-Tile // //body // } // } // } // } // } // } #include<iostream> #include<cuda_runtime.h> #include<cuda.h> #include<device_launch_parameters.h> #include "common.h" #define DTYPE float #define M 1024 #define N 1024 #define K 1024 #define MBLOCK 32 #define NBLOCK 32 #define Mtile 128 // This will actually be the loop step of `i` loop. #define Ntile 128 // This will actually be the loop step of `j` loop. #define Ktile 32 // This will actually be the loop step of `k` loop. using namespace std; __global__ void GEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){ // Reserve shared memory tiles if to put in the operands. __shared__ DTYPE asmem[Mtile * Ktile]; __shared__ DTYPE bsmem[Ktile * Ntile]; // Since the actual computation tile size is greater than than the thread // block tile size, therefore we want to find out what size of the output tile // is a register calculating. // Now each thread will compute an output tile of size (Mchunk, Nchunk). // Calculate the chunk of data each thread has to copy from the global memroy // to shared memeory. It is equal to the total number of data elements in a // Tile / total number of threads in a thread block. // TODO: Insert checks here to see if the if the tile size in elements is less than // the number of threads in a thread block. constexpr int Achunktocopy = (Mtile * Ktile) / (MBLOCK * NBLOCK); constexpr int Bchunktocopy = (Ktile * Ntile) / (MBLOCK * NBLOCK); // Find the iteration of the original loop nest that maps to this thread // block here. // It is more elegant to map the iterations instead of row or col. At the end // it doesn't matter becuase the iterations actually determine which row or // col is it. // In this particular launch setup with thread block sizes of (32, 32) and each // thread calculating one outptut element, the globalthreadId.x and // globalthreadId.y is actually the iterations we are looking for. // The Outer loops iteration beginning that this thread block tile // is responsible for. These coordinates also marks the beginning of the // address a thread block needs to copy form the global memory to shared // memory. This represents the coordinates in the data space not in the GPU // (processor) space. int i_iter_tile_base = blockIdx.y * Mtile; // Maps to inter-tile `i`. int j_iter_tile_base = blockIdx.x * Ntile; // Maps to inter-tile `j`. // The Global index start that this thread is responsible for computing. It // will caluclate (Mchunk, Nchunk) starting from these indexes. // int i = i_iter_tile_base + i_iter_thread_base; // int j = j_iter_tile_base + j_iter_thread_base; // Linear thread id in the thread block. int linear_tid = (threadIdx.z * blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; // Number of threads required to copy one row of A. int num_threads_to_copy_one_Arow = Ktile / Achunktocopy; // Number of threads required to copy one row of B. 
int num_threads_to_copy_one_Brow = Ntile / Bchunktocopy; DTYPE *c_tb_tile_base = c; DTYPE *a_tb_tile_base = a; DTYPE *b_tb_tile_base = b; DTYPE *c_tb_tile_offset = c_tb_tile_base + i_iter_tile_base * n + j_iter_tile_base; DTYPE *a_tb_tile_offset; DTYPE *b_tb_tile_offset; DTYPE *c_thread_tile_base = c_tb_tile_offset; DTYPE *a_thread_tile_base_copy; DTYPE *b_thread_tile_base_copy; DTYPE *a_thread_tile_base_compute; DTYPE *b_thread_tile_base_compute; DTYPE *c_thread_tile_offset; DTYPE *a_thread_tile_offset_copy; DTYPE *b_thread_tile_offset_copy; DTYPE *a_thread_tile_offset_compute; DTYPE *b_thread_tile_offset_compute; // Allocate a Ctile in registers of dimensions (Mchunk, Nchunk). // Dont know if this actually goes into the resgisters as register file cannot // be indexed. DTYPE Cout; Cout = 0.0f; // K dimension is sequential so this is not mapped to the gpu compute // heirarchy. Inter tile K-loop for(int kk = 0; kk < k; kk += Ktile){ // Base address in global tile of A & B operand thread block tile. a_tb_tile_offset = a_tb_tile_base + i_iter_tile_base * k + kk; b_tb_tile_offset = b_tb_tile_base + kk * n + j_iter_tile_base; a_thread_tile_base_copy = a_tb_tile_offset; b_thread_tile_base_copy = b_tb_tile_offset; a_thread_tile_base_compute = &asmem[0]; b_thread_tile_base_compute = &bsmem[0]; // Represents the row and col to copy by the corresponding thread in the thread block. It is not // the global row/col to copy, it is the row/col to copy relative to the thread block tile. int A_row_to_copy_in_global = linear_tid / num_threads_to_copy_one_Arow; int A_col_to_copy_in_global = linear_tid % num_threads_to_copy_one_Arow * Achunktocopy; int B_row_to_copy_in_global = linear_tid / num_threads_to_copy_one_Brow; int B_col_to_copy_in_global = linear_tid % num_threads_to_copy_one_Brow * Bchunktocopy; a_thread_tile_offset_copy = a_thread_tile_base_copy + A_row_to_copy_in_global * k + A_col_to_copy_in_global; b_thread_tile_offset_copy = b_thread_tile_base_copy + B_row_to_copy_in_global * n + B_col_to_copy_in_global; // Copy the operands from global to shared memory. Each thread copies the // `chunktocopy` elements from global to sharedm memory. The thread Id's // inside a thread block need to be linearized. Each thread copies it's // contiguous chunk form global memory to the shared memroy. #pragma unroll for(int cpi = 0; cpi < Achunktocopy; ++cpi){ asmem[linear_tid * Achunktocopy + cpi] = a_thread_tile_offset_copy[cpi]; } #pragma unroll for(int cpi = 0; cpi < Bchunktocopy; ++cpi){ bsmem[linear_tid * Bchunktocopy + cpi] = b_thread_tile_offset_copy[cpi]; } __syncthreads(); // Start the computation using fast memory buffers. // This is the amount of work done by one thread i.e., computaion of one // (Mchunk, Nchunk) tile in output. // Do not use the global thread indices `i` and `j` here. We only need the // thread info inside a thread block and hence we need to start it with. // Evertyhing changes here `i_iter`, `j_iter`, `k_iter` were being used to // denote the global iteration that was taking place but now since things // have to be indexed in the shared memory now we cannot use `i_iter`, // `j_iter` and `k_iter` to index them. Now `i_iter` and `j_iter` is set to // use the thread identifier within the thread block. `k_iter` is set to // start from zero and then go upto `ktile`. // The Inner loop iteration beginning that this thread block tile is // responsible for. 
#pragma unroll for(int i_iter_thread_base = threadIdx.y; i_iter_thread_base < Mtile; i_iter_thread_base+=MBLOCK){ #pragma unroll for(int j_iter_thread_base = threadIdx.x; j_iter_thread_base < Ntile; j_iter_thread_base+=NBLOCK){ c_thread_tile_offset = c_thread_tile_base + i_iter_thread_base * n + j_iter_thread_base; a_thread_tile_offset_compute = a_thread_tile_base_compute + i_iter_thread_base * Ktile; b_thread_tile_offset_compute = b_thread_tile_base_compute + j_iter_thread_base; // Intra-tile K-loop. #pragma unroll for(int k_iter = 0; k_iter < Ktile; ++k_iter){ // This statement now uses the shared memory fast buffers. Cout += a_thread_tile_offset_compute[k_iter] * b_thread_tile_offset_compute[k_iter * Ntile]; } // Write back the result to the output matrix. *c_thread_tile_offset = Cout; } } __syncthreads(); } } void hostGEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j ){ DTYPE temp = 0; for(int kk = 0; kk < k ; ++kk){ temp += a[i * k + kk] * b[kk * n + j]; } c[i * n + j] = temp; } } } bool compareGEMM(DTYPE * h_c, DTYPE * h_c_gpu_res, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j ){ if(abs(h_c[i * n + j] - h_c_gpu_res[i * n + j]) > 1e-4) return false; } } return true; } void initMatrix(DTYPE * matrix, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j){ matrix[i * n + j] = static_cast <DTYPE> (rand()) / static_cast <DTYPE> (RAND_MAX); } } } void printMatrix(DTYPE * matrix, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j){ cout<<matrix[i * n + j]<<" "; } cout<<endl; } cout<<endl; } int main(){ DTYPE *d_a, *d_b, *d_c, *h_a, *h_b, *h_c, *h_c_gpu_res; int m ,n, k; m = M; n = N; k = K; h_a = (DTYPE*) malloc(m * k * sizeof(DTYPE)); h_b = (DTYPE*) malloc(k * n * sizeof(DTYPE)); h_c = (DTYPE*) malloc(m * n * sizeof(DTYPE)); h_c_gpu_res = (DTYPE*) malloc(m * n * sizeof(DTYPE)); check_cuda_error(cudaMalloc(&d_a, m * k * sizeof(DTYPE))); check_cuda_error(cudaMalloc(&d_b, k * n * sizeof(DTYPE))); check_cuda_error(cudaMalloc(&d_c, m * n * sizeof(DTYPE))); initMatrix(h_a, m , k); initMatrix(h_b, k , n); initMatrix(h_c_gpu_res, m , n); check_cuda_error(cudaMemcpy(d_a, h_a, m * k * sizeof(DTYPE), cudaMemcpyHostToDevice)); check_cuda_error(cudaMemcpy(d_b, h_b, k * n * sizeof(DTYPE), cudaMemcpyHostToDevice)); dim3 block(NBLOCK, MBLOCK, 1); dim3 grid((n + Ntile - 1) / Ntile, (m + Mtile - 1) / Mtile, 1); //printf("%d, %d, %d\n", block.x, block.y, block.z); //printf("%d, %d, %d\n", grid.x, grid.y, grid.z); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, NULL); GEMM<<<grid, block>>>(d_a, d_b, d_c, m , n, k); cudaEventRecord(stop, NULL); cudaEventSynchronize(stop); float msecTotal = 0.0f; cudaEventElapsedTime(&msecTotal, start, stop); double flopsPerMatrixMul = 2.0 * (double) m * (double) n * (double) k; double teraFlops = (flopsPerMatrixMul * 1.0e-12f) / (msecTotal / 1000.0f); cout<<"PERF: "<<teraFlops<<"Tflops \n"; check_cuda_error(cudaPeekAtLastError()); check_cuda_error(cudaDeviceSynchronize()); cudaMemcpy(h_c_gpu_res, d_c, m * n * sizeof(DTYPE), cudaMemcpyDeviceToHost); hostGEMM(h_a, h_b, h_c, m, n, k); if(compareGEMM(h_c, h_c_gpu_res, m, n)) cout<<"Success!\n"; else cout<<"Output does not match!\n"; //printMatrix(h_c, m, n); //cout<<"output gpu\n"; //printMatrix(h_c_gpu_res, m, n); free(h_a); free(h_b); free(h_c); free(h_c_gpu_res); // The Global index start that this thread is responsible for.cudaFree(d_a); cudaFree(d_b); 
cudaFree(d_c); return 0; }
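Both versions of the GEMM kernel above carry a TODO asking for a check that the tile sizes divide evenly over the 32x32 thread block; otherwise Achunktocopy, Bchunktocopy, and the per-row thread counts are miscomputed. A minimal compile-time sketch of such checks, restating the same macro values as the files above purely for illustration (not part of either row):

// Same tile/block values as in the GEMM files above.
#define MBLOCK 32
#define NBLOCK 32
#define Mtile 128
#define Ntile 128
#define Ktile 32

// Guards for the copy-chunk arithmetic:
// Achunktocopy = (Mtile*Ktile)/(MBLOCK*NBLOCK), Bchunktocopy = (Ktile*Ntile)/(MBLOCK*NBLOCK).
static_assert((Mtile * Ktile) % (MBLOCK * NBLOCK) == 0,
              "A tile must split evenly over the thread block");
static_assert((Ktile * Ntile) % (MBLOCK * NBLOCK) == 0,
              "B tile must split evenly over the thread block");
static_assert(Ktile % ((Mtile * Ktile) / (MBLOCK * NBLOCK)) == 0,
              "each row of the A tile must map to a whole number of threads");
static_assert(Ntile % ((Ktile * Ntile) / (MBLOCK * NBLOCK)) == 0,
              "each row of the B tile must map to a whole number of threads");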
6d93a47dd86fc46e3b20663870a735e1c3f381ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * SASAPotential.cu * * Created on: Aug 6, 2010 * Author: zhmurov */ #include "../Core/global.h" #include "../Core/md.cuh" #include "sasa.h" #include "SASAPotential.cuh" namespace sasa_potential { class Log: public ILog { virtual void Write(const char* message) const { std::cout << makeTimePrefix() << "<sasa_potential> " << message << std::endl; } } log; #define LOG LogStream(log) //__global__ void computeSASAPotentialB_kernel(); void create(){ if (!getYesNoParameter(PARAMETER_SASA_ON, 0)) return; potential.compute = &compute; potential.destroy = &destroy; sprintf(potential.name, "SASA potential"); potentials[potentialsCount] = &potential; potentialsCount ++; sasaPairsListUpdater.update = updateSASAPairsList; sasaPairsListUpdater.destroy = destroySASAPairsListUpdater; sasaPairsListUpdater.frequency = getIntegerParameter(PARAMETER_SASA_PAIRS_FREQ); sprintf(sasaPairsListUpdater.name, "SASA updater"); updaters[updatersCount] = &sasaPairsListUpdater; updatersCount ++; energyOutput.computeValues = &computeEnergy; allocateCPU((void**)&energyOutput.values, parameters.Ntr*sizeof(float)); strcpy(energyOutput.name, ENERGY_OUTPUT_NAME_SASA); energyOutputs[energyOutputsCount] = &energyOutput; energyOutputsCount ++; //if(deviceProps.major == 2){ //hipFuncSetCacheConfig(computeSASAPotentialB_kernel, hipFuncCachePreferL1); //} init(); } void init(){ LOG << "Initializing SASA implicit solvent potential..."; int i, j; pairlistsExtension = getIntegerParameter(PARAMETER_PAIRSLISTS_EXTENSION, DEFAULT_PAIRSLISTS_EXTENSION); sasaData.Rprobe = getFloatParameter(PARAMETER_SASA_RPROBE, DEFAULT_SASA_RPROBE); sasaData.pij_cov = getFloatParameter(PARAMETER_SASA_PIJ_COV, DEFAULT_SASA_PIJ_COV); sasaData.pij_nb = getFloatParameter(PARAMETER_SASA_PIJ_NB, DEFAULT_SASA_PIJ_NB); sasaData.sasaPairsListCutoff = getFloatParameter(PARAMETER_SASA_PAIRSLIST_CUTOFF); allocateCPU((void**)&sasaData.h_threadAtom, gsystem.Ntot*sizeof(int)); allocateGPU((void**)&sasaData.d_threadAtom, gsystem.Ntot*sizeof(int)); allocateCPU((void**)&sasaData.h_atomThread, gsystem.Ntot*sizeof(int)); allocateGPU((void**)&sasaData.d_atomThread, gsystem.Ntot*sizeof(int)); int hydrogenCount = 0; sasaData.threadsCount = 0; for(i = 0; i < gsystem.N; i++){ if(topology.atoms[i].type[0] == 'H'){ hydrogenCount ++; sasaData.h_atomThread[i] = -1; } else { sasaData.h_atomThread[i] = sasaData.threadsCount; sasaData.h_threadAtom[sasaData.threadsCount] = i; sasaData.threadsCount ++; } } /* for(i = 0; i < sasaData.threadsCount; i++){ printf("%d: %s - %d\n", i, topology.atoms[sasaData.h_threadAtom[i]].name, sasaData.h_threadAtom[i]); }*/ sasaData.threadsCountTot = sasaData.threadsCount*parameters.Ntr; sasaBlockSize = BLOCK_SIZE; sasaBlockCount = sasaData.threadsCountTot/BLOCK_SIZE + 1; sasaPairsListBlockSize = BLOCK_SIZE; sasaPairsListBlockCount = sasaData.threadsCountTot/BLOCK_SIZE + 1; if(sasaData.threadsCountTot % 16 == 0){ sasaData.widthTot = sasaData.threadsCountTot; } else { sasaData.widthTot = (sasaData.threadsCountTot/16)*16 + 16; } LOG << "Found " << hydrogenCount << " hydrogens, " << gsystem.N - hydrogenCount << " non-hydrogens"; LOG << "Arrays will be aligned to the width of " << sasaData.widthTot; //allocateCPU((void**)&sasaData.h_sasaParameters, atomTypesCount*sizeof(GSASAParameters)); //allocateGPU((void**)&sasaData.d_sasaParameters, atomTypesCount*sizeof(GSASAParameters)); allocateCPU((void**)&sasaData.h_sasaRipr, atomTypesCount*sizeof(float)); 
allocateCPU((void**)&sasaData.h_sasaRipr2, atomTypesCount*sizeof(float)); allocateCPU((void**)&sasaData.h_sasaPiOverSi, atomTypesCount*sizeof(float)); allocateCPU((void**)&sasaData.h_sasaSigmaSi, atomTypesCount*sizeof(float)); allocateCPU((void**)&sasaData.h_sasaSi, atomTypesCount*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaRipr, atomTypesCount*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaRipr2, atomTypesCount*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaPiOverSi, atomTypesCount*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaSigmaSi, atomTypesCount*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaSi, atomTypesCount*sizeof(float)); char sasaFilename[100]; getMaskedParameter(sasaFilename, PARAMETER_SASA_PARAMETERS_FILE); readSASAParameters(sasaFilename); for(i = 0; i < atomTypesCount; i++){ SASAParameters param = getSASAParametersCHARMM(atomTypes[i].name); float Ripr = (param.R + sasaData.Rprobe); float Si = 4.0f*M_PI*Ripr*Ripr; //sasaData.h_sasaParameters[i].Ripr = Ripr; //sasaData.h_sasaParameters[i].Ripr2 = Ripr*Ripr; //sasaData.h_sasaParameters[i].piOverSi = param.p/Si; //sasaData.h_sasaParameters[i].sigmaSi = param.sigma*Si; sasaData.h_sasaRipr[i] = Ripr;//sasaData.h_sasaParameters[i].Ripr; sasaData.h_sasaRipr2[i] = Ripr*Ripr;//sasaData.h_sasaParameters[i].Ripr2; sasaData.h_sasaPiOverSi[i] = param.p/Si;//sasaData.h_sasaParameters[i].piOverSi; sasaData.h_sasaSigmaSi[i] = param.sigma*Si;//sasaData.h_sasaParameters[i].sigmaSi; sasaData.h_sasaSi[i] = Si; } allocateCPU((void**)&sasaData.h_pairs12Counts, sasaData.threadsCountTot*sizeof(int)); allocateGPU((void**)&sasaData.d_pairs12Counts, sasaData.threadsCountTot*sizeof(int)); allocateCPU((void**)&sasaData.h_pairsListCount, sasaData.threadsCountTot*sizeof(int)); allocateGPU((void**)&sasaData.d_pairsListCount, sasaData.threadsCountTot*sizeof(int)); allocateCPU((void**)&sasaData.h_sasaListCount, sasaData.threadsCountTot*sizeof(int)); allocateGPU((void**)&sasaData.d_sasaListCount, sasaData.threadsCountTot*sizeof(int)); LOG << "Counting members of current SASA pairlist..."; int totalSASAPairlistCount = 0; int totalSASAListCount = 0; for(i = 0; i < sasaData.threadsCountTot; i++){ sasaData.h_pairsListCount[i] = 0; sasaData.h_sasaListCount[i] = 0; } sasaData.maxSASAPairsPerAtom = getIntegerParameter(PARAMETER_MAX_PAIRS_SASA, 0); sasaData.maxPairsListItemsPerAtom = getIntegerParameter(PARAMETER_MAX_PAIRLIST_SASA, 0); if((sasaData.maxSASAPairsPerAtom != 0 || sasaData.maxPairsListItemsPerAtom != 0) && (sasaData.maxSASAPairsPerAtom * sasaData.maxPairsListItemsPerAtom == 0)){ LOG << "Sizes for both SASA lists should be defined. 
" "Re-counting will be forced."; } if(sasaData.maxSASAPairsPerAtom == 0 || sasaData.maxPairsListItemsPerAtom == 0){ float4 r1, r2; int a1, a2; for(i = 0; i < sasaData.threadsCount; i++){ a1 = sasaData.h_threadAtom[i]; if(topology.atoms[a1].type[0] != 'H'){ r1 = gsystem.h_coord[a1]; for(j = 0; j < i; j++){ a2 = sasaData.h_threadAtom[j]; if(topology.atoms[a2].type[0] != 'H'){ r2 = gsystem.h_coord[a2]; r2.x = r2.x - r1.x; r2.y = r2.y - r1.y; r2.z = r2.z - r1.z; r2.w = sqrtf(r2.x*r2.x + r2.y*r2.y + r2.z*r2.z); if(r2.w < sasaData.sasaPairsListCutoff){ sasaData.h_pairsListCount[i] ++; sasaData.h_pairsListCount[j] ++; totalSASAPairlistCount ++; if(r2.w < sasaData.h_sasaRipr[gsystem.h_atomTypes[a1]] + sasaData.h_sasaRipr[gsystem.h_atomTypes[a2]]){ sasaData.h_sasaListCount[i] ++; sasaData.h_sasaListCount[j] ++; totalSASAListCount ++; } } } } } } sasaData.maxPairsListItemsPerAtom = 0; sasaData.maxSASAPairsPerAtom = 0; for(i = 0; i < sasaData.threadsCount; i++){ if(sasaData.maxPairsListItemsPerAtom < sasaData.h_pairsListCount[i]){ sasaData.maxPairsListItemsPerAtom = sasaData.h_pairsListCount[i]; } if(sasaData.maxSASAPairsPerAtom < sasaData.h_sasaListCount[i]){ sasaData.maxSASAPairsPerAtom = sasaData.h_sasaListCount[i]; } } const int tw = 10; // Width of table field LOG << ""; LOG.table(tw,3) << "Pairs" << "Total" << "Max/Atom"; LOG.table(tw,3) << "SASA" << totalSASAListCount << sasaData.maxSASAPairsPerAtom; LOG.table(tw,3) << "Pairlist" << totalSASAPairlistCount << sasaData.maxPairsListItemsPerAtom; LOG << ""; } else { LOG << "Using pre-defined list sizes of " << sasaData.maxSASAPairsPerAtom << " for SASA list and " << sasaData.maxPairsListItemsPerAtom << " for SASA pairlist"; LOG << "Actual numbers of pairs in changeable lists were not yet computed and will be zeros in following table."; } DPRINTF("Pairs: Total: Max/Atom:\n"); DPRINTF("SASA List %10d %10d\n", totalSASAListCount, sasaData.maxSASAPairsPerAtom); DPRINTF("Pairlist %10d %10d\n", totalSASAPairlistCount, sasaData.maxPairsListItemsPerAtom); LOG << "Adding " << pairlistsExtension << " to the length of all changeable lists to avoid memory conflicts"; sasaData.maxSASAPairsPerAtom += 4; //For covalent bonds sasaData.maxSASAPairsPerAtom += pairlistsExtension; sasaData.maxPairsListItemsPerAtom += pairlistsExtension; allocateCPU((void**)&sasaData.h_pairsList, sasaData.widthTot*sasaData.maxPairsListItemsPerAtom*sizeof(int)); allocateGPU((void**)&sasaData.d_pairsList, sasaData.widthTot*sasaData.maxPairsListItemsPerAtom*sizeof(int)); allocateCPU((void**)&sasaData.h_BGrad, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4)); allocateGPU((void**)&sasaData.d_BGrad, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4)); allocateCPU((void**)&sasaData.h_B, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float)); allocateGPU((void**)&sasaData.d_B, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float)); allocateCPU((void**)&sasaData.h_BGradT, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4)); allocateGPU((void**)&sasaData.d_BGradT, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4)); allocateCPU((void**)&sasaData.h_BT, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float)); allocateGPU((void**)&sasaData.d_BT, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float)); allocateCPU((void**)&sasaData.h_sasaList, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(int)); allocateGPU((void**)&sasaData.d_sasaList, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(int)); for(i = 0; i < 
sasaData.threadsCount; i++){ sasaData.h_pairs12Counts[i] = 0; } int b; for(b = 0; b < topology.bondCount; b++){ Bond bond = topology.bonds[b]; i = sasaData.h_atomThread[bond.i]; j = sasaData.h_atomThread[bond.j]; if(topology.atoms[bond.i].type[0] != 'H' && topology.atoms[bond.j].type[0] != 'H'){ sasaData.h_sasaList[sasaData.h_pairs12Counts[i]*sasaData.widthTot + i] = bond.j; sasaData.h_sasaList[sasaData.h_pairs12Counts[j]*sasaData.widthTot + j] = bond.i; sasaData.h_pairs12Counts[i] ++; sasaData.h_pairs12Counts[j] ++; } } /*for(i = 0; i < sasaData.threadsCount; i++){ a1 = sasaData.h_threadAtom[i]; sasaData.h_pairs12Counts[i] = pairsListsData.h_pairs12ListCount[a1]; //printf("(%s)%d\n", topology.atoms[a1].name, sasaData.h_pairs12Counts[i]); for(j = 0; j < pairsListsData.h_pairs12ListCount[a1]; j++){ sasaData.h_sasaList[j*sasaData.widthTot + i] = pairsListsData.h_pairs12List[j*gsystem.widthTot + a1]; } }*/ allocateCPU((void**)&sasaData.h_sasa, gsystem.Ntot*sizeof(float)); allocateGPU((void**)&sasaData.d_sasa, gsystem.Ntot*sizeof(float)); allocateCPU((void**)&sasaData.h_sasaEnergies, gsystem.Ntot*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaEnergies, gsystem.Ntot*sizeof(float)); for(i = 0; i < gsystem.Ntot; i++){ sasaData.h_sasaEnergies[i] = 0.0f; } /*printf("\n\nList of Covalent Bonds (in SASA):\n"); for(i = 0; i < sasaData.threadsCount; i++){ printf("%d (%d): ", i, sasaData.h_pairs12Counts[i]); for(j = 0; j < sasaData.h_pairs12Counts[i]; j++){ printf("%d ", sasaData.h_sasaList[j*sasaData.widthTot + i]); } printf("\n"); }*/ int traj, itot; for(traj = 1; traj < parameters.Ntr; traj++){ for(i = 0; i < sasaData.threadsCount; i++){ itot = traj*sasaData.threadsCount + i; sasaData.h_threadAtom[itot] = sasaData.h_threadAtom[i] + traj*gsystem.N; sasaData.h_pairs12Counts[itot] = sasaData.h_pairs12Counts[i]; for(j = 0; j < sasaData.h_pairs12Counts[i]; j++){ sasaData.h_sasaList[j*sasaData.widthTot + itot] = sasaData.h_sasaList[j*sasaData.widthTot + i] + traj*gsystem.N; } } } DPRINTF("SASA Parameters:\n"); DPRINTF("Atom type:\tR_ipr: \t\t(R_i+R_pr)^2: \t\tp_i/S_i: \t\tsigma*S_i\n"); for(i = 0; i < atomTypesCount; i++){ DPRINTF("%d.%s\t\t%6.4f\t\t%6.4f\t\t%6.4f\t\t%6.4f\n", i, atomTypes[i].name, sasaData.h_sasaRipr[i], sasaData.h_sasaRipr2[i], sasaData.h_sasaPiOverSi[i], sasaData.h_sasaSigmaSi[i]); } //hipMemcpy(sasaData.d_sasaParameters, sasaData.h_sasaParameters, // atomTypesCount*sizeof(GSASAParameters), hipMemcpyHostToDevice); hipMemcpy(sasaData.d_sasaRipr, sasaData.h_sasaRipr, atomTypesCount*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(sasaData.d_sasaRipr2, sasaData.h_sasaRipr2, atomTypesCount*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(sasaData.d_sasaPiOverSi, sasaData.h_sasaPiOverSi, atomTypesCount*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(sasaData.d_sasaSigmaSi, sasaData.h_sasaSigmaSi, atomTypesCount*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(sasaData.d_sasaSi, sasaData.h_sasaSi, atomTypesCount*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(sasaData.d_threadAtom, sasaData.h_threadAtom, sasaData.threadsCountTot*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(sasaData.d_pairs12Counts, sasaData.h_pairs12Counts, sasaData.threadsCountTot*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(sasaData.d_sasaList, sasaData.h_sasaList, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(sasaData.d_sasaEnergies, sasaData.h_sasaEnergies, gsystem.Ntot*sizeof(float), hipMemcpyHostToDevice); /*hipMemcpyToSymbol(c_sasaParameters, 
sasaData.h_sasaParameters, atomTypesCount*sizeof(GSASAParameters), 0);*/ hipMemcpyToSymbol(c_sasaData, &sasaData, sizeof(GSASAData), 0); hipBindTexture(0, t_sasaRipr, sasaData.d_sasaRipr, atomTypesCount*sizeof(float)); hipBindTexture(0, t_sasaRipr2, sasaData.d_sasaRipr2, atomTypesCount*sizeof(float)); hipBindTexture(0, t_sasaPiOverSi, sasaData.d_sasaPiOverSi, atomTypesCount*sizeof(float)); hipBindTexture(0, t_sasaSigmaSi, sasaData.d_sasaSigmaSi, atomTypesCount*sizeof(float)); /*hipBindTexture(0, t_sasaBGrad, sasaData.d_BGrad, gsystem.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4)); hipBindTexture(0, t_sasaB, sasaData.d_B, gsystem.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float));*/ LOG << "Done initializing SASA implicit solvent potential."; } __global__ void generateSASAList_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int i = c_sasaData.d_threadAtom[d_i]; float4 r1 = tex1Dfetch(t_coord, i); r1.w = tex1Dfetch(t_sasaRipr, (int)r1.w);//c_sasaParameters[(int)r1.w].Ripr; int sasaCount = c_sasaData.d_pairs12Counts[d_i]; for(i = 0; i < c_sasaData.d_pairsListCount[d_i]; i++){ float mult; float4 r2; int j = c_sasaData.d_pairsList[i*c_sasaData.widthTot + d_i]; r2 = tex1Dfetch(t_coord, j);//abs(j)); mult = tex1Dfetch(t_sasaRipr, (int)r2.w);//c_sasaParameters[(int)r2.w].Ripr; mult += r1.w; r2 -= r1; DO_PBC(r2); r2.w = sqrtf(r2.x*r2.x + r2.y*r2.y + r2.z*r2.z); if(r2.w < mult){ c_sasaData.d_sasaList[sasaCount*c_sasaData.widthTot + d_i] = j; /*c_sasaData.d_sasaListPij[sasaCount*c_sasaData.widthTot + d_i] = c_sasaData.d_pairsListPij[i*c_sasaData.widthTot + d_i];*/ //c_sasaData.d_sasadrs[sasaCount*c_sasaData.widthTot + d_i] = r2; sasaCount ++; } } c_sasaData.d_sasaListCount[d_i] = sasaCount; } } __global__ void computeSASAPotentialB_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int i = c_sasaData.d_threadAtom[d_i]; int ati = c_gsystem.d_atomTypes[i]; float4 B; float4 dr; s_Ri[threadIdx.x] = tex1Dfetch(t_coord, i); int atj; float pij; int covalentCount = c_sasaData.d_pairs12Counts[d_i]; for(i = 0; i < c_sasaData.d_sasaListCount[d_i]; i++){ atj = c_sasaData.d_sasaList[i*c_sasaData.widthTot + d_i]; if(i < covalentCount){ pij = c_sasaData.pij_cov; } else { pij = c_sasaData.pij_nb; } //atj = abs(atj); //float4 dr = c_sasaData.d_sasadrs[i*c_sasaData.widthTot + d_i]; B = s_Ri[threadIdx.x]; dr = tex1Dfetch(t_coord, atj); dr -= B; DO_PBC(dr); dr.w = sqrtf(dr.x*dr.x + dr.y*dr.y + dr.z*dr.z); atj = c_gsystem.d_atomTypes[atj]; B.w = dr.w*dr.w; B.x = tex1Dfetch(t_sasaRipr2, ati); B.y = tex1Dfetch(t_sasaRipr2, atj); B.w = (B.y - B.x)/B.w; //B.w = (c_sasaParameters[atj].Ripr2 - c_sasaParameters[ati].Ripr2)/B.w; float mult = (1.0f + B.w)/dr.w; B.x = tex1Dfetch(t_sasaPiOverSi, ati); B.y = tex1Dfetch(t_sasaRipr, ati); mult *= B.x;//c_sasaParameters[ati].piOverSi; mult *= pij; mult *= M_PI; mult *= B.y;//c_sasaParameters[ati].Ripr; B.x = mult*dr.x; B.y = mult*dr.y; B.z = mult*dr.z; c_sasaData.d_BGrad[i*c_sasaData.widthTot + d_i] = B; B.x = tex1Dfetch(t_sasaPiOverSi, atj); B.y = tex1Dfetch(t_sasaRipr, atj); mult = (1.0f - B.w)/dr.w; mult *= B.x;//c_sasaParameters[atj].piOverSi; mult *= pij; mult *= M_PI; mult *= B.y;//c_sasaParameters[atj].Ripr; B.x = mult*dr.x; B.y = mult*dr.y; B.z = mult*dr.z; c_sasaData.d_BGradT[i*c_sasaData.widthTot + d_i] = B; dr.x = tex1Dfetch(t_sasaRipr, ati); dr.y = tex1Dfetch(t_sasaRipr, atj); dr.z = tex1Dfetch(t_sasaPiOverSi, ati); //B.x = c_sasaParameters[ati].Ripr; //B.x += 
c_sasaParameters[atj].Ripr; B.x = dr.x + dr.y; B.x -= dr.w; B.x *= M_PI; B.x *= pij; //B.y = c_sasaParameters[atj].Ripr; //B.y -= c_sasaParameters[ati].Ripr; B.y = dr.y - dr.x; B.y /= dr.w; dr.w = tex1Dfetch(t_sasaPiOverSi, atj); B.w = 1.0f + B.y; B.w *= B.x; B.w *= dr.x;//c_sasaParameters[ati].Ripr; B.w *= dr.z;//c_sasaParameters[ati].piOverSi; B.w = 1.0f - B.w; c_sasaData.d_BGrad[i*c_sasaData.widthTot + d_i].w = B.w; c_sasaData.d_B[i*c_sasaData.widthTot + d_i] = B.w; //c_sasaData.d_BGradT[i*c_sasaData.widthTot + d_i].w = // 1.0f - c_sasaParameters[atj].piOverSi*c_sasaParameters[atj].Ripr*B.x*(1.0f - B.y); c_sasaData.d_BGradT[i*c_sasaData.widthTot + d_i].w = 1.0f - dr.w*dr.y*B.x*(1.0f - B.y); //c_sasaData.d_BT[sasaCount*c_gsystem.N + d_i] = B.w; } } } __global__ void computeSASAPotentialEnergy_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int a1 = c_sasaData.d_threadAtom[d_i]; int j; int ati = c_gsystem.d_atomTypes[a1]; int sasaCount = c_sasaData.d_sasaListCount[d_i]; float pot = tex1Dfetch(t_sasaSigmaSi, ati);//c_sasaParameters[ati].sigmaSi; for(j = 0; j < sasaCount; j++){ pot *= c_sasaData.d_B[j*c_sasaData.widthTot + d_i];//tex1Dfetch(t_sasaB, j*c_gsystem.N + d_i); } c_sasaData.d_sasaEnergies[a1] = pot; } } __global__ void computeSASA_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int a1 = c_sasaData.d_threadAtom[d_i]; int j; int ati = c_gsystem.d_atomTypes[a1]; int sasaCount = c_sasaData.d_sasaListCount[d_i]; float pot = c_sasaData.d_sasaSi[ati];//c_sasaParameters[ati].sigmaSi; for(j = 0; j < sasaCount; j++){ pot *= c_sasaData.d_B[j*c_sasaData.widthTot + d_i];//tex1Dfetch(t_sasaB, j*c_gsystem.N + d_i); } c_sasaData.d_sasa[a1] = pot; } } __global__ void computeSASAPotential_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int j, k; int a1 = c_sasaData.d_threadAtom[d_i]; float4 f = c_gsystem.d_forces[a1]; float mult; float4 B; float Vi = c_sasaData.d_sasaEnergies[a1]; for(k = 0; k < c_sasaData.d_sasaListCount[d_i]; k++){ j = c_sasaData.d_sasaList[k*c_sasaData.widthTot + d_i]; B = c_sasaData.d_BGrad[k*c_sasaData.widthTot + d_i]; mult = Vi/B.w; f.x += mult*B.x; f.y += mult*B.y; f.z += mult*B.z; B = c_sasaData.d_BGradT[k*c_sasaData.widthTot + d_i]; mult = c_sasaData.d_sasaEnergies[j]; mult /= B.w; f.x += mult*B.x; f.y += mult*B.y; f.z += mult*B.z; } c_gsystem.d_forces[a1] = f; } } inline void compute(){ hipLaunchKernelGGL(( generateSASAList_kernel), dim3(sasaBlockCount), dim3(sasaBlockSize), 0, 0, ); hipLaunchKernelGGL(( computeSASAPotentialB_kernel), dim3(sasaBlockCount), dim3(sasaBlockSize), 0, 0, ); /*hipMemcpy(sasaData.h_sasaListCount, sasaData.d_sasaListCount, gsystem.Ntot*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(sasaData.h_BGrad, sasaData.d_BGrad, gsystem.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4), hipMemcpyDeviceToHost); int i, j; printf("\n\nB's:\n"); for(i = 0; i < gsystem.Ntot; i++){ printf("%d (%d): ", i, sasaData.h_sasaListCount[i]); for(j = 0; j < sasaData.h_sasaListCount[i]; j++){ printf("%5.2f ", sasaData.h_BGrad[j*gsystem.widthTot + i].w); } printf("\n"); }*/ hipLaunchKernelGGL(( computeSASAPotentialEnergy_kernel), dim3(sasaBlockCount), dim3(sasaBlockSize), 0, 0, ); hipLaunchKernelGGL(( computeSASAPotential_kernel), dim3(sasaBlockCount), dim3(sasaBlockSize), 0, 0, ); /*hipMemcpy(gsystem.h_forces, gsystem.d_forces, gsystem.Ntot*sizeof(float4), hipMemcpyDeviceToHost); //int i; float3 force = make_float3(0.0f, 0.0f, 
0.0f); for(i = 0; i < gsystem.Ntot; i++){ force.x += gsystem.h_forces[i].x; force.y += gsystem.h_forces[i].y; force.z += gsystem.h_forces[i].z; printf("%d: (%f, %f, %f) %f\n", i, gsystem.h_forces[i].x, gsystem.h_forces[i].y, gsystem.h_forces[i].z, sqrtf(gsystem.h_forces[i].x*gsystem.h_forces[i].x + gsystem.h_forces[i].y*gsystem.h_forces[i].y + gsystem.h_forces[i].z*gsystem.h_forces[i].z)); } printf("Net force (sasa): (%f, %f, %f) %f\n", force.x, force.y, force.z, sqrtf(force.x*force.x + force.y*force.y + force.z*force.z));*/ /*computeSASA_kernel<<<sasaBlockCount, sasaBlockSize>>>(); hipMemcpy(sasaData.h_sasa, sasaData.d_sasa, gsystem.Ntot*sizeof(float), hipMemcpyDeviceToHost); float totalSASA = 0.0; FILE* file = fopen("sasa.dat", "w"); int i; for(i = 0; i < gsystem.Ntot; i++){ if(sasaData.h_atomThread[i] != -1){ fprintf(file, "%d\t%f\n", i, sasaData.h_sasa[i]); totalSASA += sasaData.h_sasa[i]; } } fclose(file); printf("Total SASA: %f\n", totalSASA);*/ //exit(0); } inline void computeEnergy(){ if(step == 0){ updateSASAPairsList(); hipLaunchKernelGGL(( generateSASAList_kernel), dim3(sasaBlockCount), dim3(sasaBlockSize), 0, 0, ); hipLaunchKernelGGL(( computeSASAPotentialB_kernel), dim3(sasaBlockCount), dim3(sasaBlockSize), 0, 0, ); hipLaunchKernelGGL(( computeSASAPotentialEnergy_kernel), dim3(sasaBlockCount), dim3(sasaBlockSize), 0, 0, ); } hipMemcpy(sasaData.h_sasaEnergies, sasaData.d_sasaEnergies, gsystem.Ntot*sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("sasa - Energy copy"); int i, traj; for(traj = 0; traj < parameters.Ntr; traj++){ float pot = 0.0f; for(i = 0; i < gsystem.N; i++){ pot += sasaData.h_sasaEnergies[i + gsystem.N*traj]; } energyOutput.values[traj] = pot; } /*hipMemcpy(sasaData.h_pairs12Counts, sasaData.d_pairs12Counts, sasaData.threadsCountTot*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(sasaData.h_sasaListCount, sasaData.d_sasaListCount, sasaData.threadsCountTot*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(sasaData.h_sasaList, sasaData.d_sasaList, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(int), hipMemcpyDeviceToHost); printf("\n"); for(i = 0; i < sasaData.threadsCount; i++){ int a1 = sasaData.h_threadAtom[i]; int j; printf("%d (%s, %d in list, %d cov): ", a1, topology.atoms[a1].name, sasaData.h_sasaListCount[i], sasaData.h_pairs12Counts[i]); for(j = 0; j < sasaData.h_sasaListCount[i]; j++){ printf("%d ", sasaData.h_sasaList[j*sasaData.widthTot + i]); } printf("\n"); }*/ } void destroy(){ } __global__ void updateSASAPairsList_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int i, j, found; int a1, a2; a1 = c_sasaData.d_threadAtom[d_i]; float4 r1 = tex1Dfetch(t_coord, a1); float4 r2; int sasaPairsCount = 0; int traj = d_i/c_sasaData.threadsCount; for(i = traj*c_sasaData.threadsCount; i < (traj + 1)*c_sasaData.threadsCount; i++){ a2 = c_sasaData.d_threadAtom[i]; if(a2 != a1){ r2 = tex1Dfetch(t_coord, a2); r2 -= r1; DO_PBC(r2); r2.w = sqrtf(r2.x*r2.x + r2.y*r2.y + r2.z*r2.z); if(r2.w < c_sasaData.sasaPairsListCutoff){ int covalentCount = c_sasaData.d_pairs12Counts[d_i]; j = 0; found = 0; while(found == 0 && j < covalentCount){ if(a2 == c_sasaData.d_sasaList[j*c_sasaData.widthTot + d_i]){ found = 1; } j++; } //float pij; if(found == 0){ //pij = c_sasaData.pij_nb; /* j = -a2; } else {*/ // j = a2; //pij = c_sasaData.pij_cov; c_sasaData.d_pairsList[sasaPairsCount*c_sasaData.widthTot + d_i] = a2; sasaPairsCount ++; } //c_sasaData.d_pairsListPij[sasaPairsCount*c_sasaData.widthTot + d_i] = pij; /*if(found == 0){ 
c_sasaData.d_pairsList[sasaPairsCount*c_sasaData.widthTot + d_i] = i; sasaPairsCount ++; }*/ } } } c_sasaData.d_pairsListCount[d_i] = sasaPairsCount; } } inline void updateSASAPairsList(){ hipLaunchKernelGGL(( updateSASAPairsList_kernel), dim3(sasaPairsListBlockCount), dim3(sasaPairsListBlockSize), 0, 0, ); hipMemcpy(sasaData.h_pairsListCount, sasaData.d_pairsListCount, sasaData.threadsCountTot*sizeof(int), hipMemcpyDeviceToHost); int i, traj; for(traj = 0; traj < parameters.Ntr; traj++){ for(i = 0; i < sasaData.threadsCount; i++){ if(sasaData.h_pairsListCount[i] > MAX_SASA_PAIRSLIST_ITEMS_PER_ATOM){ DIE("Number of SASA pairs on atom %d (trajectory %d) is %d, which exceeds the limit of %d.", sasaData.h_threadAtom[i], traj+parameters.firstrun, sasaData.h_pairsListCount[i], MAX_SASA_PAIRSLIST_ITEMS_PER_ATOM); } } } hipDeviceSynchronize(); checkCUDAError("update sasa pairs"); hipMemcpy(sasaData.h_pairsListCount, sasaData.d_pairsListCount, sasaData.threadsCountTot*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(sasaData.h_pairsList, sasaData.d_pairsList, sasaData.widthTot*sasaData.maxPairsListItemsPerAtom*sizeof(int), hipMemcpyDeviceToHost); /*int j; printf("\n\nList of SASA pairs:\n"); for(i = 0; i < sasaData.threadsCountTot; i++){ if(i % sasaData.threadsCount == 0){ printf("%d. %d (%d): ", i, sasaData.h_threadAtom[i], sasaData.h_pairsListCount[i]); for(j = 0; j < sasaData.h_pairsListCount[i]; j++){ printf("%d ", sasaData.h_pairsList[j*sasaData.widthTot + i]); } printf("\n"); } } for(j = 0; j < sasaData.threadsCount; j++){ if(sasaData.h_pairsListCount[j] != sasaData.h_pairsListCount[j+traj*sasaData.threadsCount]){ DIE("Sukanah!"); } }*/ } void destroySASAPairsListUpdater(){ } #undef LOG } // namespace sasa_potential
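init() in the SASAPotential file above rounds the per-trajectory thread count up to a multiple of 16 (widthTot, the row stride of the pair-list arrays) and derives launch block counts as threadsCountTot/BLOCK_SIZE + 1. A small standalone sketch of that arithmetic, with a hypothetical BLOCK_SIZE and atom-thread count chosen only for illustration (in the real code BLOCK_SIZE comes from the MD core headers):

#include <cstdio>

constexpr int BLOCK_SIZE = 256;  // hypothetical; defined elsewhere in the real code

// widthTot: round n up to the next multiple of 16, as done in init().
static int alignTo16(int n) { return (n % 16 == 0) ? n : (n / 16) * 16 + 16; }

// Block count as computed in init(): one extra block covers any remainder.
static int blockCount(int threads) { return threads / BLOCK_SIZE + 1; }

int main() {
    int threadsCountTot = 1000;  // hypothetical number of non-hydrogen atom threads
    std::printf("widthTot = %d, blocks of %d threads = %d\n",
                alignTo16(threadsCountTot), BLOCK_SIZE, blockCount(threadsCountTot));
    return 0;
}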
6d93a47dd86fc46e3b20663870a735e1c3f381ef.cu
/* * SASAPotential.cu * * Created on: Aug 6, 2010 * Author: zhmurov */ #include "../Core/global.h" #include "../Core/md.cuh" #include "sasa.h" #include "SASAPotential.cuh" namespace sasa_potential { class Log: public ILog { virtual void Write(const char* message) const { std::cout << makeTimePrefix() << "<sasa_potential> " << message << std::endl; } } log; #define LOG LogStream(log) //__global__ void computeSASAPotentialB_kernel(); void create(){ if (!getYesNoParameter(PARAMETER_SASA_ON, 0)) return; potential.compute = &compute; potential.destroy = &destroy; sprintf(potential.name, "SASA potential"); potentials[potentialsCount] = &potential; potentialsCount ++; sasaPairsListUpdater.update = updateSASAPairsList; sasaPairsListUpdater.destroy = destroySASAPairsListUpdater; sasaPairsListUpdater.frequency = getIntegerParameter(PARAMETER_SASA_PAIRS_FREQ); sprintf(sasaPairsListUpdater.name, "SASA updater"); updaters[updatersCount] = &sasaPairsListUpdater; updatersCount ++; energyOutput.computeValues = &computeEnergy; allocateCPU((void**)&energyOutput.values, parameters.Ntr*sizeof(float)); strcpy(energyOutput.name, ENERGY_OUTPUT_NAME_SASA); energyOutputs[energyOutputsCount] = &energyOutput; energyOutputsCount ++; //if(deviceProps.major == 2){ //cudaFuncSetCacheConfig(computeSASAPotentialB_kernel, cudaFuncCachePreferL1); //} init(); } void init(){ LOG << "Initializing SASA implicit solvent potential..."; int i, j; pairlistsExtension = getIntegerParameter(PARAMETER_PAIRSLISTS_EXTENSION, DEFAULT_PAIRSLISTS_EXTENSION); sasaData.Rprobe = getFloatParameter(PARAMETER_SASA_RPROBE, DEFAULT_SASA_RPROBE); sasaData.pij_cov = getFloatParameter(PARAMETER_SASA_PIJ_COV, DEFAULT_SASA_PIJ_COV); sasaData.pij_nb = getFloatParameter(PARAMETER_SASA_PIJ_NB, DEFAULT_SASA_PIJ_NB); sasaData.sasaPairsListCutoff = getFloatParameter(PARAMETER_SASA_PAIRSLIST_CUTOFF); allocateCPU((void**)&sasaData.h_threadAtom, gsystem.Ntot*sizeof(int)); allocateGPU((void**)&sasaData.d_threadAtom, gsystem.Ntot*sizeof(int)); allocateCPU((void**)&sasaData.h_atomThread, gsystem.Ntot*sizeof(int)); allocateGPU((void**)&sasaData.d_atomThread, gsystem.Ntot*sizeof(int)); int hydrogenCount = 0; sasaData.threadsCount = 0; for(i = 0; i < gsystem.N; i++){ if(topology.atoms[i].type[0] == 'H'){ hydrogenCount ++; sasaData.h_atomThread[i] = -1; } else { sasaData.h_atomThread[i] = sasaData.threadsCount; sasaData.h_threadAtom[sasaData.threadsCount] = i; sasaData.threadsCount ++; } } /* for(i = 0; i < sasaData.threadsCount; i++){ printf("%d: %s - %d\n", i, topology.atoms[sasaData.h_threadAtom[i]].name, sasaData.h_threadAtom[i]); }*/ sasaData.threadsCountTot = sasaData.threadsCount*parameters.Ntr; sasaBlockSize = BLOCK_SIZE; sasaBlockCount = sasaData.threadsCountTot/BLOCK_SIZE + 1; sasaPairsListBlockSize = BLOCK_SIZE; sasaPairsListBlockCount = sasaData.threadsCountTot/BLOCK_SIZE + 1; if(sasaData.threadsCountTot % 16 == 0){ sasaData.widthTot = sasaData.threadsCountTot; } else { sasaData.widthTot = (sasaData.threadsCountTot/16)*16 + 16; } LOG << "Found " << hydrogenCount << " hydrogens, " << gsystem.N - hydrogenCount << " non-hydrogens"; LOG << "Arrays will be aligned to the width of " << sasaData.widthTot; //allocateCPU((void**)&sasaData.h_sasaParameters, atomTypesCount*sizeof(GSASAParameters)); //allocateGPU((void**)&sasaData.d_sasaParameters, atomTypesCount*sizeof(GSASAParameters)); allocateCPU((void**)&sasaData.h_sasaRipr, atomTypesCount*sizeof(float)); allocateCPU((void**)&sasaData.h_sasaRipr2, atomTypesCount*sizeof(float)); 
allocateCPU((void**)&sasaData.h_sasaPiOverSi, atomTypesCount*sizeof(float)); allocateCPU((void**)&sasaData.h_sasaSigmaSi, atomTypesCount*sizeof(float)); allocateCPU((void**)&sasaData.h_sasaSi, atomTypesCount*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaRipr, atomTypesCount*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaRipr2, atomTypesCount*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaPiOverSi, atomTypesCount*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaSigmaSi, atomTypesCount*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaSi, atomTypesCount*sizeof(float)); char sasaFilename[100]; getMaskedParameter(sasaFilename, PARAMETER_SASA_PARAMETERS_FILE); readSASAParameters(sasaFilename); for(i = 0; i < atomTypesCount; i++){ SASAParameters param = getSASAParametersCHARMM(atomTypes[i].name); float Ripr = (param.R + sasaData.Rprobe); float Si = 4.0f*M_PI*Ripr*Ripr; //sasaData.h_sasaParameters[i].Ripr = Ripr; //sasaData.h_sasaParameters[i].Ripr2 = Ripr*Ripr; //sasaData.h_sasaParameters[i].piOverSi = param.p/Si; //sasaData.h_sasaParameters[i].sigmaSi = param.sigma*Si; sasaData.h_sasaRipr[i] = Ripr;//sasaData.h_sasaParameters[i].Ripr; sasaData.h_sasaRipr2[i] = Ripr*Ripr;//sasaData.h_sasaParameters[i].Ripr2; sasaData.h_sasaPiOverSi[i] = param.p/Si;//sasaData.h_sasaParameters[i].piOverSi; sasaData.h_sasaSigmaSi[i] = param.sigma*Si;//sasaData.h_sasaParameters[i].sigmaSi; sasaData.h_sasaSi[i] = Si; } allocateCPU((void**)&sasaData.h_pairs12Counts, sasaData.threadsCountTot*sizeof(int)); allocateGPU((void**)&sasaData.d_pairs12Counts, sasaData.threadsCountTot*sizeof(int)); allocateCPU((void**)&sasaData.h_pairsListCount, sasaData.threadsCountTot*sizeof(int)); allocateGPU((void**)&sasaData.d_pairsListCount, sasaData.threadsCountTot*sizeof(int)); allocateCPU((void**)&sasaData.h_sasaListCount, sasaData.threadsCountTot*sizeof(int)); allocateGPU((void**)&sasaData.d_sasaListCount, sasaData.threadsCountTot*sizeof(int)); LOG << "Counting members of current SASA pairlist..."; int totalSASAPairlistCount = 0; int totalSASAListCount = 0; for(i = 0; i < sasaData.threadsCountTot; i++){ sasaData.h_pairsListCount[i] = 0; sasaData.h_sasaListCount[i] = 0; } sasaData.maxSASAPairsPerAtom = getIntegerParameter(PARAMETER_MAX_PAIRS_SASA, 0); sasaData.maxPairsListItemsPerAtom = getIntegerParameter(PARAMETER_MAX_PAIRLIST_SASA, 0); if((sasaData.maxSASAPairsPerAtom != 0 || sasaData.maxPairsListItemsPerAtom != 0) && (sasaData.maxSASAPairsPerAtom * sasaData.maxPairsListItemsPerAtom == 0)){ LOG << "Sizes for both SASA lists should be defined. 
" "Re-counting will be forced."; } if(sasaData.maxSASAPairsPerAtom == 0 || sasaData.maxPairsListItemsPerAtom == 0){ float4 r1, r2; int a1, a2; for(i = 0; i < sasaData.threadsCount; i++){ a1 = sasaData.h_threadAtom[i]; if(topology.atoms[a1].type[0] != 'H'){ r1 = gsystem.h_coord[a1]; for(j = 0; j < i; j++){ a2 = sasaData.h_threadAtom[j]; if(topology.atoms[a2].type[0] != 'H'){ r2 = gsystem.h_coord[a2]; r2.x = r2.x - r1.x; r2.y = r2.y - r1.y; r2.z = r2.z - r1.z; r2.w = sqrtf(r2.x*r2.x + r2.y*r2.y + r2.z*r2.z); if(r2.w < sasaData.sasaPairsListCutoff){ sasaData.h_pairsListCount[i] ++; sasaData.h_pairsListCount[j] ++; totalSASAPairlistCount ++; if(r2.w < sasaData.h_sasaRipr[gsystem.h_atomTypes[a1]] + sasaData.h_sasaRipr[gsystem.h_atomTypes[a2]]){ sasaData.h_sasaListCount[i] ++; sasaData.h_sasaListCount[j] ++; totalSASAListCount ++; } } } } } } sasaData.maxPairsListItemsPerAtom = 0; sasaData.maxSASAPairsPerAtom = 0; for(i = 0; i < sasaData.threadsCount; i++){ if(sasaData.maxPairsListItemsPerAtom < sasaData.h_pairsListCount[i]){ sasaData.maxPairsListItemsPerAtom = sasaData.h_pairsListCount[i]; } if(sasaData.maxSASAPairsPerAtom < sasaData.h_sasaListCount[i]){ sasaData.maxSASAPairsPerAtom = sasaData.h_sasaListCount[i]; } } const int tw = 10; // Width of table field LOG << ""; LOG.table(tw,3) << "Pairs" << "Total" << "Max/Atom"; LOG.table(tw,3) << "SASA" << totalSASAListCount << sasaData.maxSASAPairsPerAtom; LOG.table(tw,3) << "Pairlist" << totalSASAPairlistCount << sasaData.maxPairsListItemsPerAtom; LOG << ""; } else { LOG << "Using pre-defined list sizes of " << sasaData.maxSASAPairsPerAtom << " for SASA list and " << sasaData.maxPairsListItemsPerAtom << " for SASA pairlist"; LOG << "Actual numbers of pairs in changeable lists were not yet computed and will be zeros in following table."; } DPRINTF("Pairs: Total: Max/Atom:\n"); DPRINTF("SASA List %10d %10d\n", totalSASAListCount, sasaData.maxSASAPairsPerAtom); DPRINTF("Pairlist %10d %10d\n", totalSASAPairlistCount, sasaData.maxPairsListItemsPerAtom); LOG << "Adding " << pairlistsExtension << " to the length of all changeable lists to avoid memory conflicts"; sasaData.maxSASAPairsPerAtom += 4; //For covalent bonds sasaData.maxSASAPairsPerAtom += pairlistsExtension; sasaData.maxPairsListItemsPerAtom += pairlistsExtension; allocateCPU((void**)&sasaData.h_pairsList, sasaData.widthTot*sasaData.maxPairsListItemsPerAtom*sizeof(int)); allocateGPU((void**)&sasaData.d_pairsList, sasaData.widthTot*sasaData.maxPairsListItemsPerAtom*sizeof(int)); allocateCPU((void**)&sasaData.h_BGrad, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4)); allocateGPU((void**)&sasaData.d_BGrad, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4)); allocateCPU((void**)&sasaData.h_B, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float)); allocateGPU((void**)&sasaData.d_B, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float)); allocateCPU((void**)&sasaData.h_BGradT, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4)); allocateGPU((void**)&sasaData.d_BGradT, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4)); allocateCPU((void**)&sasaData.h_BT, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float)); allocateGPU((void**)&sasaData.d_BT, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float)); allocateCPU((void**)&sasaData.h_sasaList, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(int)); allocateGPU((void**)&sasaData.d_sasaList, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(int)); for(i = 0; i < 
sasaData.threadsCount; i++){ sasaData.h_pairs12Counts[i] = 0; } int b; for(b = 0; b < topology.bondCount; b++){ Bond bond = topology.bonds[b]; i = sasaData.h_atomThread[bond.i]; j = sasaData.h_atomThread[bond.j]; if(topology.atoms[bond.i].type[0] != 'H' && topology.atoms[bond.j].type[0] != 'H'){ sasaData.h_sasaList[sasaData.h_pairs12Counts[i]*sasaData.widthTot + i] = bond.j; sasaData.h_sasaList[sasaData.h_pairs12Counts[j]*sasaData.widthTot + j] = bond.i; sasaData.h_pairs12Counts[i] ++; sasaData.h_pairs12Counts[j] ++; } } /*for(i = 0; i < sasaData.threadsCount; i++){ a1 = sasaData.h_threadAtom[i]; sasaData.h_pairs12Counts[i] = pairsListsData.h_pairs12ListCount[a1]; //printf("(%s)%d\n", topology.atoms[a1].name, sasaData.h_pairs12Counts[i]); for(j = 0; j < pairsListsData.h_pairs12ListCount[a1]; j++){ sasaData.h_sasaList[j*sasaData.widthTot + i] = pairsListsData.h_pairs12List[j*gsystem.widthTot + a1]; } }*/ allocateCPU((void**)&sasaData.h_sasa, gsystem.Ntot*sizeof(float)); allocateGPU((void**)&sasaData.d_sasa, gsystem.Ntot*sizeof(float)); allocateCPU((void**)&sasaData.h_sasaEnergies, gsystem.Ntot*sizeof(float)); allocateGPU((void**)&sasaData.d_sasaEnergies, gsystem.Ntot*sizeof(float)); for(i = 0; i < gsystem.Ntot; i++){ sasaData.h_sasaEnergies[i] = 0.0f; } /*printf("\n\nList of Covalent Bonds (in SASA):\n"); for(i = 0; i < sasaData.threadsCount; i++){ printf("%d (%d): ", i, sasaData.h_pairs12Counts[i]); for(j = 0; j < sasaData.h_pairs12Counts[i]; j++){ printf("%d ", sasaData.h_sasaList[j*sasaData.widthTot + i]); } printf("\n"); }*/ int traj, itot; for(traj = 1; traj < parameters.Ntr; traj++){ for(i = 0; i < sasaData.threadsCount; i++){ itot = traj*sasaData.threadsCount + i; sasaData.h_threadAtom[itot] = sasaData.h_threadAtom[i] + traj*gsystem.N; sasaData.h_pairs12Counts[itot] = sasaData.h_pairs12Counts[i]; for(j = 0; j < sasaData.h_pairs12Counts[i]; j++){ sasaData.h_sasaList[j*sasaData.widthTot + itot] = sasaData.h_sasaList[j*sasaData.widthTot + i] + traj*gsystem.N; } } } DPRINTF("SASA Parameters:\n"); DPRINTF("Atom type:\tR_ipr: \t\t(R_i+R_pr)^2: \t\tp_i/S_i: \t\tsigma*S_i\n"); for(i = 0; i < atomTypesCount; i++){ DPRINTF("%d.%s\t\t%6.4f\t\t%6.4f\t\t%6.4f\t\t%6.4f\n", i, atomTypes[i].name, sasaData.h_sasaRipr[i], sasaData.h_sasaRipr2[i], sasaData.h_sasaPiOverSi[i], sasaData.h_sasaSigmaSi[i]); } //cudaMemcpy(sasaData.d_sasaParameters, sasaData.h_sasaParameters, // atomTypesCount*sizeof(GSASAParameters), cudaMemcpyHostToDevice); cudaMemcpy(sasaData.d_sasaRipr, sasaData.h_sasaRipr, atomTypesCount*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(sasaData.d_sasaRipr2, sasaData.h_sasaRipr2, atomTypesCount*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(sasaData.d_sasaPiOverSi, sasaData.h_sasaPiOverSi, atomTypesCount*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(sasaData.d_sasaSigmaSi, sasaData.h_sasaSigmaSi, atomTypesCount*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(sasaData.d_sasaSi, sasaData.h_sasaSi, atomTypesCount*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(sasaData.d_threadAtom, sasaData.h_threadAtom, sasaData.threadsCountTot*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(sasaData.d_pairs12Counts, sasaData.h_pairs12Counts, sasaData.threadsCountTot*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(sasaData.d_sasaList, sasaData.h_sasaList, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(sasaData.d_sasaEnergies, sasaData.h_sasaEnergies, gsystem.Ntot*sizeof(float), cudaMemcpyHostToDevice); 
/*cudaMemcpyToSymbol(c_sasaParameters, sasaData.h_sasaParameters, atomTypesCount*sizeof(GSASAParameters), 0);*/ cudaMemcpyToSymbol(c_sasaData, &sasaData, sizeof(GSASAData), 0); cudaBindTexture(0, t_sasaRipr, sasaData.d_sasaRipr, atomTypesCount*sizeof(float)); cudaBindTexture(0, t_sasaRipr2, sasaData.d_sasaRipr2, atomTypesCount*sizeof(float)); cudaBindTexture(0, t_sasaPiOverSi, sasaData.d_sasaPiOverSi, atomTypesCount*sizeof(float)); cudaBindTexture(0, t_sasaSigmaSi, sasaData.d_sasaSigmaSi, atomTypesCount*sizeof(float)); /*cudaBindTexture(0, t_sasaBGrad, sasaData.d_BGrad, gsystem.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4)); cudaBindTexture(0, t_sasaB, sasaData.d_B, gsystem.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float));*/ LOG << "Done initializing SASA implicit solvent potential."; } __global__ void generateSASAList_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int i = c_sasaData.d_threadAtom[d_i]; float4 r1 = tex1Dfetch(t_coord, i); r1.w = tex1Dfetch(t_sasaRipr, (int)r1.w);//c_sasaParameters[(int)r1.w].Ripr; int sasaCount = c_sasaData.d_pairs12Counts[d_i]; for(i = 0; i < c_sasaData.d_pairsListCount[d_i]; i++){ float mult; float4 r2; int j = c_sasaData.d_pairsList[i*c_sasaData.widthTot + d_i]; r2 = tex1Dfetch(t_coord, j);//abs(j)); mult = tex1Dfetch(t_sasaRipr, (int)r2.w);//c_sasaParameters[(int)r2.w].Ripr; mult += r1.w; r2 -= r1; DO_PBC(r2); r2.w = sqrtf(r2.x*r2.x + r2.y*r2.y + r2.z*r2.z); if(r2.w < mult){ c_sasaData.d_sasaList[sasaCount*c_sasaData.widthTot + d_i] = j; /*c_sasaData.d_sasaListPij[sasaCount*c_sasaData.widthTot + d_i] = c_sasaData.d_pairsListPij[i*c_sasaData.widthTot + d_i];*/ //c_sasaData.d_sasadrs[sasaCount*c_sasaData.widthTot + d_i] = r2; sasaCount ++; } } c_sasaData.d_sasaListCount[d_i] = sasaCount; } } __global__ void computeSASAPotentialB_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int i = c_sasaData.d_threadAtom[d_i]; int ati = c_gsystem.d_atomTypes[i]; float4 B; float4 dr; s_Ri[threadIdx.x] = tex1Dfetch(t_coord, i); int atj; float pij; int covalentCount = c_sasaData.d_pairs12Counts[d_i]; for(i = 0; i < c_sasaData.d_sasaListCount[d_i]; i++){ atj = c_sasaData.d_sasaList[i*c_sasaData.widthTot + d_i]; if(i < covalentCount){ pij = c_sasaData.pij_cov; } else { pij = c_sasaData.pij_nb; } //atj = abs(atj); //float4 dr = c_sasaData.d_sasadrs[i*c_sasaData.widthTot + d_i]; B = s_Ri[threadIdx.x]; dr = tex1Dfetch(t_coord, atj); dr -= B; DO_PBC(dr); dr.w = sqrtf(dr.x*dr.x + dr.y*dr.y + dr.z*dr.z); atj = c_gsystem.d_atomTypes[atj]; B.w = dr.w*dr.w; B.x = tex1Dfetch(t_sasaRipr2, ati); B.y = tex1Dfetch(t_sasaRipr2, atj); B.w = (B.y - B.x)/B.w; //B.w = (c_sasaParameters[atj].Ripr2 - c_sasaParameters[ati].Ripr2)/B.w; float mult = (1.0f + B.w)/dr.w; B.x = tex1Dfetch(t_sasaPiOverSi, ati); B.y = tex1Dfetch(t_sasaRipr, ati); mult *= B.x;//c_sasaParameters[ati].piOverSi; mult *= pij; mult *= M_PI; mult *= B.y;//c_sasaParameters[ati].Ripr; B.x = mult*dr.x; B.y = mult*dr.y; B.z = mult*dr.z; c_sasaData.d_BGrad[i*c_sasaData.widthTot + d_i] = B; B.x = tex1Dfetch(t_sasaPiOverSi, atj); B.y = tex1Dfetch(t_sasaRipr, atj); mult = (1.0f - B.w)/dr.w; mult *= B.x;//c_sasaParameters[atj].piOverSi; mult *= pij; mult *= M_PI; mult *= B.y;//c_sasaParameters[atj].Ripr; B.x = mult*dr.x; B.y = mult*dr.y; B.z = mult*dr.z; c_sasaData.d_BGradT[i*c_sasaData.widthTot + d_i] = B; dr.x = tex1Dfetch(t_sasaRipr, ati); dr.y = tex1Dfetch(t_sasaRipr, atj); dr.z = tex1Dfetch(t_sasaPiOverSi, ati); //B.x = 
c_sasaParameters[ati].Ripr; //B.x += c_sasaParameters[atj].Ripr; B.x = dr.x + dr.y; B.x -= dr.w; B.x *= M_PI; B.x *= pij; //B.y = c_sasaParameters[atj].Ripr; //B.y -= c_sasaParameters[ati].Ripr; B.y = dr.y - dr.x; B.y /= dr.w; dr.w = tex1Dfetch(t_sasaPiOverSi, atj); B.w = 1.0f + B.y; B.w *= B.x; B.w *= dr.x;//c_sasaParameters[ati].Ripr; B.w *= dr.z;//c_sasaParameters[ati].piOverSi; B.w = 1.0f - B.w; c_sasaData.d_BGrad[i*c_sasaData.widthTot + d_i].w = B.w; c_sasaData.d_B[i*c_sasaData.widthTot + d_i] = B.w; //c_sasaData.d_BGradT[i*c_sasaData.widthTot + d_i].w = // 1.0f - c_sasaParameters[atj].piOverSi*c_sasaParameters[atj].Ripr*B.x*(1.0f - B.y); c_sasaData.d_BGradT[i*c_sasaData.widthTot + d_i].w = 1.0f - dr.w*dr.y*B.x*(1.0f - B.y); //c_sasaData.d_BT[sasaCount*c_gsystem.N + d_i] = B.w; } } } __global__ void computeSASAPotentialEnergy_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int a1 = c_sasaData.d_threadAtom[d_i]; int j; int ati = c_gsystem.d_atomTypes[a1]; int sasaCount = c_sasaData.d_sasaListCount[d_i]; float pot = tex1Dfetch(t_sasaSigmaSi, ati);//c_sasaParameters[ati].sigmaSi; for(j = 0; j < sasaCount; j++){ pot *= c_sasaData.d_B[j*c_sasaData.widthTot + d_i];//tex1Dfetch(t_sasaB, j*c_gsystem.N + d_i); } c_sasaData.d_sasaEnergies[a1] = pot; } } __global__ void computeSASA_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int a1 = c_sasaData.d_threadAtom[d_i]; int j; int ati = c_gsystem.d_atomTypes[a1]; int sasaCount = c_sasaData.d_sasaListCount[d_i]; float pot = c_sasaData.d_sasaSi[ati];//c_sasaParameters[ati].sigmaSi; for(j = 0; j < sasaCount; j++){ pot *= c_sasaData.d_B[j*c_sasaData.widthTot + d_i];//tex1Dfetch(t_sasaB, j*c_gsystem.N + d_i); } c_sasaData.d_sasa[a1] = pot; } } __global__ void computeSASAPotential_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int j, k; int a1 = c_sasaData.d_threadAtom[d_i]; float4 f = c_gsystem.d_forces[a1]; float mult; float4 B; float Vi = c_sasaData.d_sasaEnergies[a1]; for(k = 0; k < c_sasaData.d_sasaListCount[d_i]; k++){ j = c_sasaData.d_sasaList[k*c_sasaData.widthTot + d_i]; B = c_sasaData.d_BGrad[k*c_sasaData.widthTot + d_i]; mult = Vi/B.w; f.x += mult*B.x; f.y += mult*B.y; f.z += mult*B.z; B = c_sasaData.d_BGradT[k*c_sasaData.widthTot + d_i]; mult = c_sasaData.d_sasaEnergies[j]; mult /= B.w; f.x += mult*B.x; f.y += mult*B.y; f.z += mult*B.z; } c_gsystem.d_forces[a1] = f; } } inline void compute(){ generateSASAList_kernel<<<sasaBlockCount, sasaBlockSize>>>(); computeSASAPotentialB_kernel<<<sasaBlockCount, sasaBlockSize>>>(); /*cudaMemcpy(sasaData.h_sasaListCount, sasaData.d_sasaListCount, gsystem.Ntot*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(sasaData.h_BGrad, sasaData.d_BGrad, gsystem.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(float4), cudaMemcpyDeviceToHost); int i, j; printf("\n\nB's:\n"); for(i = 0; i < gsystem.Ntot; i++){ printf("%d (%d): ", i, sasaData.h_sasaListCount[i]); for(j = 0; j < sasaData.h_sasaListCount[i]; j++){ printf("%5.2f ", sasaData.h_BGrad[j*gsystem.widthTot + i].w); } printf("\n"); }*/ computeSASAPotentialEnergy_kernel<<<sasaBlockCount, sasaBlockSize>>>(); computeSASAPotential_kernel<<<sasaBlockCount, sasaBlockSize>>>(); /*cudaMemcpy(gsystem.h_forces, gsystem.d_forces, gsystem.Ntot*sizeof(float4), cudaMemcpyDeviceToHost); //int i; float3 force = make_float3(0.0f, 0.0f, 0.0f); for(i = 0; i < gsystem.Ntot; i++){ force.x += gsystem.h_forces[i].x; force.y += gsystem.h_forces[i].y; 
force.z += gsystem.h_forces[i].z; printf("%d: (%f, %f, %f) %f\n", i, gsystem.h_forces[i].x, gsystem.h_forces[i].y, gsystem.h_forces[i].z, sqrtf(gsystem.h_forces[i].x*gsystem.h_forces[i].x + gsystem.h_forces[i].y*gsystem.h_forces[i].y + gsystem.h_forces[i].z*gsystem.h_forces[i].z)); } printf("Net force (sasa): (%f, %f, %f) %f\n", force.x, force.y, force.z, sqrtf(force.x*force.x + force.y*force.y + force.z*force.z));*/ /*computeSASA_kernel<<<sasaBlockCount, sasaBlockSize>>>(); cudaMemcpy(sasaData.h_sasa, sasaData.d_sasa, gsystem.Ntot*sizeof(float), cudaMemcpyDeviceToHost); float totalSASA = 0.0; FILE* file = fopen("sasa.dat", "w"); int i; for(i = 0; i < gsystem.Ntot; i++){ if(sasaData.h_atomThread[i] != -1){ fprintf(file, "%d\t%f\n", i, sasaData.h_sasa[i]); totalSASA += sasaData.h_sasa[i]; } } fclose(file); printf("Total SASA: %f\n", totalSASA);*/ //exit(0); } inline void computeEnergy(){ if(step == 0){ updateSASAPairsList(); generateSASAList_kernel<<<sasaBlockCount, sasaBlockSize>>>(); computeSASAPotentialB_kernel<<<sasaBlockCount, sasaBlockSize>>>(); computeSASAPotentialEnergy_kernel<<<sasaBlockCount, sasaBlockSize>>>(); } cudaMemcpy(sasaData.h_sasaEnergies, sasaData.d_sasaEnergies, gsystem.Ntot*sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("sasa - Energy copy"); int i, traj; for(traj = 0; traj < parameters.Ntr; traj++){ float pot = 0.0f; for(i = 0; i < gsystem.N; i++){ pot += sasaData.h_sasaEnergies[i + gsystem.N*traj]; } energyOutput.values[traj] = pot; } /*cudaMemcpy(sasaData.h_pairs12Counts, sasaData.d_pairs12Counts, sasaData.threadsCountTot*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(sasaData.h_sasaListCount, sasaData.d_sasaListCount, sasaData.threadsCountTot*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(sasaData.h_sasaList, sasaData.d_sasaList, sasaData.widthTot*sasaData.maxSASAPairsPerAtom*sizeof(int), cudaMemcpyDeviceToHost); printf("\n"); for(i = 0; i < sasaData.threadsCount; i++){ int a1 = sasaData.h_threadAtom[i]; int j; printf("%d (%s, %d in list, %d cov): ", a1, topology.atoms[a1].name, sasaData.h_sasaListCount[i], sasaData.h_pairs12Counts[i]); for(j = 0; j < sasaData.h_sasaListCount[i]; j++){ printf("%d ", sasaData.h_sasaList[j*sasaData.widthTot + i]); } printf("\n"); }*/ } void destroy(){ } __global__ void updateSASAPairsList_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_sasaData.threadsCountTot){ int i, j, found; int a1, a2; a1 = c_sasaData.d_threadAtom[d_i]; float4 r1 = tex1Dfetch(t_coord, a1); float4 r2; int sasaPairsCount = 0; int traj = d_i/c_sasaData.threadsCount; for(i = traj*c_sasaData.threadsCount; i < (traj + 1)*c_sasaData.threadsCount; i++){ a2 = c_sasaData.d_threadAtom[i]; if(a2 != a1){ r2 = tex1Dfetch(t_coord, a2); r2 -= r1; DO_PBC(r2); r2.w = sqrtf(r2.x*r2.x + r2.y*r2.y + r2.z*r2.z); if(r2.w < c_sasaData.sasaPairsListCutoff){ int covalentCount = c_sasaData.d_pairs12Counts[d_i]; j = 0; found = 0; while(found == 0 && j < covalentCount){ if(a2 == c_sasaData.d_sasaList[j*c_sasaData.widthTot + d_i]){ found = 1; } j++; } //float pij; if(found == 0){ //pij = c_sasaData.pij_nb; /* j = -a2; } else {*/ // j = a2; //pij = c_sasaData.pij_cov; c_sasaData.d_pairsList[sasaPairsCount*c_sasaData.widthTot + d_i] = a2; sasaPairsCount ++; } //c_sasaData.d_pairsListPij[sasaPairsCount*c_sasaData.widthTot + d_i] = pij; /*if(found == 0){ c_sasaData.d_pairsList[sasaPairsCount*c_sasaData.widthTot + d_i] = i; sasaPairsCount ++; }*/ } } } c_sasaData.d_pairsListCount[d_i] = sasaPairsCount; } } inline void updateSASAPairsList(){ 
updateSASAPairsList_kernel<<<sasaPairsListBlockCount, sasaPairsListBlockSize>>>(); cudaMemcpy(sasaData.h_pairsListCount, sasaData.d_pairsListCount, sasaData.threadsCountTot*sizeof(int), cudaMemcpyDeviceToHost); int i, traj; for(traj = 0; traj < parameters.Ntr; traj++){ for(i = 0; i < sasaData.threadsCount; i++){ if(sasaData.h_pairsListCount[i] > MAX_SASA_PAIRSLIST_ITEMS_PER_ATOM){ DIE("Number of SASA pairs on atom %d (trajectory %d) is %d, which exceeds the limit of %d.", sasaData.h_threadAtom[i], traj+parameters.firstrun, sasaData.h_pairsListCount[i], MAX_SASA_PAIRSLIST_ITEMS_PER_ATOM); } } } cudaThreadSynchronize(); checkCUDAError("update sasa pairs"); cudaMemcpy(sasaData.h_pairsListCount, sasaData.d_pairsListCount, sasaData.threadsCountTot*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(sasaData.h_pairsList, sasaData.d_pairsList, sasaData.widthTot*sasaData.maxPairsListItemsPerAtom*sizeof(int), cudaMemcpyDeviceToHost); /*int j; printf("\n\nList of SASA pairs:\n"); for(i = 0; i < sasaData.threadsCountTot; i++){ if(i % sasaData.threadsCount == 0){ printf("%d. %d (%d): ", i, sasaData.h_threadAtom[i], sasaData.h_pairsListCount[i]); for(j = 0; j < sasaData.h_pairsListCount[i]; j++){ printf("%d ", sasaData.h_pairsList[j*sasaData.widthTot + i]); } printf("\n"); } } for(j = 0; j < sasaData.threadsCount; j++){ if(sasaData.h_pairsListCount[j] != sasaData.h_pairsListCount[j+traj*sasaData.threadsCount]){ DIE("Sukanah!"); } }*/ } void destroySASAPairsListUpdater(){ } #undef LOG } // namespace sasa_potential
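// The setup code above sizes all of the changeable SASA lists by counting neighbours on the
// host, taking the per-atom maximum and padding it before allocating the width*maxPerAtom
// device arrays. The sketch below is only a stripped-down host-side illustration of that
// sizing pattern; the Vec3 type, the maxNeighboursPerAtom name and the cutoff/padding values
// are invented for the example and are not part of the file above.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <vector>

struct Vec3 { float x, y, z; };

// Count neighbours within a distance cutoff and return the padded per-atom maximum,
// i.e. the number of slots per atom that a fixed-width device list would need.
int maxNeighboursPerAtom(const std::vector<Vec3>& r, float cutoff, int padding) {
    std::vector<int> count(r.size(), 0);
    for (size_t i = 0; i < r.size(); ++i) {
        for (size_t j = 0; j < i; ++j) {
            float dx = r[j].x - r[i].x, dy = r[j].y - r[i].y, dz = r[j].z - r[i].z;
            if (std::sqrt(dx*dx + dy*dy + dz*dz) < cutoff) { count[i]++; count[j]++; }
        }
    }
    int maxCount = 0;
    for (size_t i = 0; i < count.size(); ++i) maxCount = std::max(maxCount, count[i]);
    return maxCount + padding;   // extra slots absorb fluctuations between pairlist rebuilds
}

int main() {
    std::vector<Vec3> r(100);
    for (size_t i = 0; i < r.size(); ++i) {
        r[i].x = rand() / (float)RAND_MAX;
        r[i].y = rand() / (float)RAND_MAX;
        r[i].z = rand() / (float)RAND_MAX;
    }
    printf("slots per atom: %d\n", maxNeighboursPerAtom(r, 0.3f, 16));
    return 0;
}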
c82c2623096d000072ead89c37bccf1fd299d595.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void binaryTestingKernel (
    int batchStart,
    int length,
    float* predictions,
    float* targets,
    int* result) {

    int withinBatch = blockIdx.x;

    int instanceStart = batchStart + withinBatch * length;
    int instanceEnd = instanceStart + length;

    for(int indexEntry = instanceStart; indexEntry < instanceEnd; indexEntry++) {
        float prediction = predictions[indexEntry];
        float target = targets[indexEntry];

        result[indexEntry] = (prediction < 0.5 && target == 0.0) || (prediction >= 0.5 && target == 1.0);
    }
}
c82c2623096d000072ead89c37bccf1fd299d595.cu
#include "includes.h" __global__ void binaryTestingKernel ( int batchStart, int length, float* predictions, float* targets, int* result) { int withinBatch = blockIdx.x; int instanceStart = batchStart + withinBatch * length; int instanceEnd = instanceStart + length; for(int indexEntry = instanceStart; indexEntry < instanceEnd; indexEntry++) { float prediction = predictions[indexEntry]; float target = targets[indexEntry]; result[indexEntry] = (prediction < 0.5 && target == 0.0) || (prediction >= 0.5 && target == 1.0); } }
12bfe5e413f047f2d29334afb07cbd12d50ee03c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorRandom.hip" #else #include "ATen/hip/HIPContext.h" #define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_normal), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(normal_means)(THCState *state, THCTensor *self, THCTensor *means, double stddev) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, stddev); THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means); } void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double mean, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, stddevs); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(add)(state, self, self, ScalarConvert<double, scalar_t>::to(mean)); } void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor *means, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means); } void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generateLogNormal<scalar_t>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_exponential), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, lambda); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_cauchy), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), 
gen->state.gen_states, size, data, median, sigma); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(renormRows)(struct THCState* state, THCTensor* t) { THAssert(THCTensor_(nDimensionLegacyAll)(state, t) == 2); int64_t rows = THCTensor_(size)(state, t, 0); int64_t cols = THCTensor_(size)(state, t, 1); hipDeviceProp_t* props = at::cuda::getCurrentDeviceProperties(); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; dim3 grid(rows < numSM * 4 ? rows : numSM * 4); dim3 block(cols < maxThreads ? cols : maxThreads); hipLaunchKernelGGL(( renormRowsL1<scalar_t>) , dim3(grid), dim3(block), block.x * sizeof(scalar_t), THCState_getCurrentStream(state), THCTensor_(data)(state, t), rows, cols); } void THCTensor_(multinomial)(struct THCState *state, THCudaLongTensor *self, THCTensor *prob_dist, int n_sample, int with_replacement) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist)); THCGenerator* gen = THCRandom_getGenerator(state); int inputSize = THCTensor_(nDimensionLegacyAll)(state, prob_dist); THArgCheck(inputSize > 0 && inputSize <= 2, 2, "prob_dist must be 1 or 2 dim"); // Categories are in the innermost dimension int64_t numDist = inputSize == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0); int64_t numCategoriesLong = inputSize == 1 ? THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0) : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 1); // Since the index tensor is float, numCategories cannot exceed max // float integer precision THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2, "number of categories cannot exceed 2^24"); int numCategories = (int) numCategoriesLong; THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); if (!with_replacement) { THArgCheck(n_sample <= numCategories, 2, "cannot sample n_sample > prob_dist:size(1) samples without " "replacement"); } int free_prob_dist = 0; // Restructure data for 2d if (inputSize == 1) { THCTensor *temp = THCTensor_(new)(state); THCTensor_(unsqueeze1d)(state, temp, prob_dist, 0); prob_dist = temp; free_prob_dist = 1; } THCudaLongTensor_resize2d(state, self, numDist, n_sample); // get current device properties hipDeviceProp_t* props = at::cuda::getCurrentDeviceProperties(); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; int maxShared = props->sharedMemPerBlock; int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads) * (sizeof(scalar_t) + sizeof(accreal)); if (n_sample == 1 && maxShared >= requiredShared) { // Optimized allocation-free implementation // To exploit greater parallelism for the sampling, generate the // Uniform random samples in a separate kernel launch, into // temporarily allocated memory. The device RNG is thread-limited THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample); auto out = THTensor_wrap(sampled); at::native::uniform_cuda_(out, 0.0, 1.0); dim3 block(numCategories < maxThreads ? numCategories : maxThreads); dim3 grid(numDist < numSM * 4 ? 
numDist : numSM * 4); hipLaunchKernelGGL(( sampleMultinomialOnce<scalar_t, accreal>) , dim3(grid), dim3(block), requiredShared, THCState_getCurrentStream(state), THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, sampled), THCTensor_(data)(state, prob_dist), THCTensor_(stride)(state, prob_dist, 0), THCTensor_(stride)(state, prob_dist, 1) ); THCTensor_(free)(state, sampled); } else { // Generic, slow implementation with memory allocations // For sampling without replacement, we modify the distribution // for subsequent samples in this space THCTensor* origDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, origDist, prob_dist); THCTensor_(copy)(state, origDist, prob_dist); THCTensor* normDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, normDist, prob_dist); THCTensor* prefixSum = THCTensor_(new)(state); // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); if (with_replacement) { // Sample with replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from one // distribution concurrently. dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS); hipLaunchKernelGGL(( sampleMultinomialWithReplacement) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), gen->state.gen_states, n_sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, prefixSum), THCTensor_(data)(state, normDist)); } else { // Sample without replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from a different // distribution concurrently. ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4); dim3 grid(numBlocks < MAX_NUM_BLOCKS ? 
numBlocks : MAX_NUM_BLOCKS); for (int sample = 0; sample < n_sample; ++sample) { if (sample > 0) { // Update probabilities // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); } // The kernel can only draw one sample before we have to // recalculate our distribution hipLaunchKernelGGL(( sampleMultinomialWithoutReplacement) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), gen->state.gen_states, n_sample, sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, origDist), THCTensor_(data)(state, prefixSum)); } } THCTensor_(free)(state, prefixSum); THCTensor_(free)(state, normDist); THCTensor_(free)(state, origDist); } // Revert data restructuring based on input sizes if (inputSize == 1) { THCudaLongTensor_resize1d(state, self, n_sample); } if (free_prob_dist) { THCTensor_(free)(state, prob_dist); } } void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){ THArgCheck(_probs->dim() == 1, 1, "expected 1-D probability tensor, got %d-D probability tensor instead", _probs->dim()); THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCTensor *probs = THCTensor_(newContiguous)(state, _probs); THAssert(THCTensor_(isContiguous)(state, probs)); int64_t inputsize = THCTensor_(nElement)(state, probs); THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor_resize1d(state, _J, inputsize); THCTensor_(resize1d)(state, _q, inputsize); scalar_t one = ScalarConvert<int64_t, scalar_t>::to(1); int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE); hipLaunchKernelGGL(( aliasMultinomialFilter) , dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state) , THCTensor_(data)(state, _q), THCTensor_(data)(state, probs), THCudaLongTensor_data(state, smaller), THCudaLongTensor_data(state, larger), THCudaLongTensor_data(state, _J), THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), one, inputsize ); THCudaLongTensor_nonzero(state, smaller_short, smaller); THCudaLongTensor_nonzero(state, larger_short, larger); int h_large_c = THCudaLongTensor_nElement(state, larger_short); THCudaLongTensor_resize1d(state, smaller_short, inputsize); THCudaLongTensor_resize1d(state, larger_short, inputsize); hipLaunchKernelGGL(( aliasMultinomialSetup) , dim3(1), dim3(1), 0, THCState_getCurrentStream(state), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), inputsize, THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), inputsize - h_large_c, h_large_c ); scalar_t q_max = THCTensor_(maxall)(state, _q); hipLaunchKernelGGL(( condDiv), dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, _q), THCudaLongTensor_data(state, _J), inputsize, q_max ); THCudaLongTensor_free(state, smaller); THCudaLongTensor_free(state, larger); THCudaLongTensor_free(state, smaller_short); THCudaLongTensor_free(state, larger_short); THCTensor_free(state, probs); } void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, 
THCTensor *_q, THCudaLongTensor *_J, int n_sample){ THArgCheck(_q->dim() == 1, 1, "expected 1-D probability table, got %d-D probability table instead", _q->dim()); THArgCheck(_J->dim() == 1, 2, "expected 1-D alias table, got %d-D alias table instead", _J->dim()); THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCGenerator* gen = THCRandom_getGenerator(state); int64_t K = THCudaLongTensor_nElement(state, _J); THCudaLongTensor_resize1d(state, self, n_sample); ptrdiff_t size = THCudaLongTensor_nElement(state, self); THCTensor *uniform = THCTensor_(newWithSize1d)(state, n_sample); THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, n_sample); auto out_uniform = THTensor_wrap(uniform); auto out_bernoulli = THTensor_wrap(bernoulli); at::native::uniform_cuda_(out_uniform, 0, K); at::native::uniform_cuda_(out_bernoulli, 0, 1); hipLaunchKernelGGL(( multinomialAliasDrawKernel) , dim3(THCCeilDiv((int)n_sample+BLOCK_SIZE-1, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), size, THCudaLongTensor_data(state, self), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), K, THCTensor_(data)(state, uniform), THCTensor_(data)(state, bernoulli) ); THCTensor_(free)(state, uniform); THCTensor_(free)(state, bernoulli); } #endif #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_geometric, double, double p, double, hiprand_uniform_double, ceil(log(x) / log(1-p))) #else GENERATE_KERNEL1(generate_geometric, scalar_t, double p, float, hiprand_uniform, (ScalarConvert<float, scalar_t>::to(ceilf(logf(x) / log(1-p))))) #endif #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) #define CURAND64(STATE) (((uint64_t)hiprand(STATE)) << 32) | (uint64_t)hiprand(STATE) GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, hiprand, \ static_cast<scalar_t>(static_cast<int32_t>((x % range) + base))) GENERATE_KERNEL2(generate_random_64, scalar_t, int64_t base, uint64_t range, uint64_t, CURAND64, \ static_cast<scalar_t>(static_cast<int64_t>((x % range) + base))) #elif defined(THC_REAL_IS_HALF) GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, hiprand, (ScalarConvert<int32_t, scalar_t>::to(static_cast<int32_t>(x % range + base)))) #else GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, hiprand, static_cast<scalar_t>(static_cast<int32_t>(x % range + base))) #endif void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_geometric), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, p); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(clampedRandom)(THCState* state, THCTensor *self_, int64_t min_val, int64_t max_val) { THArgCheck(min_val < max_val, 2, "max must be greater than min, but got: min = %lld, max = %lld", min_val, max_val); THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = 
THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); uint64_t range = max_val - min_val; #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) if (range > 1ULL << 32) { hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, min_val, range); } else { #endif hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(min_val), static_cast<uint32_t>(range)); #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) } #endif THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(cappedRandom)(THCState* state, THCTensor *self_, int64_t max_val) { THCTensor_(clampedRandom)(state, self_, 0LL, max_val); }; #define HLF_MANT_DIG 11 void THCTensor_(random)(THCState* state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); #if defined(THC_REAL_IS_HALF) hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << HLF_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_FLOAT) hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << FLT_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_DOUBLE) hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>((1ULL << DBL_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_LONG) hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1); #else hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>(std::numeric_limits<scalar_t>::max()) + 1); #endif THCTensor_(freeCopyTo)(state, self, self_); }; #undef HLF_MANT_DIG #undef CURAND64 #undef NUM_BLOCKS #endif
12bfe5e413f047f2d29334afb07cbd12d50ee03c.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorRandom.cu" #else #include "ATen/cuda/CUDAContext.h" #define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generate_normal<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(normal_means)(THCState *state, THCTensor *self, THCTensor *means, double stddev) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, stddev); THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means); } void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double mean, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, stddevs); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(add)(state, self, self, ScalarConvert<double, scalar_t>::to(mean)); } void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor *means, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means); } void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generateLogNormal<scalar_t><<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generate_exponential<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, lambda); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generate_cauchy<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, median, sigma); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(renormRows)(struct THCState* state, THCTensor* t) { THAssert(THCTensor_(nDimensionLegacyAll)(state, 
t) == 2); int64_t rows = THCTensor_(size)(state, t, 0); int64_t cols = THCTensor_(size)(state, t, 1); cudaDeviceProp* props = at::cuda::getCurrentDeviceProperties(); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; dim3 grid(rows < numSM * 4 ? rows : numSM * 4); dim3 block(cols < maxThreads ? cols : maxThreads); renormRowsL1<scalar_t> <<<grid, block, block.x * sizeof(scalar_t), THCState_getCurrentStream(state)>>>(THCTensor_(data)(state, t), rows, cols); } void THCTensor_(multinomial)(struct THCState *state, THCudaLongTensor *self, THCTensor *prob_dist, int n_sample, int with_replacement) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist)); THCGenerator* gen = THCRandom_getGenerator(state); int inputSize = THCTensor_(nDimensionLegacyAll)(state, prob_dist); THArgCheck(inputSize > 0 && inputSize <= 2, 2, "prob_dist must be 1 or 2 dim"); // Categories are in the innermost dimension int64_t numDist = inputSize == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0); int64_t numCategoriesLong = inputSize == 1 ? THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0) : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 1); // Since the index tensor is float, numCategories cannot exceed max // float integer precision THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2, "number of categories cannot exceed 2^24"); int numCategories = (int) numCategoriesLong; THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); if (!with_replacement) { THArgCheck(n_sample <= numCategories, 2, "cannot sample n_sample > prob_dist:size(1) samples without " "replacement"); } int free_prob_dist = 0; // Restructure data for 2d if (inputSize == 1) { THCTensor *temp = THCTensor_(new)(state); THCTensor_(unsqueeze1d)(state, temp, prob_dist, 0); prob_dist = temp; free_prob_dist = 1; } THCudaLongTensor_resize2d(state, self, numDist, n_sample); // get current device properties cudaDeviceProp* props = at::cuda::getCurrentDeviceProperties(); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; int maxShared = props->sharedMemPerBlock; int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads) * (sizeof(scalar_t) + sizeof(accreal)); if (n_sample == 1 && maxShared >= requiredShared) { // Optimized allocation-free implementation // To exploit greater parallelism for the sampling, generate the // Uniform random samples in a separate kernel launch, into // temporarily allocated memory. The device RNG is thread-limited THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample); auto out = THTensor_wrap(sampled); at::native::uniform_cuda_(out, 0.0, 1.0); dim3 block(numCategories < maxThreads ? numCategories : maxThreads); dim3 grid(numDist < numSM * 4 ? 
numDist : numSM * 4); sampleMultinomialOnce<scalar_t, accreal> <<<grid, block, requiredShared, THCState_getCurrentStream(state)>>>( THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, sampled), THCTensor_(data)(state, prob_dist), THCTensor_(stride)(state, prob_dist, 0), THCTensor_(stride)(state, prob_dist, 1) ); THCTensor_(free)(state, sampled); } else { // Generic, slow implementation with memory allocations // For sampling without replacement, we modify the distribution // for subsequent samples in this space THCTensor* origDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, origDist, prob_dist); THCTensor_(copy)(state, origDist, prob_dist); THCTensor* normDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, normDist, prob_dist); THCTensor* prefixSum = THCTensor_(new)(state); // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); if (with_replacement) { // Sample with replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from one // distribution concurrently. dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS); sampleMultinomialWithReplacement <<<grid, block, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, n_sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, prefixSum), THCTensor_(data)(state, normDist)); } else { // Sample without replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from a different // distribution concurrently. ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4); dim3 grid(numBlocks < MAX_NUM_BLOCKS ? 
numBlocks : MAX_NUM_BLOCKS); for (int sample = 0; sample < n_sample; ++sample) { if (sample > 0) { // Update probabilities // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); } // The kernel can only draw one sample before we have to // recalculate our distribution sampleMultinomialWithoutReplacement <<<grid, block, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, n_sample, sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, origDist), THCTensor_(data)(state, prefixSum)); } } THCTensor_(free)(state, prefixSum); THCTensor_(free)(state, normDist); THCTensor_(free)(state, origDist); } // Revert data restructuring based on input sizes if (inputSize == 1) { THCudaLongTensor_resize1d(state, self, n_sample); } if (free_prob_dist) { THCTensor_(free)(state, prob_dist); } } void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){ THArgCheck(_probs->dim() == 1, 1, "expected 1-D probability tensor, got %d-D probability tensor instead", _probs->dim()); THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCTensor *probs = THCTensor_(newContiguous)(state, _probs); THAssert(THCTensor_(isContiguous)(state, probs)); int64_t inputsize = THCTensor_(nElement)(state, probs); THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor_resize1d(state, _J, inputsize); THCTensor_(resize1d)(state, _q, inputsize); scalar_t one = ScalarConvert<int64_t, scalar_t>::to(1); int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE); aliasMultinomialFilter <<<inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state) >>>( THCTensor_(data)(state, _q), THCTensor_(data)(state, probs), THCudaLongTensor_data(state, smaller), THCudaLongTensor_data(state, larger), THCudaLongTensor_data(state, _J), THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), one, inputsize ); THCudaLongTensor_nonzero(state, smaller_short, smaller); THCudaLongTensor_nonzero(state, larger_short, larger); int h_large_c = THCudaLongTensor_nElement(state, larger_short); THCudaLongTensor_resize1d(state, smaller_short, inputsize); THCudaLongTensor_resize1d(state, larger_short, inputsize); aliasMultinomialSetup <<<1, 1, 0, THCState_getCurrentStream(state)>>>( THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), inputsize, THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), inputsize - h_large_c, h_large_c ); scalar_t q_max = THCTensor_(maxall)(state, _q); condDiv<<< inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, _q), THCudaLongTensor_data(state, _J), inputsize, q_max ); THCudaLongTensor_free(state, smaller); THCudaLongTensor_free(state, larger); THCudaLongTensor_free(state, smaller_short); THCudaLongTensor_free(state, larger_short); THCTensor_free(state, probs); } void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCTensor *_q, THCudaLongTensor *_J, int n_sample){ THArgCheck(_q->dim() == 1, 1, "expected 1-D probability table, got 
%d-D probability table instead", _q->dim()); THArgCheck(_J->dim() == 1, 2, "expected 1-D alias table, got %d-D alias table instead", _J->dim()); THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCGenerator* gen = THCRandom_getGenerator(state); int64_t K = THCudaLongTensor_nElement(state, _J); THCudaLongTensor_resize1d(state, self, n_sample); ptrdiff_t size = THCudaLongTensor_nElement(state, self); THCTensor *uniform = THCTensor_(newWithSize1d)(state, n_sample); THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, n_sample); auto out_uniform = THTensor_wrap(uniform); auto out_bernoulli = THTensor_wrap(bernoulli); at::native::uniform_cuda_(out_uniform, 0, K); at::native::uniform_cuda_(out_bernoulli, 0, 1); multinomialAliasDrawKernel <<<THCCeilDiv((int)n_sample+BLOCK_SIZE-1, BLOCK_SIZE), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( size, THCudaLongTensor_data(state, self), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), K, THCTensor_(data)(state, uniform), THCTensor_(data)(state, bernoulli) ); THCTensor_(free)(state, uniform); THCTensor_(free)(state, bernoulli); } #endif #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_geometric, double, double p, double, curand_uniform_double, ceil(log(x) / log(1-p))) #else GENERATE_KERNEL1(generate_geometric, scalar_t, double p, float, curand_uniform, (ScalarConvert<float, scalar_t>::to(ceilf(logf(x) / log(1-p))))) #endif #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) #define CURAND64(STATE) (((uint64_t)curand(STATE)) << 32) | (uint64_t)curand(STATE) GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand, \ static_cast<scalar_t>(static_cast<int32_t>((x % range) + base))) GENERATE_KERNEL2(generate_random_64, scalar_t, int64_t base, uint64_t range, uint64_t, CURAND64, \ static_cast<scalar_t>(static_cast<int64_t>((x % range) + base))) #elif defined(THC_REAL_IS_HALF) GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand, (ScalarConvert<int32_t, scalar_t>::to(static_cast<int32_t>(x % range + base)))) #else GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand, static_cast<scalar_t>(static_cast<int32_t>(x % range + base))) #endif void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generate_geometric<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, p); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(clampedRandom)(THCState* state, THCTensor *self_, int64_t min_val, int64_t max_val) { THArgCheck(min_val < max_val, 2, "max must be greater than min, but got: min = %lld, max = %lld", min_val, max_val); THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); uint64_t range = max_val - min_val; #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || 
defined(THC_REAL_IS_FLOAT) if (range > 1ULL << 32) { generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, min_val, range); } else { #endif generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(min_val), static_cast<uint32_t>(range)); #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) } #endif THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(cappedRandom)(THCState* state, THCTensor *self_, int64_t max_val) { THCTensor_(clampedRandom)(state, self_, 0LL, max_val); }; #define HLF_MANT_DIG 11 void THCTensor_(random)(THCState* state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); #if defined(THC_REAL_IS_HALF) generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << HLF_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_FLOAT) generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << FLT_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_DOUBLE) generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>((1ULL << DBL_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_LONG) generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1); #else generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>(std::numeric_limits<scalar_t>::max()) + 1); #endif THCTensor_(freeCopyTo)(state, self, self_); }; #undef HLF_MANT_DIG #undef CURAND64 #undef NUM_BLOCKS #endif
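// The NUM_BLOCKS macro above clamps the grid to MAX_NUM_BLOCKS, which presumes the generated
// kernels cover the whole tensor with a grid-stride loop. The GENERATE_KERNEL macros are defined
// elsewhere in THC, so the following is only an illustration of that capped-grid idiom with a
// plain cuRAND uniform fill; the kernel names, block cap and seed here are made up for the sketch.
#include <cstddef>
#include <cstdio>
#include <curand_kernel.h>

#define ILLUSTRATION_BLOCK_SIZE 256
#define ILLUSTRATION_MAX_BLOCKS 64

__global__ void initStates(curandState* states, unsigned long long seed) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, tid, 0, &states[tid]);
}

__global__ void gridStrideUniformFill(curandState* states, float* out, ptrdiff_t n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    curandState local = states[tid];                       // persistent per-thread generator state
    for (ptrdiff_t i = tid; i < n; i += (ptrdiff_t)blockDim.x * gridDim.x) {
        out[i] = curand_uniform(&local);                   // each thread fills a strided slice
    }
    states[tid] = local;                                   // save the advanced state for reuse
}

int main() {
    const ptrdiff_t n = 1 << 20;
    int blocks = (int)((n + ILLUSTRATION_BLOCK_SIZE - 1) / ILLUSTRATION_BLOCK_SIZE);
    if (blocks > ILLUSTRATION_MAX_BLOCKS) blocks = ILLUSTRATION_MAX_BLOCKS;   // cap like NUM_BLOCKS

    curandState* d_states;
    float* d_out;
    cudaMalloc(&d_states, blocks * ILLUSTRATION_BLOCK_SIZE * sizeof(curandState));
    cudaMalloc(&d_out, n * sizeof(float));

    initStates<<<blocks, ILLUSTRATION_BLOCK_SIZE>>>(d_states, 1234ULL);
    gridStrideUniformFill<<<blocks, ILLUSTRATION_BLOCK_SIZE>>>(d_states, d_out, n);
    cudaDeviceSynchronize();
    printf("filled %ld elements with %d blocks of %d threads\n", (long)n, blocks, ILLUSTRATION_BLOCK_SIZE);

    cudaFree(d_states);
    cudaFree(d_out);
    return 0;
}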
15885edfb8ed76a79975ee832638d525936dc726.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kd-tree-build.cuh" #include "multiple-radix-select.cuh" #include "quick-select.cuh" #include "radix-select.cuh" #include "stdio.h" #include "point.h" #include "helper_cuda.h" int nextPowerOf2_(int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } void UpDim(int &dim) { dim = (dim + 1) % 3; } void getThreadAndBlockCountForBuild(int n, int &blocks, int &threads) { threads = min(nextPowerOf2_(n), 512); blocks = n / threads; blocks = max(1, blocks); blocks = min(MAX_BLOCK_DIM_SIZE, blocks); // printf("block = %d, threads = %d, n = %d\n", blocks, threads, n); } __device__ void cuCalculateBlockOffsetAndNoOfLists_(int n, int &n_per_block, int &block_offset) { int rest = n % gridDim.x; n_per_block = n / gridDim.x; block_offset = n_per_block * blockIdx.x; if (rest >= gridDim.x - blockIdx.x) { block_offset += rest - (gridDim.x - blockIdx.x); n_per_block++; } } __device__ void cuPointSwapCondition(struct Point *p, int a, int b, int dim) { struct Point temp_a = p[a], temp_b = p[b]; if (temp_a.p[dim] > temp_b.p[dim] ) { p[a] = temp_b, p[b] = temp_a; } } __global__ void balanceLeafs(struct Point *points, int *steps, int p, int dim) { struct Point *l_points; int list_in_block, block_offset, tid = threadIdx.x, step_num, n; cuCalculateBlockOffsetAndNoOfLists_(p, list_in_block, block_offset); steps += block_offset * 2; while ( tid < list_in_block) { step_num = tid * 2; l_points = points + steps[step_num]; n = steps[step_num + 1] - steps[step_num]; if (n == 2) { cuPointSwapCondition(l_points, 0, 1, dim); } else if (n == 3) { cuPointSwapCondition(l_points, 0, 1, dim); cuPointSwapCondition(l_points, 1, 2, dim); cuPointSwapCondition(l_points, 0, 1, dim); } tid += blockDim.x; } } int store_locations(struct Node *tree, int lower, int upper, int n) { int r; if (lower >= upper) { return -1; } r = (int) ((upper - lower) / 2) + lower; tree[r].left = store_locations(tree, lower, r, n); tree[r].right = store_locations(tree, r + 1, upper, n); return r; } __device__ __host__ void pointConvert(struct Node &p1, struct Point &p2) { p1.p[0] = p2.p[0], p1.p[1] = p2.p[1], p1.p[2] = p2.p[2]; #ifdef ADD_POINT_ID p1.id = p2.id; #endif } __global__ void convertPoints(struct Point *points_small, int n, struct Node *points) { int local_n, block_offset, tid = threadIdx.x; cuCalculateBlockOffsetAndNoOfLists_(n, local_n, block_offset); points += block_offset; points_small += block_offset; while (tid < local_n) { pointConvert(points[tid], points_small[tid]); tid += blockDim.x; } } void nextStep(int *steps_new, int *steps_old, int n) { int i, midpoint, from, to; for (i = 0; i < n / 2; ++i) { from = steps_old[i * 2]; to = steps_old[i * 2 + 1]; midpoint = (to - from) / 2 + from; steps_new[i * 4] = from; steps_new[i * 4 + 1] = midpoint; steps_new[i * 4 + 2] = midpoint + 1; steps_new[i * 4 + 3] = to; } } void swap_pointer(int **a, int **b) { int *swap; swap = *a, *a = *b, *b = swap; } void singleRadixSelectAndPartition(struct Point *d_points, struct Point *d_swap, int *d_partition, int *h_steps, int p, int dir) { int nn, offset, j; for (j = 0; j < p; j ++) { offset = h_steps[j * 2]; nn = h_steps[j * 2 + 1] - offset; if (nn > 1) { radixSelectAndPartition(d_points + offset, d_swap + offset, d_partition + offset, nn, dir); } } } size_t getFreeBytesOnGpu_() { size_t free_byte, total_byte ; hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ; return free_byte; } size_t 
getNeededBytesForBuildingKdTree(int n) { int number_of_leafs = (n + 1) / 2; return (number_of_leafs * 2 * sizeof(int)) + (2 * n * sizeof(int)) + (2 * n * sizeof(Point)); } void cuBuildKdTree(struct Point *h_points, int n, int dim, struct Node *tree) { struct Point *d_points, *d_swap; struct Node *d_tree; int *d_partition, block_num, thread_num, *d_steps, *h_steps_old, *h_steps_new, step, i = 0, p = 1, number_of_leafs = (n + 1) / 2, h = (int)ceil(log2((float)n + 1)); h_steps_new = (int *)malloc(number_of_leafs * 2 * sizeof(int)); h_steps_old = (int *)malloc(number_of_leafs * 2 * sizeof(int)); h_steps_new[0] = 0; h_steps_old[0] = 0; h_steps_old[1] = n; h_steps_new[1] = n; checkCudaErrors( hipMalloc(&d_steps, number_of_leafs * 2 * sizeof(int))); checkCudaErrors( hipMalloc(&d_partition, n * sizeof(int))); checkCudaErrors( hipMalloc(&d_points, n * sizeof(Point))); checkCudaErrors( hipMalloc(&d_swap, n * sizeof(Point))); checkCudaErrors( hipMemcpy(d_points, h_points, n * sizeof(Point), hipMemcpyHostToDevice)); radixSelectAndPartition(d_points, d_swap, d_partition, n, dim); UpDim(dim); i++; while (i < (h - 1) ) { nextStep(h_steps_new, h_steps_old, p <<= 1); step = h_steps_new[1] - h_steps_new[0]; checkCudaErrors( hipMemcpy(d_steps, h_steps_new, p * 2 * sizeof(int), hipMemcpyHostToDevice)); if (step >= 9000000) { singleRadixSelectAndPartition(d_points, d_swap, d_partition, h_steps_new, p, dim); } else if (step > 3000) { multiRadixSelectAndPartition(d_points, d_swap, d_partition, d_steps, step, p, dim); } else if (step > 3) { quickSelectAndPartition(d_points, d_steps, step, p, dim); } else { getThreadAndBlockCountForBuild(n, block_num, thread_num); hipLaunchKernelGGL(( balanceLeafs) , dim3(block_num), dim3(thread_num) , 0, 0, d_points, d_steps, p, dim); } swap_pointer(&h_steps_new, &h_steps_old); i++; UpDim(dim); } checkCudaErrors(hipFree(d_swap)); checkCudaErrors(hipFree(d_partition)); checkCudaErrors(hipFree(d_steps)); free(h_steps_new); free(h_steps_old); checkCudaErrors(hipMalloc(&d_tree, n * sizeof(Node))); getThreadAndBlockCountForBuild(n, block_num, thread_num); hipLaunchKernelGGL(( convertPoints) , dim3(block_num), dim3(thread_num) , 0, 0, d_points, n, d_tree); checkCudaErrors(hipMemcpy(tree, d_tree, n * sizeof(Node), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_points)); checkCudaErrors(hipFree(d_tree)); } void buildKdTreeStep(struct Point *h_points, int n, int dim, struct Node *tree) { if (n <= 0) return; size_t free_bytes, needed_bytes; int m = n >> 1; free_bytes = getFreeBytesOnGpu_(); needed_bytes = getNeededBytesForBuildingKdTree(n); if (free_bytes > needed_bytes) { cuBuildKdTree(h_points, n, dim, tree); } else { cpuQuickSelect(h_points, n, dim); pointConvert(tree[m], h_points[m]); UpDim(dim); buildKdTreeStep(h_points, m, dim, tree); buildKdTreeStep(h_points + m + 1, n - m - 1, dim, tree + m + 1); } } void buildKdTree(struct Point *h_points, int n, struct Node *tree) { int dim = 0; buildKdTreeStep(h_points, n, dim, tree); store_locations(tree, 0, n, n); }
15885edfb8ed76a79975ee832638d525936dc726.cu
#include "kd-tree-build.cuh" #include "multiple-radix-select.cuh" #include "quick-select.cuh" #include "radix-select.cuh" #include "stdio.h" #include "point.h" #include "helper_cuda.h" int nextPowerOf2_(int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } void UpDim(int &dim) { dim = (dim + 1) % 3; } void getThreadAndBlockCountForBuild(int n, int &blocks, int &threads) { threads = min(nextPowerOf2_(n), 512); blocks = n / threads; blocks = max(1, blocks); blocks = min(MAX_BLOCK_DIM_SIZE, blocks); // printf("block = %d, threads = %d, n = %d\n", blocks, threads, n); } __device__ void cuCalculateBlockOffsetAndNoOfLists_(int n, int &n_per_block, int &block_offset) { int rest = n % gridDim.x; n_per_block = n / gridDim.x; block_offset = n_per_block * blockIdx.x; if (rest >= gridDim.x - blockIdx.x) { block_offset += rest - (gridDim.x - blockIdx.x); n_per_block++; } } __device__ void cuPointSwapCondition(struct Point *p, int a, int b, int dim) { struct Point temp_a = p[a], temp_b = p[b]; if (temp_a.p[dim] > temp_b.p[dim] ) { p[a] = temp_b, p[b] = temp_a; } } __global__ void balanceLeafs(struct Point *points, int *steps, int p, int dim) { struct Point *l_points; int list_in_block, block_offset, tid = threadIdx.x, step_num, n; cuCalculateBlockOffsetAndNoOfLists_(p, list_in_block, block_offset); steps += block_offset * 2; while ( tid < list_in_block) { step_num = tid * 2; l_points = points + steps[step_num]; n = steps[step_num + 1] - steps[step_num]; if (n == 2) { cuPointSwapCondition(l_points, 0, 1, dim); } else if (n == 3) { cuPointSwapCondition(l_points, 0, 1, dim); cuPointSwapCondition(l_points, 1, 2, dim); cuPointSwapCondition(l_points, 0, 1, dim); } tid += blockDim.x; } } int store_locations(struct Node *tree, int lower, int upper, int n) { int r; if (lower >= upper) { return -1; } r = (int) ((upper - lower) / 2) + lower; tree[r].left = store_locations(tree, lower, r, n); tree[r].right = store_locations(tree, r + 1, upper, n); return r; } __device__ __host__ void pointConvert(struct Node &p1, struct Point &p2) { p1.p[0] = p2.p[0], p1.p[1] = p2.p[1], p1.p[2] = p2.p[2]; #ifdef ADD_POINT_ID p1.id = p2.id; #endif } __global__ void convertPoints(struct Point *points_small, int n, struct Node *points) { int local_n, block_offset, tid = threadIdx.x; cuCalculateBlockOffsetAndNoOfLists_(n, local_n, block_offset); points += block_offset; points_small += block_offset; while (tid < local_n) { pointConvert(points[tid], points_small[tid]); tid += blockDim.x; } } void nextStep(int *steps_new, int *steps_old, int n) { int i, midpoint, from, to; for (i = 0; i < n / 2; ++i) { from = steps_old[i * 2]; to = steps_old[i * 2 + 1]; midpoint = (to - from) / 2 + from; steps_new[i * 4] = from; steps_new[i * 4 + 1] = midpoint; steps_new[i * 4 + 2] = midpoint + 1; steps_new[i * 4 + 3] = to; } } void swap_pointer(int **a, int **b) { int *swap; swap = *a, *a = *b, *b = swap; } void singleRadixSelectAndPartition(struct Point *d_points, struct Point *d_swap, int *d_partition, int *h_steps, int p, int dir) { int nn, offset, j; for (j = 0; j < p; j ++) { offset = h_steps[j * 2]; nn = h_steps[j * 2 + 1] - offset; if (nn > 1) { radixSelectAndPartition(d_points + offset, d_swap + offset, d_partition + offset, nn, dir); } } } size_t getFreeBytesOnGpu_() { size_t free_byte, total_byte ; cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ; return free_byte; } size_t getNeededBytesForBuildingKdTree(int n) { int number_of_leafs = (n + 1) / 2; return (number_of_leafs * 2 * 
sizeof(int)) + (2 * n * sizeof(int)) + (2 * n * sizeof(Point)); } void cuBuildKdTree(struct Point *h_points, int n, int dim, struct Node *tree) { struct Point *d_points, *d_swap; struct Node *d_tree; int *d_partition, block_num, thread_num, *d_steps, *h_steps_old, *h_steps_new, step, i = 0, p = 1, number_of_leafs = (n + 1) / 2, h = (int)ceil(log2((float)n + 1)); h_steps_new = (int *)malloc(number_of_leafs * 2 * sizeof(int)); h_steps_old = (int *)malloc(number_of_leafs * 2 * sizeof(int)); h_steps_new[0] = 0; h_steps_old[0] = 0; h_steps_old[1] = n; h_steps_new[1] = n; checkCudaErrors( cudaMalloc(&d_steps, number_of_leafs * 2 * sizeof(int))); checkCudaErrors( cudaMalloc(&d_partition, n * sizeof(int))); checkCudaErrors( cudaMalloc(&d_points, n * sizeof(Point))); checkCudaErrors( cudaMalloc(&d_swap, n * sizeof(Point))); checkCudaErrors( cudaMemcpy(d_points, h_points, n * sizeof(Point), cudaMemcpyHostToDevice)); radixSelectAndPartition(d_points, d_swap, d_partition, n, dim); UpDim(dim); i++; while (i < (h - 1) ) { nextStep(h_steps_new, h_steps_old, p <<= 1); step = h_steps_new[1] - h_steps_new[0]; checkCudaErrors( cudaMemcpy(d_steps, h_steps_new, p * 2 * sizeof(int), cudaMemcpyHostToDevice)); if (step >= 9000000) { singleRadixSelectAndPartition(d_points, d_swap, d_partition, h_steps_new, p, dim); } else if (step > 3000) { multiRadixSelectAndPartition(d_points, d_swap, d_partition, d_steps, step, p, dim); } else if (step > 3) { quickSelectAndPartition(d_points, d_steps, step, p, dim); } else { getThreadAndBlockCountForBuild(n, block_num, thread_num); balanceLeafs <<< block_num, thread_num >>> (d_points, d_steps, p, dim); } swap_pointer(&h_steps_new, &h_steps_old); i++; UpDim(dim); } checkCudaErrors(cudaFree(d_swap)); checkCudaErrors(cudaFree(d_partition)); checkCudaErrors(cudaFree(d_steps)); free(h_steps_new); free(h_steps_old); checkCudaErrors(cudaMalloc(&d_tree, n * sizeof(Node))); getThreadAndBlockCountForBuild(n, block_num, thread_num); convertPoints <<< block_num, thread_num >>> (d_points, n, d_tree); checkCudaErrors(cudaMemcpy(tree, d_tree, n * sizeof(Node), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_points)); checkCudaErrors(cudaFree(d_tree)); } void buildKdTreeStep(struct Point *h_points, int n, int dim, struct Node *tree) { if (n <= 0) return; size_t free_bytes, needed_bytes; int m = n >> 1; free_bytes = getFreeBytesOnGpu_(); needed_bytes = getNeededBytesForBuildingKdTree(n); if (free_bytes > needed_bytes) { cuBuildKdTree(h_points, n, dim, tree); } else { cpuQuickSelect(h_points, n, dim); pointConvert(tree[m], h_points[m]); UpDim(dim); buildKdTreeStep(h_points, m, dim, tree); buildKdTreeStep(h_points + m + 1, n - m - 1, dim, tree + m + 1); } } void buildKdTree(struct Point *h_points, int n, struct Node *tree) { int dim = 0; buildKdTreeStep(h_points, n, dim, tree); store_locations(tree, 0, n, n); }
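The builder above exposes `buildKdTree` as its entry point. The host-side sketch below shows one way it might be driven; it is illustrative only and assumes that `Point` and `Node` (declared in the project's point.h, not shown here) carry a `float p[3]` member and that `Node` additionally holds the `left`/`right` indices written by `store_locations`.

#include <cstdlib>
#include <vector>

int main()
{
    const int n = 1 << 20;

    // Fill n random points in the unit cube (assumed Point layout: float p[3]).
    std::vector<Point> points(n);
    for (int i = 0; i < n; ++i)
        for (int d = 0; d < 3; ++d)
            points[i].p[d] = (float)rand() / RAND_MAX;

    // Build the balanced kd-tree; the splitting dimension cycles x -> y -> z.
    std::vector<Node> tree(n);
    buildKdTree(points.data(), n, tree.data());

    // store_locations() places the root at the median of the top-level split,
    // i.e. at index n / 2; its left/right fields index the child medians.
    const Node &root = tree[n / 2];
    (void)root;
    return 0;
}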
c108d0f616d95a17228c94dbfd31691cec174bc5.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduce_moments.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *d_arr = NULL;
            hipMalloc(&d_arr, XSIZE*YSIZE*sizeof(float));      // allocate XSIZE*YSIZE floats
            float *d_results = NULL;
            hipMalloc(&d_results, XSIZE*YSIZE*sizeof(float));
            int N = XSIZE*YSIZE;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(reduce_moments, dim3(gridBlock), dim3(threadBlock), 0, 0, d_arr, d_results, N);
            hipDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(reduce_moments, dim3(gridBlock), dim3(threadBlock), 0, 0, d_arr, d_results, N);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(reduce_moments, dim3(gridBlock), dim3(threadBlock), 0, 0, d_arr, d_results, N);
            }
            hipDeviceSynchronize();                            // wait for the timed launches to finish
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            hipFree(d_arr);
            hipFree(d_results);
        }
    }
}
c108d0f616d95a17228c94dbfd31691cec174bc5.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduce_moments.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *d_arr = NULL;
            cudaMalloc(&d_arr, XSIZE*YSIZE*sizeof(float));     // allocate XSIZE*YSIZE floats
            float *d_results = NULL;
            cudaMalloc(&d_results, XSIZE*YSIZE*sizeof(float));
            int N = XSIZE*YSIZE;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            reduce_moments<<<gridBlock,threadBlock>>>(d_arr, d_results, N);
            cudaDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                reduce_moments<<<gridBlock,threadBlock>>>(d_arr, d_results, N);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                reduce_moments<<<gridBlock,threadBlock>>>(d_arr, d_results, N);
            }
            cudaDeviceSynchronize();                           // wait for the timed launches to finish
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            cudaFree(d_arr);
            cudaFree(d_results);
        }
    }
}
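The harness above times 1000 kernel launches with a host-side steady_clock and a final device synchronization. Because kernel launches are asynchronous, CUDA events are the more conventional way to bracket the work on the device itself. A minimal sketch, assuming the same `gridBlock`, `threadBlock`, `d_arr`, `d_results`, and `N` variables as in the loop body above:

// Event-based timing for the same 1000-launch batch.
cudaEvent_t start_ev, stop_ev;
cudaEventCreate(&start_ev);
cudaEventCreate(&stop_ev);

cudaEventRecord(start_ev);                             // recorded on the default stream
for (int iter = 0; iter < 1000; ++iter) {
    reduce_moments<<<gridBlock, threadBlock>>>(d_arr, d_results, N);
}
cudaEventRecord(stop_ev);
cudaEventSynchronize(stop_ev);                         // block until the last kernel has finished

float ms = 0.0f;
cudaEventElapsedTime(&ms, start_ev, stop_ev);          // elapsed milliseconds for all 1000 launches
cudaEventDestroy(start_ev);
cudaEventDestroy(stop_ev);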
572dd941e5c509de9736b17667b5e277d7961e19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Macro.h" #include "CUFLU.h" #if ( defined GPU && MODEL == ELBDM ) // useful macros #define to1D1(z,y,x) ( __umul24(z, FLU_NXT*FLU_NXT) + __umul24(y, FLU_NXT) + x ) #define to1D2(z,y,x) ( __umul24(z-FLU_GHOST_SIZE, PS2*PS2) + __umul24(y-FLU_GHOST_SIZE, PS2) + x-FLU_GHOST_SIZE ) #ifdef LAPLACIAN_4TH # define LAP_GHOST 2 # define LAP1(In,i) ( real(1.0/ 12.0)*( - In[i-2] + (real)16.0*In[i-1] - (real)30.0*In[i ] \ - In[i+2] + (real)16.0*In[i+1] ) ) # define LAP2(In,i) ( real(1.0/144.0)*( + In[i-4] - (real)32.0*In[i-3] + (real)316.0*In[i-2] - (real)992.0*In[i-1] \ + In[i+4] - (real)32.0*In[i+3] + (real)316.0*In[i+2] - (real)992.0*In[i+1] \ + (real)1414.0*In[i ] ) ) # ifndef CONSERVE_MASS # define LAP3(In,i) ( real(1.0/1728.0)* \ ( -In[i-6] + (real)48*In[i-5] - (real)858*In[i-4] + (real)7024*In[i-3] - (real)27279*In[i-2] + (real)58464*In[i-1] \ -In[i+6] + (real)48*In[i+5] - (real)858*In[i+4] + (real)7024*In[i+3] - (real)27279*In[i+2] + (real)58464*In[i+1] \ - (real)74796*In[i ] ) ) # endif #else // #ifdef LAPLACIAN_4TH # define LAP_GHOST 1 # define LAP1(In,i) ( + In[i-1] - (real)2.0*In[i ] + In[i+1] ) # define LAP2(In,i) ( + In[i-2] - (real)4.0*In[i-1] + (real)6.0*In[i ] - (real)4.0*In[i+1] + In[i+2] ) # ifndef CONSERVE_MASS # define LAP3(In,i) ( + In[i-3] - (real)6.0*In[i-2] + (real)15.0*In[i-1] - (real)20.0*In[i ] \ + In[i+3] - (real)6.0*In[i+2] + (real)15.0*In[i+1] ) # endif #endif // #ifdef LAPLACIAN_4TH ... else ... static __device__ void CUFLU_Advance( real g_Fluid_In [][FLU_NIN ][ CUBE(FLU_NXT) ], real g_Fluid_Out[][FLU_NOUT][ CUBE(PS2) ], real g_Flux [][9][NFLUX_TOTAL][ SQR(PS2) ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const uint j_gap, const uint k_gap, real s_In[][FLU_BLOCK_SIZE_Y][FLU_NXT], real s_Half[][FLU_BLOCK_SIZE_Y][FLU_NXT], real s_Flux[][PS2+1], const bool FinalOut, const int XYZ, const real MinDens ); //------------------------------------------------------------------------------------------------------- // Function : CUFLU_ELBDMSolver // Description : GPU ELBDM kinematic solver based on expanding the propagator to 3rd order // // Note : 1. The three-dimensional evolution is achieved by applying x, y, and z operators successively. // Since these operators commute, the order of applying them are irrelevant. // --> Input pamameter "XYZ" is actually useless // --> Nevertheless, the symmetry in different directions will be broken if CONSERVE_MASS is on // 2. The implementation is very similar to the function " CUFLU_FluidSolver_RTVD" // 4. 
Prefix "g" for pointers pointing to the "Global" memory space // Prefix "s" for pointers pointing to the "Shared" memory space // // Parameter : g_Fluid_In : Global memory array storing the input variables // g_Fluid_Out : Global memory array to store the output variables // g_Flux : Global memory array to store the output fluxes (useful only if StoreFlux == true) // dt : Time interval to advance solution // _dh : 1 / grid size // Eta : Particle mass / Planck constant // StoreFlux : true --> store the coarse-fine fluxes // --> useful only if CONSERVE_MASS is defined // Taylor3_Coeff : Coefficient in front of the third term in the Taylor expansion // XYZ : true : x->y->z ( forward sweep) // false : z->y->x (backward sweep) // --> Meaningless if CONSERVE_MASS is off since the operators along different directions // commute // --> Meaningful if CONSERVE_MASS is on, in which the symmetry along different directions // are broken ... // MinDens : Minimum allowed density //------------------------------------------------------------------------------------------------------- __global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ CUBE(FLU_NXT) ], real g_Fluid_Out[][FLU_NOUT][ CUBE(PS2) ], real g_Flux [][9][NFLUX_TOTAL][ SQR(PS2) ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const bool XYZ, const real MinDens ) { __shared__ real s_In [FLU_NIN][FLU_BLOCK_SIZE_Y][FLU_NXT]; # ifdef CONSERVE_MASS __shared__ real s_Half[FLU_NIN][FLU_BLOCK_SIZE_Y][FLU_NXT]; __shared__ real s_Flux[FLU_BLOCK_SIZE_Y][PS2+1]; # else real (*s_Half)[FLU_BLOCK_SIZE_Y][FLU_NXT] = NULL; // useless if CONSERVE_MASS is off real (*s_Flux)[PS2+1] = NULL; // useless if CONSERVE_MASS is off # endif if ( XYZ ) { CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, 0, 0, s_In, s_Half, s_Flux, false, 0, MinDens ); CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, FLU_GHOST_SIZE, 0, s_In, s_Half, s_Flux, false, 3, MinDens ); CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, FLU_GHOST_SIZE, FLU_GHOST_SIZE, s_In, s_Half, s_Flux, true, 6, MinDens ); } else { CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, 0, 0, s_In, s_Half, s_Flux, false, 6, MinDens ); CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, 0, FLU_GHOST_SIZE, s_In, s_Half, s_Flux, false, 3, MinDens ); CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, FLU_GHOST_SIZE, FLU_GHOST_SIZE, s_In, s_Half, s_Flux, true, 0, MinDens ); } } // FUNCTION : CUFLU_ELBDMSolver //------------------------------------------------------------------------------------------------------- // Function : CUFLU_Advance // Description : Use GPU to advance solutions by one time-step // // Note : 1. Based on expanding the kinematic propagator to 3rd order // 2. Prefix "g" for pointers pointing to the "Global" memory space // Prefix "s" for pointers pointing to the "Shared" memory space // 3. 
The direction of the one dimensional sweep is determined by the input parameter "XYZ" // // Parameter : g_Fluid_In : Global memory array storing the input variables // g_Fluid_Out : Global memory array to store the output variables // g_Flux : Global memory array to store the output fluxes (useful only if StoreFlux == true) // dt : Time interval to advance solution // _dh : 1 / grid size // Eta : Particle mass / Planck constant // StoreFlux : true --> store the coarse-fine fluxes // --> useful only if CONSERVE_MASS is defined // Taylor3_Coeff : Coefficient in front of the third term in the Taylor expansion // j_gap : Number of useless grids on each side in the j direction (j may not be equal to y) // k_gap : Number of useless grids on each side in the k direction (k mya not be equal to z) // s_In : Shared memory array to store the input data // s_Half : Shared memory array to store the half-step solution // s_Flux : Shared memory array to store the boundary fluxes // FinalOut : true --> store the updated data to g_Fluid_Out // XYZ : 0 : Update the solution in the x direction // 3 : Update the solution in the y direction // 6 : Update the solution in the z direction // --> This parameter is also used to determine the place to store the output fluxes // MinDens : Minimum allowed density //------------------------------------------------------------------------------------------------------- __device__ void CUFLU_Advance( real g_Fluid_In [][FLU_NIN ][ CUBE(FLU_NXT) ], real g_Fluid_Out[][FLU_NOUT][ CUBE(PS2) ], real g_Flux [][9][NFLUX_TOTAL][ SQR(PS2) ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const uint j_gap, const uint k_gap, real s_In[][FLU_BLOCK_SIZE_Y][FLU_NXT], real s_Half[][FLU_BLOCK_SIZE_Y][FLU_NXT], real s_Flux[][PS2+1], const bool FinalOut, const int XYZ, const real MinDens ) { const real _Eta = (real)1.0/Eta; const real dT = (real)0.5*dt*_Eta; const real _Eta2_dh = (real)0.5*_dh*_Eta; const real Coeff1 = dT*_dh*_dh; # ifdef CONSERVE_MASS const real Coeff2 = Taylor3_Coeff*SQR(Coeff1); # else const real Coeff2 = (real)0.5*SQR(Coeff1); const real Coeff3 = Taylor3_Coeff*CUBE(Coeff1); # endif const uint bx = blockIdx.x; const uint tx = threadIdx.x; const uint ty = threadIdx.y; const uint tid = __umul24(ty,FLU_BLOCK_SIZE_X) + tx; const uint size_j = FLU_NXT - (j_gap<<1); const uint size_k = FLU_NXT - (k_gap<<1); const uint NColumnTotal = __umul24( size_j, size_k ); // total number of data columns to be updated const uint i = tx + FLU_GHOST_SIZE; // (i,j,k): array indices used in g_Fluid_In const uint j_end = FLU_NXT - j_gap; uint j = j_gap + ty%size_j; uint k = k_gap + ty/size_j; uint Column0 = 0; // the total number of columns that have been updated uint NColumnOnce = MIN( NColumnTotal, FLU_BLOCK_SIZE_Y ); double Amp_New; // use double precision to reduce the round-off error in the mass conservation real Re_Old, Im_Old, Re_New, Im_New; uint Idx1, Idx2, Idx3, delta_k; # ifdef CONSERVE_MASS const uint NThread = FLU_BLOCK_SIZE_X*FLU_BLOCK_SIZE_Y; const uint NHalf = FLU_NXT - 4*LAP_GHOST; const real dT_dh2 = dT*_dh*_dh; const uint txp = tx + 1; double Amp_Old, Amp_Corr; // use double precision to reduce the round-off error in the mass conservation real R, I, dR, dI; uint Idx; uint si, sj; // array indices used in the shared memory array uint f, fp1; // array indices used in the s_Flux array # ifdef LAPLACIAN_4TH uint fm1, fp2; # endif # endif // #ifdef CONSERVE_MASS // determine the array indices for loading the ghost-zone data bool 
LoadGhost = false; // true --> load the ghost-zone data uint LoadGhost_i; int LoadGhost_di, LoadGhost_dIdx1; if ( tx < 2*FLU_GHOST_SIZE ) { LoadGhost = true; if ( tx < FLU_GHOST_SIZE ) LoadGhost_di = -FLU_GHOST_SIZE; else LoadGhost_di = -FLU_GHOST_SIZE + PS2; switch ( XYZ ) { case 0: LoadGhost_dIdx1 = LoadGhost_di; break; case 3: LoadGhost_dIdx1 = __mul24( LoadGhost_di, FLU_NXT ); break; case 6: LoadGhost_dIdx1 = __mul24( LoadGhost_di, FLU_NXT*FLU_NXT ); break; } LoadGhost_i = (int)i + LoadGhost_di; } // if ( tx < 2*FLU_GHOST_SIZE ) // loop over all data columns while ( Column0 < NColumnTotal ) { // 1. load data into shared memory if ( tid < NColumnOnce*PS2 ) { // 1.1 determine the array indices for loading global memory data along different directions switch ( XYZ ) { case 0: Idx1 = to1D1( k, j, i ); break; case 3: Idx1 = to1D1( k, i, j ); break; case 6: Idx1 = to1D1( i, k, j ); break; } // 1.2 load the interior data into shared memory Re_Old = g_Fluid_In[bx][0][Idx1]; Im_Old = g_Fluid_In[bx][1][Idx1]; s_In[0][ty][i] = Re_Old; s_In[1][ty][i] = Im_Old; // 1.3 load the ghost-zone data into shared memory if ( LoadGhost ) { s_In[0][ty][LoadGhost_i] = g_Fluid_In[bx][0][ (int)Idx1 + LoadGhost_dIdx1 ]; s_In[1][ty][LoadGhost_i] = g_Fluid_In[bx][1][ (int)Idx1 + LoadGhost_dIdx1 ]; } } // if ( tid < NColumnOnce*PS2 ) __syncthreads(); # ifdef CONSERVE_MASS // 2. half-step solution Idx = tid; while ( Idx < NColumnOnce*NHalf ) { si = Idx % NHalf + 2*LAP_GHOST; sj = Idx / NHalf; s_Half[0][sj][si] = s_In[0][sj][si] - (real)0.5*Coeff1*LAP1( s_In[1][sj], si ) - Coeff2*LAP2( s_In[0][sj], si ); s_Half[1][sj][si] = s_In[1][sj][si] + (real)0.5*Coeff1*LAP1( s_In[0][sj], si ) - Coeff2*LAP2( s_In[1][sj], si ); Idx += NThread; } // while ( Idx < NColumnOnce*NHalf ) __syncthreads(); // 3. calculate the face-center fluxes (the coefficient _dh has been absorted into the constant dT_dh2) Idx = tid; while ( Idx < NColumnOnce*(PS2+1) ) { si = Idx % (PS2+1); sj = Idx / (PS2+1); f = si + FLU_GHOST_SIZE - 1; fp1 = f + 1; # ifdef LAPLACIAN_4TH fm1 = f - 1; fp2 = f + 2; R = real(1./28.)*( -s_Half[0][sj][fm1]+(real)15*s_Half[0][sj][f]+(real)15*s_Half[0][sj][fp1]-s_Half[0][sj][fp2] ); I = real(1./28.)*( -s_Half[1][sj][fm1]+(real)15*s_Half[1][sj][f]+(real)15*s_Half[1][sj][fp1]-s_Half[1][sj][fp2] ); dR = real(1./12.)*( +s_Half[0][sj][fm1]-(real)15*s_Half[0][sj][f]+(real)15*s_Half[0][sj][fp1]-s_Half[0][sj][fp2] ); dI = real(1./12.)*( +s_Half[1][sj][fm1]-(real)15*s_Half[1][sj][f]+(real)15*s_Half[1][sj][fp1]-s_Half[1][sj][fp2] ); # else R = real(0.5)*( + s_Half[0][sj][f] + s_Half[0][sj][fp1] ); I = real(0.5)*( + s_Half[1][sj][f] + s_Half[1][sj][fp1] ); dR = ( - s_Half[0][sj][f] + s_Half[0][sj][fp1] ); dI = ( - s_Half[1][sj][f] + s_Half[1][sj][fp1] ); # endif s_Flux[sj][si] = (real)2.0*( R*dI - I*dR ); Idx += NThread; } // while ( Idx < NColumnOnce*(PS2+1) ) __syncthreads(); // 4a. 
full-step solution (equivalent to the 3rd-order Taylor expansion) if ( tid < NColumnOnce*PS2 ) { Re_New = Re_Old - Coeff1*LAP1( s_Half[1][ty], i ); Im_New = Im_Old + Coeff1*LAP1( s_Half[0][ty], i ); Amp_Old = SQR( Re_Old ) + SQR( Im_Old ); Amp_New = SQR( Re_New ) + SQR( Im_New ); Amp_Corr = Amp_Old - dT_dh2*( s_Flux[ty][txp] - s_Flux[ty][tx] ); // be careful about the negative density and the vacuum (where we might have Amp_New == 0.0) // if ( Amp_Corr > (real)0.0 && Amp_New > (real)0.0 ) if ( Amp_Corr > 0.0 && Amp_New > 0.0 ) { /* Re_New *= SQRT( Amp_Corr / Amp_New ); Im_New *= SQRT( Amp_Corr / Amp_New ); */ Re_New *= sqrt( Amp_Corr / Amp_New ); // use double precision to improve the mass conservation further Im_New *= sqrt( Amp_Corr / Amp_New ); Amp_New = Amp_Corr; } } // if if ( tid < NColumnOnce*PS2 ) # else // CONSERVE_MASS // 4b. full-step solution if CONSERVE_MASS is not defined (equivalent to the 3rd-order Taylor expansion) if ( tid < NColumnOnce*PS2 ) { Re_New = Re_Old - Coeff1*LAP1( s_In[1][ty], i ) - Coeff2*LAP2( s_In[0][ty], i ) + Coeff3*LAP3( s_In[1][ty], i ); Im_New = Im_Old + Coeff1*LAP1( s_In[0][ty], i ) - Coeff2*LAP2( s_In[1][ty], i ) - Coeff3*LAP3( s_In[0][ty], i ); Amp_New = SQR( Re_New ) + SQR( Im_New ); } # endif // CONSERVE_MASS ... else ... // 5. store the updated data (and fluxes) back to the global memory if ( tid < NColumnOnce*PS2 ) { // 5.1 data if ( FinalOut ) { // apply the the minimum density check // --> to be consistent with the CPU solver, we apply it just before storing the output results to g_Fluid_Out if ( Amp_New < MinDens ) { const real Rescale = SQRT( MinDens / (real)Amp_New ); Re_New *= Rescale; Im_New *= Rescale; Amp_New = MinDens; } switch ( XYZ ) { case 0: Idx2 = to1D2( k, j, i ); break; case 3: Idx2 = to1D2( k, i, j ); break; case 6: Idx2 = to1D2( i, k, j ); break; } g_Fluid_Out[bx][0][Idx2] = Amp_New; g_Fluid_Out[bx][1][Idx2] = Re_New; g_Fluid_Out[bx][2][Idx2] = Im_New; } else { g_Fluid_In[bx][0][Idx1] = Re_New; g_Fluid_In[bx][1][Idx1] = Im_New; } // 5.2 fluxes (for the flux-correction operation) if ( StoreFlux && tx == 0 ) if ( k >= FLU_GHOST_SIZE && k < FLU_NXT-FLU_GHOST_SIZE ) if ( j >= FLU_GHOST_SIZE && j < FLU_NXT-FLU_GHOST_SIZE ) { Idx3 = __umul24( k-FLU_GHOST_SIZE, PS2 ) + (j-FLU_GHOST_SIZE); g_Flux[bx][XYZ+0][0][Idx3] = s_Flux[ty][ 0]*_Eta2_dh; g_Flux[bx][XYZ+1][0][Idx3] = s_Flux[ty][PS1]*_Eta2_dh; g_Flux[bx][XYZ+2][0][Idx3] = s_Flux[ty][PS2]*_Eta2_dh; } // 5.3 reset the target array indices j += NColumnOnce; if ( j >= j_end ) { delta_k = ( j - j_end )/size_j + 1; k += delta_k; j -= __umul24( size_j, delta_k ); } } // if ( tid < NColumnOnce*PS2 ) __syncthreads(); Column0 += NColumnOnce; NColumnOnce = MIN( NColumnTotal - Column0, FLU_BLOCK_SIZE_Y ); } // while ( Column0 < NColumnTotal ) } // FUNCTION : CUFLU_Advance #endif // #if ( defined GPU && MODEL == ELBDM )
572dd941e5c509de9736b17667b5e277d7961e19.cu
#include "Macro.h" #include "CUFLU.h" #if ( defined GPU && MODEL == ELBDM ) // useful macros #define to1D1(z,y,x) ( __umul24(z, FLU_NXT*FLU_NXT) + __umul24(y, FLU_NXT) + x ) #define to1D2(z,y,x) ( __umul24(z-FLU_GHOST_SIZE, PS2*PS2) + __umul24(y-FLU_GHOST_SIZE, PS2) + x-FLU_GHOST_SIZE ) #ifdef LAPLACIAN_4TH # define LAP_GHOST 2 # define LAP1(In,i) ( real(1.0/ 12.0)*( - In[i-2] + (real)16.0*In[i-1] - (real)30.0*In[i ] \ - In[i+2] + (real)16.0*In[i+1] ) ) # define LAP2(In,i) ( real(1.0/144.0)*( + In[i-4] - (real)32.0*In[i-3] + (real)316.0*In[i-2] - (real)992.0*In[i-1] \ + In[i+4] - (real)32.0*In[i+3] + (real)316.0*In[i+2] - (real)992.0*In[i+1] \ + (real)1414.0*In[i ] ) ) # ifndef CONSERVE_MASS # define LAP3(In,i) ( real(1.0/1728.0)* \ ( -In[i-6] + (real)48*In[i-5] - (real)858*In[i-4] + (real)7024*In[i-3] - (real)27279*In[i-2] + (real)58464*In[i-1] \ -In[i+6] + (real)48*In[i+5] - (real)858*In[i+4] + (real)7024*In[i+3] - (real)27279*In[i+2] + (real)58464*In[i+1] \ - (real)74796*In[i ] ) ) # endif #else // #ifdef LAPLACIAN_4TH # define LAP_GHOST 1 # define LAP1(In,i) ( + In[i-1] - (real)2.0*In[i ] + In[i+1] ) # define LAP2(In,i) ( + In[i-2] - (real)4.0*In[i-1] + (real)6.0*In[i ] - (real)4.0*In[i+1] + In[i+2] ) # ifndef CONSERVE_MASS # define LAP3(In,i) ( + In[i-3] - (real)6.0*In[i-2] + (real)15.0*In[i-1] - (real)20.0*In[i ] \ + In[i+3] - (real)6.0*In[i+2] + (real)15.0*In[i+1] ) # endif #endif // #ifdef LAPLACIAN_4TH ... else ... static __device__ void CUFLU_Advance( real g_Fluid_In [][FLU_NIN ][ CUBE(FLU_NXT) ], real g_Fluid_Out[][FLU_NOUT][ CUBE(PS2) ], real g_Flux [][9][NFLUX_TOTAL][ SQR(PS2) ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const uint j_gap, const uint k_gap, real s_In[][FLU_BLOCK_SIZE_Y][FLU_NXT], real s_Half[][FLU_BLOCK_SIZE_Y][FLU_NXT], real s_Flux[][PS2+1], const bool FinalOut, const int XYZ, const real MinDens ); //------------------------------------------------------------------------------------------------------- // Function : CUFLU_ELBDMSolver // Description : GPU ELBDM kinematic solver based on expanding the propagator to 3rd order // // Note : 1. The three-dimensional evolution is achieved by applying x, y, and z operators successively. // Since these operators commute, the order of applying them are irrelevant. // --> Input pamameter "XYZ" is actually useless // --> Nevertheless, the symmetry in different directions will be broken if CONSERVE_MASS is on // 2. The implementation is very similar to the function " CUFLU_FluidSolver_RTVD" // 4. Prefix "g" for pointers pointing to the "Global" memory space // Prefix "s" for pointers pointing to the "Shared" memory space // // Parameter : g_Fluid_In : Global memory array storing the input variables // g_Fluid_Out : Global memory array to store the output variables // g_Flux : Global memory array to store the output fluxes (useful only if StoreFlux == true) // dt : Time interval to advance solution // _dh : 1 / grid size // Eta : Particle mass / Planck constant // StoreFlux : true --> store the coarse-fine fluxes // --> useful only if CONSERVE_MASS is defined // Taylor3_Coeff : Coefficient in front of the third term in the Taylor expansion // XYZ : true : x->y->z ( forward sweep) // false : z->y->x (backward sweep) // --> Meaningless if CONSERVE_MASS is off since the operators along different directions // commute // --> Meaningful if CONSERVE_MASS is on, in which the symmetry along different directions // are broken ... 
// MinDens : Minimum allowed density //------------------------------------------------------------------------------------------------------- __global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ CUBE(FLU_NXT) ], real g_Fluid_Out[][FLU_NOUT][ CUBE(PS2) ], real g_Flux [][9][NFLUX_TOTAL][ SQR(PS2) ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const bool XYZ, const real MinDens ) { __shared__ real s_In [FLU_NIN][FLU_BLOCK_SIZE_Y][FLU_NXT]; # ifdef CONSERVE_MASS __shared__ real s_Half[FLU_NIN][FLU_BLOCK_SIZE_Y][FLU_NXT]; __shared__ real s_Flux[FLU_BLOCK_SIZE_Y][PS2+1]; # else real (*s_Half)[FLU_BLOCK_SIZE_Y][FLU_NXT] = NULL; // useless if CONSERVE_MASS is off real (*s_Flux)[PS2+1] = NULL; // useless if CONSERVE_MASS is off # endif if ( XYZ ) { CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, 0, 0, s_In, s_Half, s_Flux, false, 0, MinDens ); CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, FLU_GHOST_SIZE, 0, s_In, s_Half, s_Flux, false, 3, MinDens ); CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, FLU_GHOST_SIZE, FLU_GHOST_SIZE, s_In, s_Half, s_Flux, true, 6, MinDens ); } else { CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, 0, 0, s_In, s_Half, s_Flux, false, 6, MinDens ); CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, 0, FLU_GHOST_SIZE, s_In, s_Half, s_Flux, false, 3, MinDens ); CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, Eta, StoreFlux, Taylor3_Coeff, FLU_GHOST_SIZE, FLU_GHOST_SIZE, s_In, s_Half, s_Flux, true, 0, MinDens ); } } // FUNCTION : CUFLU_ELBDMSolver //------------------------------------------------------------------------------------------------------- // Function : CUFLU_Advance // Description : Use GPU to advance solutions by one time-step // // Note : 1. Based on expanding the kinematic propagator to 3rd order // 2. Prefix "g" for pointers pointing to the "Global" memory space // Prefix "s" for pointers pointing to the "Shared" memory space // 3. 
The direction of the one dimensional sweep is determined by the input parameter "XYZ" // // Parameter : g_Fluid_In : Global memory array storing the input variables // g_Fluid_Out : Global memory array to store the output variables // g_Flux : Global memory array to store the output fluxes (useful only if StoreFlux == true) // dt : Time interval to advance solution // _dh : 1 / grid size // Eta : Particle mass / Planck constant // StoreFlux : true --> store the coarse-fine fluxes // --> useful only if CONSERVE_MASS is defined // Taylor3_Coeff : Coefficient in front of the third term in the Taylor expansion // j_gap : Number of useless grids on each side in the j direction (j may not be equal to y) // k_gap : Number of useless grids on each side in the k direction (k mya not be equal to z) // s_In : Shared memory array to store the input data // s_Half : Shared memory array to store the half-step solution // s_Flux : Shared memory array to store the boundary fluxes // FinalOut : true --> store the updated data to g_Fluid_Out // XYZ : 0 : Update the solution in the x direction // 3 : Update the solution in the y direction // 6 : Update the solution in the z direction // --> This parameter is also used to determine the place to store the output fluxes // MinDens : Minimum allowed density //------------------------------------------------------------------------------------------------------- __device__ void CUFLU_Advance( real g_Fluid_In [][FLU_NIN ][ CUBE(FLU_NXT) ], real g_Fluid_Out[][FLU_NOUT][ CUBE(PS2) ], real g_Flux [][9][NFLUX_TOTAL][ SQR(PS2) ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const uint j_gap, const uint k_gap, real s_In[][FLU_BLOCK_SIZE_Y][FLU_NXT], real s_Half[][FLU_BLOCK_SIZE_Y][FLU_NXT], real s_Flux[][PS2+1], const bool FinalOut, const int XYZ, const real MinDens ) { const real _Eta = (real)1.0/Eta; const real dT = (real)0.5*dt*_Eta; const real _Eta2_dh = (real)0.5*_dh*_Eta; const real Coeff1 = dT*_dh*_dh; # ifdef CONSERVE_MASS const real Coeff2 = Taylor3_Coeff*SQR(Coeff1); # else const real Coeff2 = (real)0.5*SQR(Coeff1); const real Coeff3 = Taylor3_Coeff*CUBE(Coeff1); # endif const uint bx = blockIdx.x; const uint tx = threadIdx.x; const uint ty = threadIdx.y; const uint tid = __umul24(ty,FLU_BLOCK_SIZE_X) + tx; const uint size_j = FLU_NXT - (j_gap<<1); const uint size_k = FLU_NXT - (k_gap<<1); const uint NColumnTotal = __umul24( size_j, size_k ); // total number of data columns to be updated const uint i = tx + FLU_GHOST_SIZE; // (i,j,k): array indices used in g_Fluid_In const uint j_end = FLU_NXT - j_gap; uint j = j_gap + ty%size_j; uint k = k_gap + ty/size_j; uint Column0 = 0; // the total number of columns that have been updated uint NColumnOnce = MIN( NColumnTotal, FLU_BLOCK_SIZE_Y ); double Amp_New; // use double precision to reduce the round-off error in the mass conservation real Re_Old, Im_Old, Re_New, Im_New; uint Idx1, Idx2, Idx3, delta_k; # ifdef CONSERVE_MASS const uint NThread = FLU_BLOCK_SIZE_X*FLU_BLOCK_SIZE_Y; const uint NHalf = FLU_NXT - 4*LAP_GHOST; const real dT_dh2 = dT*_dh*_dh; const uint txp = tx + 1; double Amp_Old, Amp_Corr; // use double precision to reduce the round-off error in the mass conservation real R, I, dR, dI; uint Idx; uint si, sj; // array indices used in the shared memory array uint f, fp1; // array indices used in the s_Flux array # ifdef LAPLACIAN_4TH uint fm1, fp2; # endif # endif // #ifdef CONSERVE_MASS // determine the array indices for loading the ghost-zone data bool 
LoadGhost = false; // true --> load the ghost-zone data uint LoadGhost_i; int LoadGhost_di, LoadGhost_dIdx1; if ( tx < 2*FLU_GHOST_SIZE ) { LoadGhost = true; if ( tx < FLU_GHOST_SIZE ) LoadGhost_di = -FLU_GHOST_SIZE; else LoadGhost_di = -FLU_GHOST_SIZE + PS2; switch ( XYZ ) { case 0: LoadGhost_dIdx1 = LoadGhost_di; break; case 3: LoadGhost_dIdx1 = __mul24( LoadGhost_di, FLU_NXT ); break; case 6: LoadGhost_dIdx1 = __mul24( LoadGhost_di, FLU_NXT*FLU_NXT ); break; } LoadGhost_i = (int)i + LoadGhost_di; } // if ( tx < 2*FLU_GHOST_SIZE ) // loop over all data columns while ( Column0 < NColumnTotal ) { // 1. load data into shared memory if ( tid < NColumnOnce*PS2 ) { // 1.1 determine the array indices for loading global memory data along different directions switch ( XYZ ) { case 0: Idx1 = to1D1( k, j, i ); break; case 3: Idx1 = to1D1( k, i, j ); break; case 6: Idx1 = to1D1( i, k, j ); break; } // 1.2 load the interior data into shared memory Re_Old = g_Fluid_In[bx][0][Idx1]; Im_Old = g_Fluid_In[bx][1][Idx1]; s_In[0][ty][i] = Re_Old; s_In[1][ty][i] = Im_Old; // 1.3 load the ghost-zone data into shared memory if ( LoadGhost ) { s_In[0][ty][LoadGhost_i] = g_Fluid_In[bx][0][ (int)Idx1 + LoadGhost_dIdx1 ]; s_In[1][ty][LoadGhost_i] = g_Fluid_In[bx][1][ (int)Idx1 + LoadGhost_dIdx1 ]; } } // if ( tid < NColumnOnce*PS2 ) __syncthreads(); # ifdef CONSERVE_MASS // 2. half-step solution Idx = tid; while ( Idx < NColumnOnce*NHalf ) { si = Idx % NHalf + 2*LAP_GHOST; sj = Idx / NHalf; s_Half[0][sj][si] = s_In[0][sj][si] - (real)0.5*Coeff1*LAP1( s_In[1][sj], si ) - Coeff2*LAP2( s_In[0][sj], si ); s_Half[1][sj][si] = s_In[1][sj][si] + (real)0.5*Coeff1*LAP1( s_In[0][sj], si ) - Coeff2*LAP2( s_In[1][sj], si ); Idx += NThread; } // while ( Idx < NColumnOnce*NHalf ) __syncthreads(); // 3. calculate the face-center fluxes (the coefficient _dh has been absorted into the constant dT_dh2) Idx = tid; while ( Idx < NColumnOnce*(PS2+1) ) { si = Idx % (PS2+1); sj = Idx / (PS2+1); f = si + FLU_GHOST_SIZE - 1; fp1 = f + 1; # ifdef LAPLACIAN_4TH fm1 = f - 1; fp2 = f + 2; R = real(1./28.)*( -s_Half[0][sj][fm1]+(real)15*s_Half[0][sj][f]+(real)15*s_Half[0][sj][fp1]-s_Half[0][sj][fp2] ); I = real(1./28.)*( -s_Half[1][sj][fm1]+(real)15*s_Half[1][sj][f]+(real)15*s_Half[1][sj][fp1]-s_Half[1][sj][fp2] ); dR = real(1./12.)*( +s_Half[0][sj][fm1]-(real)15*s_Half[0][sj][f]+(real)15*s_Half[0][sj][fp1]-s_Half[0][sj][fp2] ); dI = real(1./12.)*( +s_Half[1][sj][fm1]-(real)15*s_Half[1][sj][f]+(real)15*s_Half[1][sj][fp1]-s_Half[1][sj][fp2] ); # else R = real(0.5)*( + s_Half[0][sj][f] + s_Half[0][sj][fp1] ); I = real(0.5)*( + s_Half[1][sj][f] + s_Half[1][sj][fp1] ); dR = ( - s_Half[0][sj][f] + s_Half[0][sj][fp1] ); dI = ( - s_Half[1][sj][f] + s_Half[1][sj][fp1] ); # endif s_Flux[sj][si] = (real)2.0*( R*dI - I*dR ); Idx += NThread; } // while ( Idx < NColumnOnce*(PS2+1) ) __syncthreads(); // 4a. 
full-step solution (equivalent to the 3rd-order Taylor expansion) if ( tid < NColumnOnce*PS2 ) { Re_New = Re_Old - Coeff1*LAP1( s_Half[1][ty], i ); Im_New = Im_Old + Coeff1*LAP1( s_Half[0][ty], i ); Amp_Old = SQR( Re_Old ) + SQR( Im_Old ); Amp_New = SQR( Re_New ) + SQR( Im_New ); Amp_Corr = Amp_Old - dT_dh2*( s_Flux[ty][txp] - s_Flux[ty][tx] ); // be careful about the negative density and the vacuum (where we might have Amp_New == 0.0) // if ( Amp_Corr > (real)0.0 && Amp_New > (real)0.0 ) if ( Amp_Corr > 0.0 && Amp_New > 0.0 ) { /* Re_New *= SQRT( Amp_Corr / Amp_New ); Im_New *= SQRT( Amp_Corr / Amp_New ); */ Re_New *= sqrt( Amp_Corr / Amp_New ); // use double precision to improve the mass conservation further Im_New *= sqrt( Amp_Corr / Amp_New ); Amp_New = Amp_Corr; } } // if if ( tid < NColumnOnce*PS2 ) # else // CONSERVE_MASS // 4b. full-step solution if CONSERVE_MASS is not defined (equivalent to the 3rd-order Taylor expansion) if ( tid < NColumnOnce*PS2 ) { Re_New = Re_Old - Coeff1*LAP1( s_In[1][ty], i ) - Coeff2*LAP2( s_In[0][ty], i ) + Coeff3*LAP3( s_In[1][ty], i ); Im_New = Im_Old + Coeff1*LAP1( s_In[0][ty], i ) - Coeff2*LAP2( s_In[1][ty], i ) - Coeff3*LAP3( s_In[0][ty], i ); Amp_New = SQR( Re_New ) + SQR( Im_New ); } # endif // CONSERVE_MASS ... else ... // 5. store the updated data (and fluxes) back to the global memory if ( tid < NColumnOnce*PS2 ) { // 5.1 data if ( FinalOut ) { // apply the the minimum density check // --> to be consistent with the CPU solver, we apply it just before storing the output results to g_Fluid_Out if ( Amp_New < MinDens ) { const real Rescale = SQRT( MinDens / (real)Amp_New ); Re_New *= Rescale; Im_New *= Rescale; Amp_New = MinDens; } switch ( XYZ ) { case 0: Idx2 = to1D2( k, j, i ); break; case 3: Idx2 = to1D2( k, i, j ); break; case 6: Idx2 = to1D2( i, k, j ); break; } g_Fluid_Out[bx][0][Idx2] = Amp_New; g_Fluid_Out[bx][1][Idx2] = Re_New; g_Fluid_Out[bx][2][Idx2] = Im_New; } else { g_Fluid_In[bx][0][Idx1] = Re_New; g_Fluid_In[bx][1][Idx1] = Im_New; } // 5.2 fluxes (for the flux-correction operation) if ( StoreFlux && tx == 0 ) if ( k >= FLU_GHOST_SIZE && k < FLU_NXT-FLU_GHOST_SIZE ) if ( j >= FLU_GHOST_SIZE && j < FLU_NXT-FLU_GHOST_SIZE ) { Idx3 = __umul24( k-FLU_GHOST_SIZE, PS2 ) + (j-FLU_GHOST_SIZE); g_Flux[bx][XYZ+0][0][Idx3] = s_Flux[ty][ 0]*_Eta2_dh; g_Flux[bx][XYZ+1][0][Idx3] = s_Flux[ty][PS1]*_Eta2_dh; g_Flux[bx][XYZ+2][0][Idx3] = s_Flux[ty][PS2]*_Eta2_dh; } // 5.3 reset the target array indices j += NColumnOnce; if ( j >= j_end ) { delta_k = ( j - j_end )/size_j + 1; k += delta_k; j -= __umul24( size_j, delta_k ); } } // if ( tid < NColumnOnce*PS2 ) __syncthreads(); Column0 += NColumnOnce; NColumnOnce = MIN( NColumnTotal - Column0, FLU_BLOCK_SIZE_Y ); } // while ( Column0 < NColumnTotal ) } // FUNCTION : CUFLU_Advance #endif // #if ( defined GPU && MODEL == ELBDM )
fb6f811484b5b8008f6f823e0781a7040ea5a447.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void scaleVector(float *d_res, const float *d_src, float scale, const int len)
{
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    if (pos >= len) return;

    d_res[pos] = d_src[pos] * scale;
}
fb6f811484b5b8008f6f823e0781a7040ea5a447.cu
#include "includes.h" __global__ void scaleVector(float *d_res, const float *d_src, float scale, const int len) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= len) return; d_res[pos] = d_src[pos] * scale; }
ee402021a4ae1ba805d0d58857eac3a76ac8950b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void Subtract(float *x, size_t idx, size_t N, float W0, float W1)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
    {
        x[(idx-2)*N+i] = W0*x[(idx-1)*N+i] - W1*x[(idx-2)*N+i];
    }
    return;
}
ee402021a4ae1ba805d0d58857eac3a76ac8950b.cu
#include "includes.h" __global__ void Subtract( float * x, size_t idx, size_t N, float W0, float W1) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { x[(idx-2)*N+i] = W0*x[(idx-1)*N+i] - W1*x[(idx-2)*N+i]; } return; }
2e8f172ce638c54d8a8ba2a4aa39e501b763e3cf.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
//#include <cutil_inline.h>

extern "C" void runCudaPart(float a[], float b[], float c[], int n);

__global__ void myKernel(float *a, float *b, float *c, int n)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    //return;
    if (idx < n) {
        c[idx] = a[idx] + b[idx];
        //c[idx] = 10.0;
    }
    //printf("Test\n");
    return;
}

// Main cuda function
void runCudaPart(float *a, float *b, float *c, int n)
{
    float *a_d, *b_d, *c_d;
    size_t size = n * sizeof(float);

    hipMalloc((void **) &a_d, size);
    hipMalloc((void **) &b_d, size);
    hipMalloc((void **) &c_d, size);

    hipMemcpy(a_d, a, size, hipMemcpyHostToDevice);
    hipMemcpy(b_d, b, size, hipMemcpyHostToDevice);
    hipMemcpy(c_d, c, size, hipMemcpyHostToDevice);
    //hipMemset(c_d, 0, n);

    printf("Executing CUDA kernel\n");
    hipLaunchKernelGGL(myKernel, dim3(1), dim3(100), 0, 0, a_d, b_d, c_d, n);
    hipMemcpy(c, c_d, size, hipMemcpyDeviceToHost);
    printf("Kernel ended.\n");
}
2e8f172ce638c54d8a8ba2a4aa39e501b763e3cf.cu
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
//#include <cutil_inline.h>

extern "C" void runCudaPart(float a[], float b[], float c[], int n);

__global__ void myKernel(float *a, float *b, float *c, int n)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    //return;
    if (idx < n) {
        c[idx] = a[idx] + b[idx];
        //c[idx] = 10.0;
    }
    //printf("Test\n");
    return;
}

// Main cuda function
void runCudaPart(float *a, float *b, float *c, int n)
{
    float *a_d, *b_d, *c_d;
    size_t size = n * sizeof(float);

    cudaMalloc((void **) &a_d, size);
    cudaMalloc((void **) &b_d, size);
    cudaMalloc((void **) &c_d, size);

    cudaMemcpy(a_d, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, size, cudaMemcpyHostToDevice);
    cudaMemcpy(c_d, c, size, cudaMemcpyHostToDevice);
    //cudaMemset(c_d, 0, n);

    printf("Executing CUDA kernel\n");
    myKernel <<<1,100>>> (a_d, b_d, c_d, n);
    cudaMemcpy(c, c_d, size, cudaMemcpyDeviceToHost);
    printf("Kernel ended.\n");
}
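`runCudaPart` above launches a single block of 100 threads, so when `n` exceeds 100 only the first 100 elements are summed, and the device buffers are never released. The sketch below is a hypothetical size-independent variant, not the file's original behavior; it also drops the host-to-device copy of `c`, which the kernel only writes.

void runCudaPartAnySize(float *a, float *b, float *c, int n)
{
    float *a_d, *b_d, *c_d;
    size_t size = n * sizeof(float);

    cudaMalloc((void **) &a_d, size);
    cudaMalloc((void **) &b_d, size);
    cudaMalloc((void **) &c_d, size);

    cudaMemcpy(a_d, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, size, cudaMemcpyHostToDevice);

    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;    // enough blocks to cover all n elements
    myKernel<<<blocks, threads>>>(a_d, b_d, c_d, n);

    cudaMemcpy(c, c_d, size, cudaMemcpyDeviceToHost);

    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
}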
14f16a44fa0df48ec51ad413f2290ee9fd019f9c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void gpu_init(int *mapad, int max, int size)
{
    /* Required identifiers */
    int IDX_Thread = threadIdx.x;                     /* Thread index in the x dimension */
    int IDY_Thread = threadIdx.y;                     /* Thread index in the y dimension */
    int IDX_block = blockIdx.x;                       /* Block index in the x dimension */
    int IDY_block = blockIdx.y;                       /* Block index in the y dimension */
    int shapeGrid_X = gridDim.x;                      /* Number of blocks in the x dimension */
    int threads_per_block = blockDim.x * blockDim.y;  /* Number of threads per block (flattened to 1D) */

    /* Formula to compute the position */
    // Position in the vector as a function of the thread and the block
    int position = threads_per_block * ((IDY_block * shapeGrid_X) + IDX_block) + ((IDY_Thread * blockDim.x) + IDX_Thread);

    // initialize
    if (position < size)
        mapad[position] = max;
}
14f16a44fa0df48ec51ad413f2290ee9fd019f9c.cu
#include "includes.h" __global__ void gpu_init(int *mapad, int max, int size){ /*Identificaciones necesarios*/ int IDX_Thread = threadIdx.x; /*Identificacion del hilo en la dimension*/ int IDY_Thread = threadIdx.y; /*Identificacion del hilo en la dimension y*/ int IDX_block = blockIdx.x; /*Identificacion del bloque en la dimension x*/ int IDY_block = blockIdx.y; /*Identificacion del bloque en la dimension y */ int shapeGrid_X = gridDim.x; /*Numeros del bloques en la dimension */ int threads_per_block = blockDim.x * blockDim.y; /* Numero de hilos por bloque (1 dimension) */ /*Formula para calcular la posicion*/ //Posicion del vector dependiendo del hilo y del bloque int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread); //inicializamos if(position<size) mapad[position] = max; }
a45b2a54be21317b8ae03cc79c4737141de14799.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/detail/concatenate.cuh> #include <cudf/detail/indexalator.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/stream_compaction.hpp> #include <cudf/dictionary/detail/concatenate.hpp> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/transform_scan.h> #include <algorithm> #include <vector> namespace cudf { namespace dictionary { namespace detail { namespace { /** * @brief Keys and indices offsets values. * * The first value is the keys offsets and the second values is the indices offsets. * These are offsets to the beginning of each input column after concatenating. */ using offsets_pair = thrust::pair<size_type, size_type>; /** * @brief Utility for calculating the offsets for the concatenated child columns * of the output dictionary column. */ struct compute_children_offsets_fn { /** * @brief Create the utility functor. * * The columns vector is converted into vector of column_view pointers so they * can be used in thrust::transform_exclusive_scan without causing the * compiler warning/error: "host/device function calling host function". * * @param columns The input dictionary columns. */ compute_children_offsets_fn(host_span<column_view const> columns) : columns_ptrs{columns.size()} { std::transform( columns.begin(), columns.end(), columns_ptrs.begin(), [](auto& cv) { return &cv; }); } /** * @brief Return the first keys().type of the dictionary columns. */ data_type get_keys_type() { auto const view(*std::find_if( columns_ptrs.begin(), columns_ptrs.end(), [](auto pcv) { return pcv->size() > 0; })); return dictionary_column_view(*view).keys().type(); } /** * @brief Create the offsets pair for the concatenated columns. * * Both vectors have the length of the number of input columns. * The sizes of each child (keys and indices) of the individual columns * are used to create the offsets. * * @param stream Stream used for allocating the output rmm::device_uvector. * @return Vector of offsets_pair objects for keys and indices. 
*/ rmm::device_uvector<offsets_pair> create_children_offsets(rmm::cuda_stream_view stream) { std::vector<offsets_pair> offsets(columns_ptrs.size()); thrust::transform_exclusive_scan( thrust::host, columns_ptrs.begin(), columns_ptrs.end(), offsets.begin(), [](auto pcv) { dictionary_column_view view(*pcv); return offsets_pair{view.keys_size(), view.size()}; }, offsets_pair{0, 0}, [](auto lhs, auto rhs) { return offsets_pair{lhs.first + rhs.first, lhs.second + rhs.second}; }); auto d_offsets = rmm::device_uvector<offsets_pair>(offsets.size(), stream); CUDA_TRY(hipMemcpyAsync(d_offsets.data(), offsets.data(), offsets.size() * sizeof(offsets_pair), hipMemcpyHostToDevice, stream.value())); stream.synchronize(); return d_offsets; } private: std::vector<column_view const*> columns_ptrs; ///< pointer version of input column_view vector }; /** * @brief Type-dispatch functor for remapping the old indices to new values based * on the new key-set. * * The dispatch is based on the key type. * The output column is the updated indices child for the new dictionary column. */ struct dispatch_compute_indices { template <typename Element> typename std::enable_if_t<cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>> operator()(column_view const& all_keys, column_view const& all_indices, column_view const& new_keys, offsets_pair const* d_offsets, size_type const* d_map_to_keys, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto keys_view = column_device_view::create(all_keys, stream); auto indices_view = column_device_view::create(all_indices, stream); auto d_all_indices = *indices_view; auto indices_itr = cudf::detail::indexalator_factory::make_input_iterator(all_indices); // map the concatenated indices to the concatenated keys auto all_itr = thrust::make_permutation_iterator( keys_view->begin<Element>(), thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), [d_offsets, d_map_to_keys, d_all_indices, indices_itr] __device__(size_type idx) { if (d_all_indices.is_null(idx)) return 0; return indices_itr[idx] + d_offsets[d_map_to_keys[idx]].first; })); auto new_keys_view = column_device_view::create(new_keys, stream); auto begin = new_keys_view->begin<Element>(); auto end = new_keys_view->end<Element>(); // create the indices output column auto result = make_numeric_column( all_indices.type(), all_indices.size(), mask_state::UNALLOCATED, stream, mr); auto result_itr = cudf::detail::indexalator_factory::make_output_iterator(result->mutable_view()); // new indices values are computed by matching the concatenated keys to the new key set #ifdef NDEBUG thrust::lower_bound(rmm::exec_policy(stream), begin, end, all_itr, all_itr + all_indices.size(), result_itr, thrust::less<Element>()); #else // There is a problem with thrust::lower_bound and the output_indexalator. // https://github.com/NVIDIA/thrust/issues/1452; thrust team created nvbug 3322776 // This is a workaround. thrust::transform(rmm::exec_policy(stream), all_itr, all_itr + all_indices.size(), result_itr, [begin, end] __device__(auto key) { auto itr = thrust::lower_bound(thrust::seq, begin, end, key); return static_cast<size_type>(thrust::distance(begin, itr)); }); #endif return result; } template <typename Element, typename... Args> typename std::enable_if_t<!cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>> operator()(Args&&...) 
{ CUDF_FAIL("dictionary concatenate not supported for this column type"); } }; } // namespace std::unique_ptr<column> concatenate(host_span<column_view const> columns, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // exception here is the same behavior as in cudf::concatenate CUDF_EXPECTS(not columns.empty(), "Unexpected empty list of columns to concatenate."); // concatenate the keys (and check the keys match) compute_children_offsets_fn child_offsets_fn{columns}; auto keys_type = child_offsets_fn.get_keys_type(); std::vector<column_view> keys_views(columns.size()); std::transform(columns.begin(), columns.end(), keys_views.begin(), [keys_type](auto cv) { auto dict_view = dictionary_column_view(cv); // empty column may not have keys so we create an empty column_view place-holder if (dict_view.is_empty()) return column_view{keys_type, 0, nullptr}; auto keys = dict_view.keys(); CUDF_EXPECTS(keys.type() == keys_type, "key types of all dictionary columns must match"); return keys; }); auto all_keys = cudf::detail::concatenate(keys_views, stream); // sort keys and remove duplicates; // this becomes the keys child for the output dictionary column auto table_keys = cudf::detail::drop_duplicates(table_view{{all_keys->view()}}, std::vector<size_type>{0}, duplicate_keep_option::KEEP_FIRST, null_equality::EQUAL, null_order::BEFORE, stream, mr) ->release(); std::unique_ptr<column> keys_column(std::move(table_keys.front())); // next, concatenate the indices std::vector<column_view> indices_views(columns.size()); std::transform(columns.begin(), columns.end(), indices_views.begin(), [](auto cv) { auto dict_view = dictionary_column_view(cv); if (dict_view.is_empty()) return column_view{data_type{type_id::UINT32}, 0, nullptr}; return dict_view.get_indices_annotated(); // nicely includes validity mask and view offset }); auto all_indices = cudf::detail::concatenate(indices_views, stream, mr); auto const indices_size = all_indices->size(); // build a vector of values to map the old indices to the concatenated keys auto children_offsets = child_offsets_fn.create_children_offsets(stream); rmm::device_uvector<size_type> map_to_keys(indices_size, stream); auto indices_itr = cudf::detail::make_counting_transform_iterator(1, [] __device__(size_type idx) { return offsets_pair{0, idx}; }); // the indices offsets (pair.second) are for building the map thrust::lower_bound( rmm::exec_policy(stream), children_offsets.begin() + 1, children_offsets.end(), indices_itr, indices_itr + indices_size + 1, map_to_keys.begin(), [] __device__(auto const& lhs, auto const& rhs) { return lhs.second < rhs.second; }); // now recompute the indices values for the new keys_column; // the keys offsets (pair.first) are for mapping to the input keys auto indices_column = type_dispatcher(keys_type, dispatch_compute_indices{}, all_keys->view(), // old keys all_indices->view(), // old indices keys_column->view(), // new keys children_offsets.data(), map_to_keys.data(), stream, mr); // remove the bitmask from the all_indices auto null_count = all_indices->null_count(); // get before release() auto contents = all_indices->release(); // all_indices will now be empty // finally, frankenstein that dictionary column together return make_dictionary_column(std::move(keys_column), std::move(indices_column), std::move(*(contents.null_mask.release())), null_count); } } // namespace detail } // namespace dictionary } // namespace cudf
a45b2a54be21317b8ae03cc79c4737141de14799.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/detail/concatenate.cuh> #include <cudf/detail/indexalator.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/stream_compaction.hpp> #include <cudf/dictionary/detail/concatenate.hpp> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/transform_scan.h> #include <algorithm> #include <vector> namespace cudf { namespace dictionary { namespace detail { namespace { /** * @brief Keys and indices offsets values. * * The first value is the keys offsets and the second values is the indices offsets. * These are offsets to the beginning of each input column after concatenating. */ using offsets_pair = thrust::pair<size_type, size_type>; /** * @brief Utility for calculating the offsets for the concatenated child columns * of the output dictionary column. */ struct compute_children_offsets_fn { /** * @brief Create the utility functor. * * The columns vector is converted into vector of column_view pointers so they * can be used in thrust::transform_exclusive_scan without causing the * compiler warning/error: "host/device function calling host function". * * @param columns The input dictionary columns. */ compute_children_offsets_fn(host_span<column_view const> columns) : columns_ptrs{columns.size()} { std::transform( columns.begin(), columns.end(), columns_ptrs.begin(), [](auto& cv) { return &cv; }); } /** * @brief Return the first keys().type of the dictionary columns. */ data_type get_keys_type() { auto const view(*std::find_if( columns_ptrs.begin(), columns_ptrs.end(), [](auto pcv) { return pcv->size() > 0; })); return dictionary_column_view(*view).keys().type(); } /** * @brief Create the offsets pair for the concatenated columns. * * Both vectors have the length of the number of input columns. * The sizes of each child (keys and indices) of the individual columns * are used to create the offsets. * * @param stream Stream used for allocating the output rmm::device_uvector. * @return Vector of offsets_pair objects for keys and indices. 
*/ rmm::device_uvector<offsets_pair> create_children_offsets(rmm::cuda_stream_view stream) { std::vector<offsets_pair> offsets(columns_ptrs.size()); thrust::transform_exclusive_scan( thrust::host, columns_ptrs.begin(), columns_ptrs.end(), offsets.begin(), [](auto pcv) { dictionary_column_view view(*pcv); return offsets_pair{view.keys_size(), view.size()}; }, offsets_pair{0, 0}, [](auto lhs, auto rhs) { return offsets_pair{lhs.first + rhs.first, lhs.second + rhs.second}; }); auto d_offsets = rmm::device_uvector<offsets_pair>(offsets.size(), stream); CUDA_TRY(cudaMemcpyAsync(d_offsets.data(), offsets.data(), offsets.size() * sizeof(offsets_pair), cudaMemcpyHostToDevice, stream.value())); stream.synchronize(); return d_offsets; } private: std::vector<column_view const*> columns_ptrs; ///< pointer version of input column_view vector }; /** * @brief Type-dispatch functor for remapping the old indices to new values based * on the new key-set. * * The dispatch is based on the key type. * The output column is the updated indices child for the new dictionary column. */ struct dispatch_compute_indices { template <typename Element> typename std::enable_if_t<cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>> operator()(column_view const& all_keys, column_view const& all_indices, column_view const& new_keys, offsets_pair const* d_offsets, size_type const* d_map_to_keys, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto keys_view = column_device_view::create(all_keys, stream); auto indices_view = column_device_view::create(all_indices, stream); auto d_all_indices = *indices_view; auto indices_itr = cudf::detail::indexalator_factory::make_input_iterator(all_indices); // map the concatenated indices to the concatenated keys auto all_itr = thrust::make_permutation_iterator( keys_view->begin<Element>(), thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), [d_offsets, d_map_to_keys, d_all_indices, indices_itr] __device__(size_type idx) { if (d_all_indices.is_null(idx)) return 0; return indices_itr[idx] + d_offsets[d_map_to_keys[idx]].first; })); auto new_keys_view = column_device_view::create(new_keys, stream); auto begin = new_keys_view->begin<Element>(); auto end = new_keys_view->end<Element>(); // create the indices output column auto result = make_numeric_column( all_indices.type(), all_indices.size(), mask_state::UNALLOCATED, stream, mr); auto result_itr = cudf::detail::indexalator_factory::make_output_iterator(result->mutable_view()); // new indices values are computed by matching the concatenated keys to the new key set #ifdef NDEBUG thrust::lower_bound(rmm::exec_policy(stream), begin, end, all_itr, all_itr + all_indices.size(), result_itr, thrust::less<Element>()); #else // There is a problem with thrust::lower_bound and the output_indexalator. // https://github.com/NVIDIA/thrust/issues/1452; thrust team created nvbug 3322776 // This is a workaround. thrust::transform(rmm::exec_policy(stream), all_itr, all_itr + all_indices.size(), result_itr, [begin, end] __device__(auto key) { auto itr = thrust::lower_bound(thrust::seq, begin, end, key); return static_cast<size_type>(thrust::distance(begin, itr)); }); #endif return result; } template <typename Element, typename... Args> typename std::enable_if_t<!cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>> operator()(Args&&...) 
{ CUDF_FAIL("dictionary concatenate not supported for this column type"); } }; } // namespace std::unique_ptr<column> concatenate(host_span<column_view const> columns, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // exception here is the same behavior as in cudf::concatenate CUDF_EXPECTS(not columns.empty(), "Unexpected empty list of columns to concatenate."); // concatenate the keys (and check the keys match) compute_children_offsets_fn child_offsets_fn{columns}; auto keys_type = child_offsets_fn.get_keys_type(); std::vector<column_view> keys_views(columns.size()); std::transform(columns.begin(), columns.end(), keys_views.begin(), [keys_type](auto cv) { auto dict_view = dictionary_column_view(cv); // empty column may not have keys so we create an empty column_view place-holder if (dict_view.is_empty()) return column_view{keys_type, 0, nullptr}; auto keys = dict_view.keys(); CUDF_EXPECTS(keys.type() == keys_type, "key types of all dictionary columns must match"); return keys; }); auto all_keys = cudf::detail::concatenate(keys_views, stream); // sort keys and remove duplicates; // this becomes the keys child for the output dictionary column auto table_keys = cudf::detail::drop_duplicates(table_view{{all_keys->view()}}, std::vector<size_type>{0}, duplicate_keep_option::KEEP_FIRST, null_equality::EQUAL, null_order::BEFORE, stream, mr) ->release(); std::unique_ptr<column> keys_column(std::move(table_keys.front())); // next, concatenate the indices std::vector<column_view> indices_views(columns.size()); std::transform(columns.begin(), columns.end(), indices_views.begin(), [](auto cv) { auto dict_view = dictionary_column_view(cv); if (dict_view.is_empty()) return column_view{data_type{type_id::UINT32}, 0, nullptr}; return dict_view.get_indices_annotated(); // nicely includes validity mask and view offset }); auto all_indices = cudf::detail::concatenate(indices_views, stream, mr); auto const indices_size = all_indices->size(); // build a vector of values to map the old indices to the concatenated keys auto children_offsets = child_offsets_fn.create_children_offsets(stream); rmm::device_uvector<size_type> map_to_keys(indices_size, stream); auto indices_itr = cudf::detail::make_counting_transform_iterator(1, [] __device__(size_type idx) { return offsets_pair{0, idx}; }); // the indices offsets (pair.second) are for building the map thrust::lower_bound( rmm::exec_policy(stream), children_offsets.begin() + 1, children_offsets.end(), indices_itr, indices_itr + indices_size + 1, map_to_keys.begin(), [] __device__(auto const& lhs, auto const& rhs) { return lhs.second < rhs.second; }); // now recompute the indices values for the new keys_column; // the keys offsets (pair.first) are for mapping to the input keys auto indices_column = type_dispatcher(keys_type, dispatch_compute_indices{}, all_keys->view(), // old keys all_indices->view(), // old indices keys_column->view(), // new keys children_offsets.data(), map_to_keys.data(), stream, mr); // remove the bitmask from the all_indices auto null_count = all_indices->null_count(); // get before release() auto contents = all_indices->release(); // all_indices will now be empty // finally, frankenstein that dictionary column together return make_dictionary_column(std::move(keys_column), std::move(indices_column), std::move(*(contents.null_mask.release())), null_count); } } // namespace detail } // namespace dictionary } // namespace cudf
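// Illustrative sketch (not part of the cudf source above): the concatenate() routine
// above maps every row of the concatenated indices column back to its source column by
// running a vectorized thrust::lower_bound over the exclusive row-count offsets. The
// standalone example below reproduces only that mapping step; the names (offsets,
// row_ids, map_to_column) and the column sizes {3, 2, 4} are made up for illustration.
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/binary_search.h>
#include <thrust/sequence.h>
#include <cstdio>

int main() {
  // Exclusive scan of row counts for three hypothetical input columns of sizes 3, 2, 4.
  int h_offsets[3] = {0, 3, 5};
  thrust::device_vector<int> offsets(h_offsets, h_offsets + 3);

  // Row ids 1..9 (the cudf code above also starts its counting iterator at 1).
  thrust::device_vector<int> row_ids(9);
  thrust::sequence(row_ids.begin(), row_ids.end(), 1);

  // For each concatenated row, find which input column it came from.
  thrust::device_vector<int> map_to_column(9);
  thrust::lower_bound(offsets.begin() + 1, offsets.end(),
                      row_ids.begin(), row_ids.end(),
                      map_to_column.begin());

  thrust::host_vector<int> h_map = map_to_column;
  for (int i = 0; i < 9; ++i) printf("%d ", (int)h_map[i]);
  printf("\n");  // expected: 0 0 0 1 1 2 2 2 2
  return 0;
}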
e08bbe89e9c832c390fd0a2ec3bb70aa9979c632.hip
// !!! This is a file automatically generated by hipify!!! #define GLM_FORCE_CUDA #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3 *dev_posReshuffled; glm::vec3 *dev_vel1Reshuffled; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to hipFree in Boids::endSimulation. hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos failed!"); hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!"); hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!"); hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!"); hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!"); hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!"); hipMalloc((void**)&dev_posReshuffled, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_posReshuffled failed!"); hipMalloc((void**)&dev_vel1Reshuffled, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel1Reshuffled failed!"); hipDeviceSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); hipDeviceSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { glm::vec3 center = pos[iSelf]; float N1 = 1.f; glm::vec3 c = glm::vec3(0.0f, 0.0f, 0.0f); glm::vec3 velocity = glm::vec3(0.0f, 0.0f, 0.0f); float N3 = 0.f; for (int i = 0; i < N; i++) { if (i == iSelf) continue; float dist = glm::distance(pos[i], pos[iSelf]); if (dist < rule1Distance) { center += pos[i]; N1++; } if (dist < rule2Distance) c -= (pos[i] - pos[iSelf]); if (dist < rule3Distance) { velocity += vel[i]; N3++; } } glm::vec3 sumVel = glm::vec3(0.0f, 0.0f, 0.0f); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (N1 > 0) { center /= N1; sumVel += (center - pos[iSelf]) * rule1Scale; } // Rule 2: boids try to stay a distance d away from each other sumVel += c * rule2Scale; // Rule 3: boids try to match the speed of surrounding boids if (N3 > 0) { velocity /= N3; sumVel += velocity * rule3Scale; } return sumVel; } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // Compute a new velocity based on pos and vel1 int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 newVel = vel1[index] + computeVelocityChange(N, index, pos, vel1); // Clamp the speed float speed = glm::length(newVel); if (speed > maxSpeed) { newVel = glm::normalize(newVel) * maxSpeed; } // Record the new velocity into vel2. Question: why NOT vel1? vel2[index] = newVel; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. 
*/ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // TODO-2.1 int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } // - Label each boid with the index of its grid cell. glm::vec3 gridLoc = glm::floor((pos[index] - gridMin)*inverseCellWidth); gridIndices[index] = gridIndex3Dto1D(gridLoc[0], gridLoc[1], gridLoc[2], gridResolution); // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 indices[index] = index; } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int thisCell = particleGridIndices[index]; if (index == 0) { gridCellStartIndices[thisCell] = index; } if (index == N - 1) { gridCellEndIndices[thisCell] = index; return; } int nextCell = particleGridIndices[index + 1]; if (nextCell != thisCell) { gridCellEndIndices[thisCell] = index; gridCellStartIndices[nextCell] = index + 1; } } __global__ void kernCreateCoherentBuffers( int N, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel, glm::vec3 *posShuffle, glm::vec3 *velShuffle) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int idxSorted = particleArrayIndices[index]; posShuffle[index] = pos[idxSorted]; velShuffle[index] = vel[idxSorted]; } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. 
int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } glm::vec3 center = pos[index]; float N1 = 1.f; glm::vec3 c = glm::vec3(0.0f, 0.0f, 0.0f); glm::vec3 velocity = glm::vec3(0.0f, 0.0f, 0.0f); float N3 = 0.f; // - Identify the grid cell that this particle is in glm::vec3 gridLoc = (pos[index] - gridMin)*inverseCellWidth; int x1 = floor(gridLoc[0]); int y1 = floor(gridLoc[1]); int z1 = floor(gridLoc[2]); glm::vec3 gridCell = glm::floor(gridLoc); // - Identify which cells may contain neighbors. This isn't always 8. glm::vec3 midCellPos = (gridCell + glm::vec3(0.5)) * cellWidth; // location of center of grid cell this boid is in glm::vec3 sign = glm::sign(gridLoc - midCellPos); // vector of signs representing whether the boid is in the first or sencond half of the cell wrt the three dims for (int z = 0; z <= 1; z++) { for (int y = 0; y <= 1; y++) { for (int x = 0; x <= 1; x++) { glm::vec3 cellToCheck = gridCell + glm::vec3(x, y, z) * sign; if (cellToCheck[0] < 0 || cellToCheck[0] >= gridResolution || cellToCheck[1] < 0 || cellToCheck[1] >= gridResolution || cellToCheck[2] < 0 || cellToCheck[2] >= gridResolution) continue; // - For each cell, read the start/end indices in the boid pointer array. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. //int cell = gridIndex3Dto1D(i, j, k, gridResolution); int cell = gridIndex3Dto1D(cellToCheck[0], cellToCheck[1], cellToCheck[2], gridResolution); for (int b = gridCellStartIndices[cell]; b <= gridCellEndIndices[cell]; b++) { if (b < 0) break; int bIdx = particleArrayIndices[b]; // this boid's index in the particle array if (bIdx == index) continue; float dist = glm::distance(pos[bIdx], pos[index]); if (dist < rule1Distance) { center += pos[bIdx]; N1++; } if (dist < rule2Distance) c -= (pos[bIdx] - pos[index]); if (dist < rule3Distance) { velocity += vel1[bIdx]; N3++; } } } } } glm::vec3 sumVel = vel1[index]; if (N1 > 0) { center /= N1; sumVel += (center - pos[index]) * rule1Scale; } sumVel += c * rule2Scale; if (N3 > 0) { velocity /= N3; sumVel += velocity * rule3Scale; } // - Clamp the speed change before putting the new speed in vel2 float speed = glm::length(sumVel); if (speed > maxSpeed) { sumVel = glm::normalize(sumVel) * maxSpeed; } vel2[index] = sumVel; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } glm::vec3 center = pos[index]; float N1 = 1.f; glm::vec3 c = glm::vec3(0.0f, 0.0f, 0.0f); glm::vec3 velocity = glm::vec3(0.0f, 0.0f, 0.0f); float N3 = 0.f; // - Identify the grid cell that this particle is in glm::vec3 gridLoc = (pos[index] - gridMin)*inverseCellWidth; int x1 = floor(gridLoc[0]); int y1 = floor(gridLoc[1]); int z1 = floor(gridLoc[2]); glm::vec3 gridCell = glm::floor(gridLoc); // - Identify which cells may contain neighbors. This isn't always 8. 
glm::vec3 midCellPos = (gridCell + glm::vec3(0.5)) * cellWidth; // location of center of grid cell this boid is in glm::vec3 sign = glm::sign(gridLoc - midCellPos); // vector of signs representing whether the boid is in the first or sencond half of the cell wrt the three dims for (int z = 0; z <= 1; z++) { for (int y = 0; y <= 1; y++) { for (int x = 0; x <= 1; x++) { glm::vec3 cellToCheck = gridCell + glm::vec3(x, y, z) * sign; if (cellToCheck[0] < 0 || cellToCheck[0] >= gridResolution || cellToCheck[1] < 0 || cellToCheck[1] >= gridResolution || cellToCheck[2] < 0 || cellToCheck[2] >= gridResolution) continue; // - For each cell, read the start/end indices in the boid pointer array. // For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. //int cell = gridIndex3Dto1D(i, j, k, gridResolution); int cell = gridIndex3Dto1D(cellToCheck[0], cellToCheck[1], cellToCheck[2], gridResolution); for (int b = gridCellStartIndices[cell]; b <= gridCellEndIndices[cell]; b++) { if (b < 0) break; if (b == index) continue; float dist = glm::distance(pos[b], pos[index]); if (dist < rule1Distance) { center += pos[b]; N1++; } if (dist < rule2Distance) c -= (pos[b] - pos[index]); if (dist < rule3Distance) { velocity += vel1[b]; N3++; } } } } } glm::vec3 sumVel = vel1[index]; if (N1 > 0) { center /= N1; sumVel += (center - pos[index]) * rule1Scale; } sumVel += c * rule2Scale; if (N3 > 0) { velocity /= N3; sumVel += velocity * rule3Scale; } // - Clamp the speed change before putting the new speed in vel2 float speed = glm::length(sumVel); if (speed > maxSpeed) { sumVel = glm::normalize(sumVel) * maxSpeed; } vel2[index] = sumVel; } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { // TODO-1.2 - use the kernels you wrote to step the simulation forward in time. dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel1); checkCUDAErrorWithLine("kernUpdatePos failed!"); kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_vel2); checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!"); // TODO-1.2 ping-pong the velocity buffers std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationScatteredGrid(float dt) { // TODO-2.1 dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 fullBlocksPerGridForGridCells((gridCellCount + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGridForGridCells, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer on startIndices failed!"); kernResetIntBuffer << <fullBlocksPerGridForGridCells, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer on endIndices failed!"); // Uniform Grid Neighbor search using Thrust sort. // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); checkCUDAErrorWithLine("kernComputeIndices failed!"); // - Unstable key sort using Thrust. 
A stable sort isn't necessary, but you // are welcome to do a performance comparison. thrust::device_ptr<int> dev_thrust_particleArrayIndices(dev_particleArrayIndices); thrust::device_ptr<int> dev_thrust_particleGridIndices(dev_particleGridIndices); thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!"); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2); checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!"); // - Update positions kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2); checkCUDAErrorWithLine("kernUpdatePos failed!"); // - Ping-pong buffers as needed std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 fullBlocksPerGridForGridCells((gridCellCount + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGridForGridCells, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer on startIndices failed!"); kernResetIntBuffer << <fullBlocksPerGridForGridCells, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer on endIndices failed!"); // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); checkCUDAErrorWithLine("kernComputeIndices failed!"); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. thrust::device_ptr<int> dev_thrust_particleArrayIndices(dev_particleArrayIndices); thrust::device_ptr<int> dev_thrust_particleGridIndices(dev_particleGridIndices); thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!"); // - Use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. 
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED kernCreateCoherentBuffers << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_vel1, dev_posReshuffled, dev_vel1Reshuffled); checkCUDAErrorWithLine("kernCreateCoherentBuffers failed!"); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_posReshuffled, dev_vel1Reshuffled, dev_vel2); checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!"); // - Update positions std::swap(dev_pos, dev_posReshuffled); kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2); checkCUDAErrorWithLine("kernUpdatePos failed!"); // - Ping-pong buffers as needed std::swap(dev_vel1, dev_vel2); } void Boids::endSimulation() { hipFree(dev_vel1); hipFree(dev_vel2); hipFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. hipFree(dev_particleArrayIndices); hipFree(dev_particleGridIndices); hipFree(dev_gridCellStartIndices); hipFree(dev_gridCellEndIndices); hipFree(dev_posReshuffled); hipFree(dev_vel1Reshuffled); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; std::unique_ptr<int[]>intKeys{ new int[N] }; std::unique_ptr<int[]>intValues{ new int[N] }; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; hipMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!"); hipMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice); hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost); hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup hipFree(dev_intKeys); hipFree(dev_intValues); checkCUDAErrorWithLine("hipFree failed!"); return; }
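// Host-side sketch (illustrative only, not part of the file above) of the gridIndex3Dto1D
// layout the boids kernels use. Because x is the fastest-varying term, neighbor cells that
// differ only in x map to consecutive 1D indices, which is why a z-outer / y-middle / x-inner
// walk over neighboring cells touches cell data mostly contiguously (the LOOK-2.3 question).
// The grid resolution and cell coordinates below are arbitrary example values.
#include <cstdio>

static int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + y * gridResolution + z * gridResolution * gridResolution;
}

int main() {
  const int res = 10;                 // hypothetical grid resolution
  const int cx = 4, cy = 5, cz = 6;   // hypothetical cell holding a boid
  for (int z = 0; z <= 1; z++)
    for (int y = 0; y <= 1; y++)
      for (int x = 0; x <= 1; x++)
        printf("(%d,%d,%d) -> %d\n", cx + x, cy + y, cz + z,
               gridIndex3Dto1D(cx + x, cy + y, cz + z, res));
  // Cells differing only in x print adjacent indices (654, 655), while a step
  // in z jumps by res*res = 100.
  return 0;
}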
e08bbe89e9c832c390fd0a2ec3bb70aa9979c632.cu
#define GLM_FORCE_CUDA #include <stdio.h> #include <cuda.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3 *dev_posReshuffled; glm::vec3 *dev_vel1Reshuffled; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to cudaFree in Boids::endSimulation. cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos failed!"); cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!"); cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!"); cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!"); cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!"); cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!"); cudaMalloc((void**)&dev_posReshuffled, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_posReshuffled failed!"); cudaMalloc((void**)&dev_vel1Reshuffled, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel1Reshuffled failed!"); cudaDeviceSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); cudaDeviceSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { glm::vec3 center = pos[iSelf]; float N1 = 1.f; glm::vec3 c = glm::vec3(0.0f, 0.0f, 0.0f); glm::vec3 velocity = glm::vec3(0.0f, 0.0f, 0.0f); float N3 = 0.f; for (int i = 0; i < N; i++) { if (i == iSelf) continue; float dist = glm::distance(pos[i], pos[iSelf]); if (dist < rule1Distance) { center += pos[i]; N1++; } if (dist < rule2Distance) c -= (pos[i] - pos[iSelf]); if (dist < rule3Distance) { velocity += vel[i]; N3++; } } glm::vec3 sumVel = glm::vec3(0.0f, 0.0f, 0.0f); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (N1 > 0) { center /= N1; sumVel += (center - pos[iSelf]) * rule1Scale; } // Rule 2: boids try to stay a distance d away from each other sumVel += c * rule2Scale; // Rule 3: boids try to match the speed of surrounding boids if (N3 > 0) { velocity /= N3; sumVel += velocity * rule3Scale; } return sumVel; } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // Compute a new velocity based on pos and vel1 int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 newVel = vel1[index] + computeVelocityChange(N, index, pos, vel1); // Clamp the speed float speed = glm::length(newVel); if (speed > maxSpeed) { newVel = glm::normalize(newVel) * maxSpeed; } // Record the new velocity into vel2. Question: why NOT vel1? vel2[index] = newVel; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. 
*/ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // TODO-2.1 int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } // - Label each boid with the index of its grid cell. glm::vec3 gridLoc = glm::floor((pos[index] - gridMin)*inverseCellWidth); gridIndices[index] = gridIndex3Dto1D(gridLoc[0], gridLoc[1], gridLoc[2], gridResolution); // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 indices[index] = index; } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int thisCell = particleGridIndices[index]; if (index == 0) { gridCellStartIndices[thisCell] = index; } if (index == N - 1) { gridCellEndIndices[thisCell] = index; return; } int nextCell = particleGridIndices[index + 1]; if (nextCell != thisCell) { gridCellEndIndices[thisCell] = index; gridCellStartIndices[nextCell] = index + 1; } } __global__ void kernCreateCoherentBuffers( int N, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel, glm::vec3 *posShuffle, glm::vec3 *velShuffle) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int idxSorted = particleArrayIndices[index]; posShuffle[index] = pos[idxSorted]; velShuffle[index] = vel[idxSorted]; } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. 
int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } glm::vec3 center = pos[index]; float N1 = 1.f; glm::vec3 c = glm::vec3(0.0f, 0.0f, 0.0f); glm::vec3 velocity = glm::vec3(0.0f, 0.0f, 0.0f); float N3 = 0.f; // - Identify the grid cell that this particle is in glm::vec3 gridLoc = (pos[index] - gridMin)*inverseCellWidth; int x1 = floor(gridLoc[0]); int y1 = floor(gridLoc[1]); int z1 = floor(gridLoc[2]); glm::vec3 gridCell = glm::floor(gridLoc); // - Identify which cells may contain neighbors. This isn't always 8. glm::vec3 midCellPos = (gridCell + glm::vec3(0.5)) * cellWidth; // location of center of grid cell this boid is in glm::vec3 sign = glm::sign(gridLoc - midCellPos); // vector of signs representing whether the boid is in the first or sencond half of the cell wrt the three dims for (int z = 0; z <= 1; z++) { for (int y = 0; y <= 1; y++) { for (int x = 0; x <= 1; x++) { glm::vec3 cellToCheck = gridCell + glm::vec3(x, y, z) * sign; if (cellToCheck[0] < 0 || cellToCheck[0] >= gridResolution || cellToCheck[1] < 0 || cellToCheck[1] >= gridResolution || cellToCheck[2] < 0 || cellToCheck[2] >= gridResolution) continue; // - For each cell, read the start/end indices in the boid pointer array. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. //int cell = gridIndex3Dto1D(i, j, k, gridResolution); int cell = gridIndex3Dto1D(cellToCheck[0], cellToCheck[1], cellToCheck[2], gridResolution); for (int b = gridCellStartIndices[cell]; b <= gridCellEndIndices[cell]; b++) { if (b < 0) break; int bIdx = particleArrayIndices[b]; // this boid's index in the particle array if (bIdx == index) continue; float dist = glm::distance(pos[bIdx], pos[index]); if (dist < rule1Distance) { center += pos[bIdx]; N1++; } if (dist < rule2Distance) c -= (pos[bIdx] - pos[index]); if (dist < rule3Distance) { velocity += vel1[bIdx]; N3++; } } } } } glm::vec3 sumVel = vel1[index]; if (N1 > 0) { center /= N1; sumVel += (center - pos[index]) * rule1Scale; } sumVel += c * rule2Scale; if (N3 > 0) { velocity /= N3; sumVel += velocity * rule3Scale; } // - Clamp the speed change before putting the new speed in vel2 float speed = glm::length(sumVel); if (speed > maxSpeed) { sumVel = glm::normalize(sumVel) * maxSpeed; } vel2[index] = sumVel; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } glm::vec3 center = pos[index]; float N1 = 1.f; glm::vec3 c = glm::vec3(0.0f, 0.0f, 0.0f); glm::vec3 velocity = glm::vec3(0.0f, 0.0f, 0.0f); float N3 = 0.f; // - Identify the grid cell that this particle is in glm::vec3 gridLoc = (pos[index] - gridMin)*inverseCellWidth; int x1 = floor(gridLoc[0]); int y1 = floor(gridLoc[1]); int z1 = floor(gridLoc[2]); glm::vec3 gridCell = glm::floor(gridLoc); // - Identify which cells may contain neighbors. This isn't always 8. 
glm::vec3 midCellPos = (gridCell + glm::vec3(0.5)) * cellWidth; // location of center of grid cell this boid is in glm::vec3 sign = glm::sign(gridLoc - midCellPos); // vector of signs representing whether the boid is in the first or sencond half of the cell wrt the three dims for (int z = 0; z <= 1; z++) { for (int y = 0; y <= 1; y++) { for (int x = 0; x <= 1; x++) { glm::vec3 cellToCheck = gridCell + glm::vec3(x, y, z) * sign; if (cellToCheck[0] < 0 || cellToCheck[0] >= gridResolution || cellToCheck[1] < 0 || cellToCheck[1] >= gridResolution || cellToCheck[2] < 0 || cellToCheck[2] >= gridResolution) continue; // - For each cell, read the start/end indices in the boid pointer array. // For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. //int cell = gridIndex3Dto1D(i, j, k, gridResolution); int cell = gridIndex3Dto1D(cellToCheck[0], cellToCheck[1], cellToCheck[2], gridResolution); for (int b = gridCellStartIndices[cell]; b <= gridCellEndIndices[cell]; b++) { if (b < 0) break; if (b == index) continue; float dist = glm::distance(pos[b], pos[index]); if (dist < rule1Distance) { center += pos[b]; N1++; } if (dist < rule2Distance) c -= (pos[b] - pos[index]); if (dist < rule3Distance) { velocity += vel1[b]; N3++; } } } } } glm::vec3 sumVel = vel1[index]; if (N1 > 0) { center /= N1; sumVel += (center - pos[index]) * rule1Scale; } sumVel += c * rule2Scale; if (N3 > 0) { velocity /= N3; sumVel += velocity * rule3Scale; } // - Clamp the speed change before putting the new speed in vel2 float speed = glm::length(sumVel); if (speed > maxSpeed) { sumVel = glm::normalize(sumVel) * maxSpeed; } vel2[index] = sumVel; } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { // TODO-1.2 - use the kernels you wrote to step the simulation forward in time. dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel1); checkCUDAErrorWithLine("kernUpdatePos failed!"); kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_vel2); checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!"); // TODO-1.2 ping-pong the velocity buffers std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationScatteredGrid(float dt) { // TODO-2.1 dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 fullBlocksPerGridForGridCells((gridCellCount + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGridForGridCells, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer on startIndices failed!"); kernResetIntBuffer << <fullBlocksPerGridForGridCells, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer on endIndices failed!"); // Uniform Grid Neighbor search using Thrust sort. // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); checkCUDAErrorWithLine("kernComputeIndices failed!"); // - Unstable key sort using Thrust. 
A stable sort isn't necessary, but you // are welcome to do a performance comparison. thrust::device_ptr<int> dev_thrust_particleArrayIndices(dev_particleArrayIndices); thrust::device_ptr<int> dev_thrust_particleGridIndices(dev_particleGridIndices); thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!"); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2); checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!"); // - Update positions kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2); checkCUDAErrorWithLine("kernUpdatePos failed!"); // - Ping-pong buffers as needed std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 fullBlocksPerGridForGridCells((gridCellCount + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGridForGridCells, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer on startIndices failed!"); kernResetIntBuffer << <fullBlocksPerGridForGridCells, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer on endIndices failed!"); // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); checkCUDAErrorWithLine("kernComputeIndices failed!"); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. thrust::device_ptr<int> dev_thrust_particleArrayIndices(dev_particleArrayIndices); thrust::device_ptr<int> dev_thrust_particleGridIndices(dev_particleGridIndices); thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!"); // - Use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. 
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED kernCreateCoherentBuffers << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_vel1, dev_posReshuffled, dev_vel1Reshuffled); checkCUDAErrorWithLine("kernCreateCoherentBuffers failed!"); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_posReshuffled, dev_vel1Reshuffled, dev_vel2); checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!"); // - Update positions std::swap(dev_pos, dev_posReshuffled); kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2); checkCUDAErrorWithLine("kernUpdatePos failed!"); // - Ping-pong buffers as needed std::swap(dev_vel1, dev_vel2); } void Boids::endSimulation() { cudaFree(dev_vel1); cudaFree(dev_vel2); cudaFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. cudaFree(dev_particleArrayIndices); cudaFree(dev_particleGridIndices); cudaFree(dev_gridCellStartIndices); cudaFree(dev_gridCellEndIndices); cudaFree(dev_posReshuffled); cudaFree(dev_vel1Reshuffled); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; std::unique_ptr<int[]>intKeys{ new int[N] }; std::unique_ptr<int[]>intValues{ new int[N] }; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; cudaMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!"); cudaMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice); cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost); cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup cudaFree(dev_intKeys); cudaFree(dev_intValues); checkCUDAErrorWithLine("cudaFree failed!"); return; }
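// A small host-side check (illustrative sketch, not part of the file above) of the
// bookkeeping that kernIdentifyCellStartEnd performs in parallel: given grid indices
// already sorted by thrust::sort_by_key, a cell's start is where the value first differs
// from its predecessor and its end is the last position before the value changes again;
// cells left at -1 contain no boids. The sorted indices below are made-up example data.
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> sortedGridIndices = {2, 2, 2, 5, 5, 7};  // hypothetical, already sorted
  const int gridCellCount = 9;
  std::vector<int> cellStart(gridCellCount, -1), cellEnd(gridCellCount, -1);

  const int N = static_cast<int>(sortedGridIndices.size());
  for (int i = 0; i < N; ++i) {       // same branches as the kernel, one "thread" per i
    int thisCell = sortedGridIndices[i];
    if (i == 0) cellStart[thisCell] = i;
    if (i == N - 1) { cellEnd[thisCell] = i; continue; }
    int nextCell = sortedGridIndices[i + 1];
    if (nextCell != thisCell) {
      cellEnd[thisCell] = i;
      cellStart[nextCell] = i + 1;
    }
  }

  for (int c = 0; c < gridCellCount; ++c)
    printf("cell %d: start=%d end=%d\n", c, cellStart[c], cellEnd[c]);
  // Expected: cell 2 -> [0,2], cell 5 -> [3,4], cell 7 -> [5,5], all other cells -1/-1.
  return 0;
}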
60cb96fbc1aafc318e195c1884e23b7e74a4095b.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "bpnn_layerforward_CUDA.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *input_cuda = NULL;
            hipMalloc(&input_cuda, XSIZE*YSIZE);
            float *output_hidden_cuda = NULL;
            hipMalloc(&output_hidden_cuda, XSIZE*YSIZE);
            float *input_hidden_cuda = NULL;
            hipMalloc(&input_hidden_cuda, XSIZE*YSIZE);
            float *hidden_partial_sum = NULL;
            hipMalloc(&hidden_partial_sum, XSIZE*YSIZE);
            int in = 1;
            int hid = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL((bpnn_layerforward_CUDA), dim3(gridBlock), dim3(threadBlock), 0, 0, input_cuda, output_hidden_cuda, input_hidden_cuda, hidden_partial_sum, in, hid);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((bpnn_layerforward_CUDA), dim3(gridBlock), dim3(threadBlock), 0, 0, input_cuda, output_hidden_cuda, input_hidden_cuda, hidden_partial_sum, in, hid);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((bpnn_layerforward_CUDA), dim3(gridBlock), dim3(threadBlock), 0, 0, input_cuda, output_hidden_cuda, input_hidden_cuda, hidden_partial_sum, in, hid);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
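// Hedged observation on the generated harness above (and its CUDA twin below): the
// hipMalloc/cudaMalloc calls request XSIZE*YSIZE *bytes*, so when the buffers are read
// as float arrays only a quarter of XSIZE*YSIZE elements are actually backed by memory.
// The snippet below is only an illustrative sketch of the conventional
// element-count * sizeof(type) allocation with error checking, written against the CUDA
// runtime API; it is not a patch to the generated benchmark, and CHECK_CUDA is a
// hypothetical helper introduced here.
#include <cstdio>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                          \
  do {                                                            \
    cudaError_t err_ = (call);                                    \
    if (err_ != cudaSuccess) {                                    \
      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,          \
              cudaGetErrorString(err_));                          \
      return 1;                                                   \
    }                                                             \
  } while (0)

int main() {
  const int XSIZE = 240, YSIZE = 240;   // one of the matrix sizes used above
  float *input = nullptr;
  // Allocate XSIZE*YSIZE float elements, i.e. XSIZE*YSIZE*sizeof(float) bytes.
  CHECK_CUDA(cudaMalloc(&input, (size_t)XSIZE * YSIZE * sizeof(float)));
  CHECK_CUDA(cudaFree(input));
  return 0;
}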
60cb96fbc1aafc318e195c1884e23b7e74a4095b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "bpnn_layerforward_CUDA.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *input_cuda = NULL; cudaMalloc(&input_cuda, XSIZE*YSIZE); float *output_hidden_cuda = NULL; cudaMalloc(&output_hidden_cuda, XSIZE*YSIZE); float *input_hidden_cuda = NULL; cudaMalloc(&input_hidden_cuda, XSIZE*YSIZE); float *hidden_partial_sum = NULL; cudaMalloc(&hidden_partial_sum, XSIZE*YSIZE); int in = 1; int hid = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); bpnn_layerforward_CUDA<<<gridBlock,threadBlock>>>(input_cuda,output_hidden_cuda,input_hidden_cuda,hidden_partial_sum,in,hid); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { bpnn_layerforward_CUDA<<<gridBlock,threadBlock>>>(input_cuda,output_hidden_cuda,input_hidden_cuda,hidden_partial_sum,in,hid); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { bpnn_layerforward_CUDA<<<gridBlock,threadBlock>>>(input_cuda,output_hidden_cuda,input_hidden_cuda,hidden_partial_sum,in,hid); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
14382be3b50f713a3f2ab4eb40a05bf719796bdb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]>, created on 25.01.2019 // #include <loops/special_kernels.h> namespace sd { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // kernel to swap two NDArrays vals as linear sequences // input - theSecondBuffer/Shape from input NDArray // output - theFirstBuffer/Shape from input NDArray template <typename T> static __global__ void swapUnsafeKernel(void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; __shared__ Nd4jLong resultLength, xEws, yEws; __shared__ bool sameOffsets, sameOrders; __shared__ T* input; __shared__ T* output; if (0 == threadIdx.x) { resultLength = shape::length(theFirstShape); input = reinterpret_cast<T*>(theSecondBuffer); output = reinterpret_cast<T*>(theFirstBuffer); sameOffsets = shape::haveSameShapeAndStrides(theFirstShape, theSecondShape); sameOrders = shape::order(theFirstShape) == shape::order(theSecondShape); xEws = shape::elementWiseStride(theFirstShape); yEws = shape::elementWiseStride(theSecondShape); } __syncthreads(); for (int i = tid; i < resultLength; i += totalThreads) { if(sameOrders && xEws > 0 && yEws > 0) { sd::math::nd4j_swap(output[i*xEws], input[i*yEws]); } else if(sameOffsets) { const auto offset = shape::getIndexOffset(i, theFirstShape); sd::math::nd4j_swap(output[offset], input[offset]); } else{ const auto xOffset = shape::getIndexOffset(i, theFirstShape); const auto yOffset = shape::getIndexOffset(i, theSecondShape); sd::math::nd4j_swap(output[xOffset], input[yOffset]); } } } BUILD_SINGLE_TEMPLATE(template __global__ void swapUnsafeKernel, (void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape), LIBND4J_TYPES); template <typename T> void templatedSwapUnsafe(void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape, hipStream_t* theStream) { hipLaunchKernelGGL(( swapUnsafeKernel<T>), dim3(256), dim3(512), 8192, *theStream, theFirstBuffer, theFirstShape, theSecondBuffer, theSecondShape); } BUILD_SINGLE_TEMPLATE(template void templatedSwapUnsafe, (void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape, hipStream_t* theStream), LIBND4J_TYPES); }
14382be3b50f713a3f2ab4eb40a05bf719796bdb.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]>, created on 25.01.2019 // #include <loops/special_kernels.h> namespace sd { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // kernel to swap two NDArrays vals as linear sequences // input - theSecondBuffer/Shape from input NDArray // output - theFirstBuffer/Shape from input NDArray template <typename T> static __global__ void swapUnsafeKernel(void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; __shared__ Nd4jLong resultLength, xEws, yEws; __shared__ bool sameOffsets, sameOrders; __shared__ T* input; __shared__ T* output; if (0 == threadIdx.x) { resultLength = shape::length(theFirstShape); input = reinterpret_cast<T*>(theSecondBuffer); output = reinterpret_cast<T*>(theFirstBuffer); sameOffsets = shape::haveSameShapeAndStrides(theFirstShape, theSecondShape); sameOrders = shape::order(theFirstShape) == shape::order(theSecondShape); xEws = shape::elementWiseStride(theFirstShape); yEws = shape::elementWiseStride(theSecondShape); } __syncthreads(); for (int i = tid; i < resultLength; i += totalThreads) { if(sameOrders && xEws > 0 && yEws > 0) { sd::math::nd4j_swap(output[i*xEws], input[i*yEws]); } else if(sameOffsets) { const auto offset = shape::getIndexOffset(i, theFirstShape); sd::math::nd4j_swap(output[offset], input[offset]); } else{ const auto xOffset = shape::getIndexOffset(i, theFirstShape); const auto yOffset = shape::getIndexOffset(i, theSecondShape); sd::math::nd4j_swap(output[xOffset], input[yOffset]); } } } BUILD_SINGLE_TEMPLATE(template __global__ void swapUnsafeKernel, (void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape), LIBND4J_TYPES); template <typename T> void templatedSwapUnsafe(void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape, cudaStream_t* theStream) { swapUnsafeKernel<T><<<256, 512, 8192, *theStream>>>(theFirstBuffer, theFirstShape, theSecondBuffer, theSecondShape); } BUILD_SINGLE_TEMPLATE(template void templatedSwapUnsafe, (void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape, cudaStream_t* theStream), LIBND4J_TYPES); }
2be42d040ae58e3b9e180d954241833f8cda54fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "network_functions/PsiOkVector.hpp" #include "quantum_states.hpp" #include "spin_ensembles.hpp" #include "types.h" namespace rbm_on_gpu { template<typename Psi_t> void psi_O_k_vector(complex<double>* result, const Psi_t& psi, const Spins& spins) { complex_t* result_ptr; auto O_k_length = psi.get_num_params(); auto psi_kernel = psi.get_kernel(); // printf("O_k_length: %d\n", O_k_length); MALLOC(result_ptr, sizeof(complex_t) * O_k_length, psi.gpu); // MEMSET(result_ptr, 0, sizeof(complex_t) * O_k_length, psi.gpu); const auto functor = [=] __host__ __device__ () { #include "cuda_kernel_defines.h" SHARED typename Psi_t::Angles angles; angles.init(psi_kernel, spins); psi_kernel.foreach_O_k( spins, angles, [&](const unsigned int k, const complex_t& O_k_element) { // printf("%d, %f, %f\n", k, O_k_element.real(), O_k_element.imag()); result_ptr[k] = O_k_element; } ); }; if(psi.gpu) { hipLaunchKernelGGL(( cuda_kernel), dim3(1), dim3(psi.get_width()), 0, 0, functor); } else { functor(); } MEMCPY_TO_HOST(result, result_ptr, sizeof(complex_t) * O_k_length, psi.gpu); FREE(result_ptr, psi.gpu); } template<typename Psi_t, typename SpinEnsemble> void psi_O_k_vector(complex<double>* result, complex<double>* result_std, const Psi_t& psi, SpinEnsemble& spin_ensemble) { const auto O_k_length = psi.get_num_params(); const auto psi_kernel = psi.get_kernel(); complex_t* result_device; complex_t* result2_device; MALLOC(result_device, sizeof(complex_t) * O_k_length, psi.gpu); MALLOC(result2_device, sizeof(complex_t) * O_k_length, psi.gpu); MEMSET(result_device, 0, sizeof(complex_t) * O_k_length, psi.gpu); MEMSET(result2_device, 0, sizeof(complex_t) * O_k_length, psi.gpu); spin_ensemble.foreach( psi, [=] __device__ __host__ ( const unsigned int spin_index, const Spins spins, const complex_t log_psi, typename Psi_t::Angles& angles, const double weight ) { psi_kernel.foreach_O_k( spins, angles, [&](const unsigned int k, const complex_t& O_k_element) { generic_atomicAdd(&result_device[k], weight * O_k_element); const auto O_k_element2 = complex_t( O_k_element.real() * O_k_element.real(), O_k_element.imag() * O_k_element.imag() ); generic_atomicAdd(&result2_device[k], weight * O_k_element2); } ); } ); MEMCPY_TO_HOST(result, result_device, sizeof(complex_t) * O_k_length, psi.gpu); MEMCPY_TO_HOST(result_std, result2_device, sizeof(complex_t) * O_k_length, psi.gpu); FREE(result_device, psi.gpu); FREE(result2_device, psi.gpu); for(auto k = 0u; k < O_k_length; k++) { result[k] /= spin_ensemble.get_num_steps(); result_std[k] /= spin_ensemble.get_num_steps(); result_std[k] = result_std[k] - complex<double>( result[k].real() * result[k].real(), result[k].imag() * result[k].imag() ); } } template<typename Psi_t, typename SpinEnsemble> pair<Array<complex_t>, Array<double>> psi_O_k_vector(const Psi_t& psi, SpinEnsemble& spin_ensemble) { const auto O_k_length = psi.get_num_params(); const auto psi_kernel = psi.get_kernel(); Array<complex_t> result(O_k_length, psi.gpu); Array<double> result_std(O_k_length, psi.gpu); result.clear(); result_std.clear(); auto result_ptr = result.data(); auto result_std_ptr = result_std.data(); spin_ensemble.foreach( psi, [=] __device__ __host__ ( const unsigned int spin_index, const Spins spins, const complex_t log_psi, typename Psi_t::Angles& angles, const double weight ) { psi_kernel.foreach_O_k( spins, angles, [&](const unsigned int k, const complex_t& O_k_element) { generic_atomicAdd(&result_ptr[k], 
weight * O_k_element); generic_atomicAdd(&result_std_ptr[k], weight * (O_k_element * conj(O_k_element)).real()); } ); } ); result.update_host(); result_std.update_host(); for(auto k = 0u; k < O_k_length; k++) { result[k] /= spin_ensemble.get_num_steps(); result_std[k] /= spin_ensemble.get_num_steps(); result_std[k] = sqrt((result_std[k] - result[k] * conj(result[k])).real()); } return {result, result_std}; } #ifdef ENABLE_PSI template void psi_O_k_vector(complex<double>* result, const Psi& psi, const Spins& spins); #endif // ENABLE_PSI #ifdef ENABLE_PSI_DEEP template void psi_O_k_vector(complex<double>* result, const PsiDeep& psi, const Spins& spins); #endif // ENABLE_PSI_DEEP #ifdef ENABLE_PSI_PAIR // template void psi_O_k_vector(complex<double>* result, const PsiPair& psi, const Spins& spins); #endif // ENABLE_PSI_PAIR } // namespace rbm_on_gpu
2be42d040ae58e3b9e180d954241833f8cda54fc.cu
#include "network_functions/PsiOkVector.hpp" #include "quantum_states.hpp" #include "spin_ensembles.hpp" #include "types.h" namespace rbm_on_gpu { template<typename Psi_t> void psi_O_k_vector(complex<double>* result, const Psi_t& psi, const Spins& spins) { complex_t* result_ptr; auto O_k_length = psi.get_num_params(); auto psi_kernel = psi.get_kernel(); // printf("O_k_length: %d\n", O_k_length); MALLOC(result_ptr, sizeof(complex_t) * O_k_length, psi.gpu); // MEMSET(result_ptr, 0, sizeof(complex_t) * O_k_length, psi.gpu); const auto functor = [=] __host__ __device__ () { #include "cuda_kernel_defines.h" SHARED typename Psi_t::Angles angles; angles.init(psi_kernel, spins); psi_kernel.foreach_O_k( spins, angles, [&](const unsigned int k, const complex_t& O_k_element) { // printf("%d, %f, %f\n", k, O_k_element.real(), O_k_element.imag()); result_ptr[k] = O_k_element; } ); }; if(psi.gpu) { cuda_kernel<<<1, psi.get_width()>>>(functor); } else { functor(); } MEMCPY_TO_HOST(result, result_ptr, sizeof(complex_t) * O_k_length, psi.gpu); FREE(result_ptr, psi.gpu); } template<typename Psi_t, typename SpinEnsemble> void psi_O_k_vector(complex<double>* result, complex<double>* result_std, const Psi_t& psi, SpinEnsemble& spin_ensemble) { const auto O_k_length = psi.get_num_params(); const auto psi_kernel = psi.get_kernel(); complex_t* result_device; complex_t* result2_device; MALLOC(result_device, sizeof(complex_t) * O_k_length, psi.gpu); MALLOC(result2_device, sizeof(complex_t) * O_k_length, psi.gpu); MEMSET(result_device, 0, sizeof(complex_t) * O_k_length, psi.gpu); MEMSET(result2_device, 0, sizeof(complex_t) * O_k_length, psi.gpu); spin_ensemble.foreach( psi, [=] __device__ __host__ ( const unsigned int spin_index, const Spins spins, const complex_t log_psi, typename Psi_t::Angles& angles, const double weight ) { psi_kernel.foreach_O_k( spins, angles, [&](const unsigned int k, const complex_t& O_k_element) { generic_atomicAdd(&result_device[k], weight * O_k_element); const auto O_k_element2 = complex_t( O_k_element.real() * O_k_element.real(), O_k_element.imag() * O_k_element.imag() ); generic_atomicAdd(&result2_device[k], weight * O_k_element2); } ); } ); MEMCPY_TO_HOST(result, result_device, sizeof(complex_t) * O_k_length, psi.gpu); MEMCPY_TO_HOST(result_std, result2_device, sizeof(complex_t) * O_k_length, psi.gpu); FREE(result_device, psi.gpu); FREE(result2_device, psi.gpu); for(auto k = 0u; k < O_k_length; k++) { result[k] /= spin_ensemble.get_num_steps(); result_std[k] /= spin_ensemble.get_num_steps(); result_std[k] = result_std[k] - complex<double>( result[k].real() * result[k].real(), result[k].imag() * result[k].imag() ); } } template<typename Psi_t, typename SpinEnsemble> pair<Array<complex_t>, Array<double>> psi_O_k_vector(const Psi_t& psi, SpinEnsemble& spin_ensemble) { const auto O_k_length = psi.get_num_params(); const auto psi_kernel = psi.get_kernel(); Array<complex_t> result(O_k_length, psi.gpu); Array<double> result_std(O_k_length, psi.gpu); result.clear(); result_std.clear(); auto result_ptr = result.data(); auto result_std_ptr = result_std.data(); spin_ensemble.foreach( psi, [=] __device__ __host__ ( const unsigned int spin_index, const Spins spins, const complex_t log_psi, typename Psi_t::Angles& angles, const double weight ) { psi_kernel.foreach_O_k( spins, angles, [&](const unsigned int k, const complex_t& O_k_element) { generic_atomicAdd(&result_ptr[k], weight * O_k_element); generic_atomicAdd(&result_std_ptr[k], weight * (O_k_element * conj(O_k_element)).real()); } ); } ); 
result.update_host(); result_std.update_host(); for(auto k = 0u; k < O_k_length; k++) { result[k] /= spin_ensemble.get_num_steps(); result_std[k] /= spin_ensemble.get_num_steps(); result_std[k] = sqrt((result_std[k] - result[k] * conj(result[k])).real()); } return {result, result_std}; } #ifdef ENABLE_PSI template void psi_O_k_vector(complex<double>* result, const Psi& psi, const Spins& spins); #endif // ENABLE_PSI #ifdef ENABLE_PSI_DEEP template void psi_O_k_vector(complex<double>* result, const PsiDeep& psi, const Spins& spins); #endif // ENABLE_PSI_DEEP #ifdef ENABLE_PSI_PAIR // template void psi_O_k_vector(complex<double>* result, const PsiPair& psi, const Spins& spins); #endif // ENABLE_PSI_PAIR } // namespace rbm_on_gpu
ae87076c1bea7d9c37221ef0f7a485b8f97822e0.hip
// !!! This is a file automatically generated by hipify!!! #include "nne.cuh" #include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" #include "device_launch_parameters.h" #include <cstdlib> __global__ void nodeCal(float* inList, float* wList, float* outList); __global__ void nodeLog(float* outputList, float sigmoidConst); __global__ void nodeGradCal(float* wList, float* outputList, float* gradList); __global__ void nodeDelLog(float* inputList, float* gradList, float sigmoidConst); __global__ void nodeLearn(float *inputList, float *delList, float *weightList, float learningFactor, int inputNum); Node::Node() : output(0), input(0), localGrad(0) { inputWeightList.push_back(0); } Node::Node(int inputNum) : output(0), input(0), localGrad(0) { inputWeightList.push_back(0); for (int i = 0; i < inputNum; i++) { inputWeightList.push_back((float)rand() / RAND_MAX); } } Node::Node(std::vector<float>& inputWeightList, int nodeIndex, int inputWeightLength) : output(0), input(0), localGrad(0) { inputWeightLength++; int offset = nodeIndex * inputWeightLength; //inputWeightList.push_back(0); for (int i = 0; i < inputWeightLength; i++) { this->inputWeightList.push_back(inputWeightList[offset + i]); } } Node::~Node() {} Layer::Layer() : sigmoidConst(0.01) {} Layer::Layer(int nodeListLength, int inputWeightLength, float sigmoidConst){ Node* newNode; this->sigmoidConst = sigmoidConst; for (int i = 0; i < nodeListLength; i++) { newNode = new Node(inputWeightLength); nodeList.push_back(newNode); } } Layer::Layer(std::vector<float>& inputWeightList, int nodeListLength, int inputWeightLength, float sigmoidConst) { Node* newNode; this->sigmoidConst = sigmoidConst; for (int i = 0; i < nodeListLength; i++) { newNode = new Node(inputWeightList, i, inputWeightLength); nodeList.push_back(newNode); } } Layer::~Layer() { int length = nodeList.size(); for (int i = 0; i < length; i++) { delete nodeList[i]; } } void Layer::forwardCal(std::vector<float>& inputList) { int inputNum = inputList.size() + 1; int outputNum = nodeList.size(); std::vector<float> weightList; float* outputList = new float[outputNum]; float *dInputList, *dWeightList, *dOutputList; hipMalloc(&dInputList, inputNum * sizeof(float)); hipMalloc(&dWeightList, inputNum * outputNum * sizeof(float)); hipMalloc(&dOutputList, outputNum * sizeof(float)); inputList.insert(inputList.begin(), 1); hipMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), hipMemcpyHostToDevice); for (int i = 0; i < outputNum; i++) { weightList.insert(weightList.end(), nodeList[i]->inputWeightList.begin(), nodeList[i]->inputWeightList.end()); } hipMemcpy(dWeightList, weightList.data(), inputNum * outputNum * sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(dOutputList, outputList, outputNum * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( nodeCal) , dim3(outputNum), dim3(inputNum), sizeof(float) * inputNum, 0, dInputList, dWeightList, dOutputList); hipMemcpy(outputList, dOutputList, outputNum * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < outputNum; i++) { nodeList[i]->input = outputList[i]; } hipLaunchKernelGGL(( nodeLog) , dim3(1), dim3(outputNum) , 0, 0, dOutputList, sigmoidConst); hipMemcpy(outputList, dOutputList, outputNum * sizeof(float), hipMemcpyDeviceToHost); hipFree(dInputList); hipFree(dWeightList); hipFree(dOutputList); for (int i = 0; i < outputNum; i++) { nodeList[i]->output = outputList[i]; } delete outputList; inputList.erase(inputList.begin()); } void Layer::forwardCal(Layer& bLayer){ std::vector<Node*> &bNodeList = 
bLayer.nodeList; int inputNum = bNodeList.size(); int outputNum = nodeList.size(); std::vector<float> inputList; std::vector<float> weightList; float* outputList = new float[outputNum]; float *dInputList, *dWeightList, *dOutputList; hipMalloc(&dInputList, (inputNum + 1) * sizeof(float)); hipMalloc(&dWeightList, (inputNum + 1) * outputNum * sizeof(float)); hipMalloc(&dOutputList, outputNum * sizeof(float)); inputList.push_back(1); for (int i = 0; i < inputNum; i++) { inputList.push_back((*bNodeList[i]).output); } inputNum++; hipMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), hipMemcpyHostToDevice); for (int i = 0; i < outputNum; i++) { weightList.insert(weightList.end(), nodeList[i]->inputWeightList.begin(), nodeList[i]->inputWeightList.end()); } hipMemcpy(dWeightList, weightList.data(), inputNum * outputNum * sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(dOutputList, outputList, outputNum * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( nodeCal) , dim3(outputNum), dim3(inputNum), sizeof(float) * inputNum , 0, dInputList, dWeightList, dOutputList); hipMemcpy(outputList, dOutputList, outputNum * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < outputNum; i++) { nodeList[i]->input = outputList[i]; } hipLaunchKernelGGL(( nodeLog) , dim3(1), dim3(outputNum) , 0, 0, dOutputList, sigmoidConst); hipMemcpy(outputList, dOutputList, outputNum * sizeof(float), hipMemcpyDeviceToHost); hipFree(dInputList); hipFree(dWeightList); hipFree(dOutputList); for (int i = 0; i < outputNum; i++) { nodeList[i]->output = outputList[i]; } delete outputList; } void Layer::getGrad(Layer& fLayer) { int inputNum = nodeList.size(); int outputNum = fLayer.nodeList.size(); std::vector<Node*> &fNodeList = fLayer.nodeList; std::vector<float> inputList; std::vector<float> weightList; std::vector<float> outputList; float *gradList = new float[inputNum]; float *dInputList, *dWeightList, *dOutputList, *dGradList; hipMalloc(&dInputList, inputNum * sizeof(float)); hipMalloc(&dWeightList, inputNum * outputNum * sizeof(float)); hipMalloc(&dOutputList, outputNum * sizeof(float)); hipMalloc(&dGradList, inputNum * sizeof(float)); for (int i = 0; i < inputNum; i++) { inputList.push_back(nodeList[i]->input); } hipMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), hipMemcpyHostToDevice); for (int i = 0; i < outputNum; i++) { outputList.push_back(fNodeList[i]->localGrad); weightList.insert(weightList.end(), ++(fNodeList[i]->inputWeightList.begin()), fNodeList[i]->inputWeightList.end()); } hipMemcpy(dOutputList, outputList.data(), outputNum * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dWeightList, weightList.data(), inputNum * outputNum * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( nodeGradCal) , dim3(inputNum), dim3(outputNum), sizeof(float) * outputNum , 0, dWeightList, dOutputList, dGradList); hipMemcpy(gradList, dGradList, inputNum * sizeof(float), hipMemcpyDeviceToHost); hipLaunchKernelGGL(( nodeDelLog) , dim3(1), dim3(inputNum) , 0, 0, dInputList, dGradList, sigmoidConst); hipMemcpy(gradList, dGradList, inputNum * sizeof(float), hipMemcpyDeviceToHost); hipFree(dInputList); hipFree(dWeightList); hipFree(dOutputList); hipFree(dGradList); for (int i = 0; i < inputNum; i++) { nodeList[i]->localGrad = gradList[i]; } delete gradList; } float Layer::getGrad(std::vector<float>& answerList) { int inputNum = nodeList.size(); std::vector<float> inputList; std::vector<float> outputList; float *gradList = new float[inputNum]; float *dInputList, *dOutputList, *dGradList, 
mse = 0; hipMalloc(&dInputList, inputNum * sizeof(float)); hipMalloc(&dOutputList, inputNum * sizeof(float)); hipMalloc(&dGradList, inputNum * sizeof(float)); for (int i = 0; i < inputNum; i++) { inputList.push_back(nodeList[i]->input); outputList.push_back(answerList[i] - nodeList[i]->output); } memcpy(gradList, outputList.data(), inputNum * sizeof(float)); hipMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dGradList, gradList, inputNum * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( nodeDelLog) , dim3(1), dim3(inputNum) , 0, 0, dInputList, dGradList, sigmoidConst); hipMemcpy(gradList, dGradList, inputNum * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < inputNum; i++) { nodeList[i]->localGrad = gradList[i]; } hipFree(dInputList); hipFree(dOutputList); hipFree(dGradList); delete gradList; for (int i = 0; i < inputNum; i++) { mse += outputList[i] * outputList[i]; } mse /= inputNum; return mse; } void Layer::learnWeight(Layer& bLayer, float learningFactor) { std::vector<Node*> &bNodeList = bLayer.nodeList; int inputNum = bNodeList.size(); int outputNum = nodeList.size(); std::vector<float> inputList; std::vector<float> delList; float *weightList = new float[(inputNum + 1) * outputNum]; float *dInputList, *dDelList, *dWeightList; dim3 threadGrid(inputNum + 1, outputNum); hipMalloc(&dInputList, (inputNum + 1) * sizeof(float)); hipMalloc(&dDelList, outputNum * sizeof(float)); hipMalloc(&dWeightList, (inputNum + 1) * outputNum * sizeof(float)); inputList.push_back(1); for (int i = 0; i < inputNum; i++) { inputList.push_back(bNodeList[i]->output); } inputNum++; hipMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), hipMemcpyHostToDevice); for (int i = 0; i < outputNum; i++) { memcpy(weightList + i * inputNum, nodeList[i]->inputWeightList.data(), inputNum * sizeof(float)); delList.push_back(nodeList[i]->localGrad); } hipMemcpy(dWeightList, weightList, inputNum * outputNum * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dDelList, delList.data(), outputNum * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( nodeLearn) , dim3(1), dim3(threadGrid) , 0, 0, dInputList, dDelList, dWeightList, learningFactor, inputNum); hipMemcpy(weightList, dWeightList, inputNum * outputNum * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < outputNum; i++) { for (int j = 0; j < inputNum; j++) { nodeList[i]->inputWeightList[j] = weightList[i * inputNum + j]; } } hipFree(dInputList); hipFree(dDelList); hipFree(dWeightList); delete weightList; } void Layer::learnWeight(std::vector<float>& inputList, float learningFactor){ int inputNum = inputList.size() + 1; int outputNum = nodeList.size(); std::vector<float> delList; float *weightList = new float[inputNum * outputNum]; float *dInputList, *dDelList, *dWeightList; dim3 threadGrid(inputNum, outputNum); hipMalloc(&dInputList, inputNum * sizeof(float)); hipMalloc(&dDelList, outputNum * sizeof(float)); hipMalloc(&dWeightList, inputNum * outputNum * sizeof(float)); inputList.insert(inputList.begin(), 1); hipMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), hipMemcpyHostToDevice); for (int i = 0; i < outputNum; i++) { memcpy(weightList + i * inputNum, nodeList[i]->inputWeightList.data(), inputNum * sizeof(float)); delList.push_back(nodeList[i]->localGrad); } hipMemcpy(dWeightList, weightList, inputNum * outputNum * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dDelList, delList.data(), outputNum * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( 
nodeLearn) , dim3(1), dim3(threadGrid) , 0, 0, dInputList, dDelList, dWeightList, learningFactor, inputNum); hipMemcpy(weightList, dWeightList, inputNum * outputNum * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < outputNum; i++) { for (int j = 0; j < inputNum; j++) { nodeList[i]->inputWeightList[j] = weightList[i * inputNum + j]; } } hipFree(dInputList); hipFree(dDelList); hipFree(dWeightList); delete weightList; inputList.erase(inputList.begin()); } __global__ void nodeCal(float* inputList, float* weightList, float* outputList){ //int outputIdx = blockIdx.x * inputNum + threadIdx.x ; int outputIdx = blockIdx.x * blockDim.x + threadIdx.x; float result = 0; extern __shared__ float results[]; results[threadIdx.x] = inputList[threadIdx.x] * weightList[outputIdx]; __syncthreads(); //for (int i = 0; i < inputNum; i++) { for (int i = 0; i < blockDim.x; i++) { result += results[i]; } outputList[blockIdx.x] = result; } __global__ void nodeLog(float* outputList, float sigmoidConst) { outputList[threadIdx.x] = tanh(sigmoidConst * outputList[threadIdx.x]); } __global__ void nodeGradCal(float* wList, float* outputList, float* gradList) { int weightIdx = blockIdx.x + threadIdx.x * gridDim.x; extern __shared__ float results[]; float result = 0; results[threadIdx.x] = outputList[threadIdx.x] * wList[weightIdx]; __syncthreads(); for (int i = 0; i < blockDim.x; i++) { result += results[i]; } gradList[blockIdx.x] = result; } __global__ void nodeDelLog(float* inputList, float* gradList, float sigmoidConst) { float temp; temp = cosh(sigmoidConst * inputList[threadIdx.x]); temp *= temp; temp = sigmoidConst / temp; gradList[threadIdx.x] *= temp; } __global__ void nodeLearn(float *inputList, float *delList, float *weightList, float learningFactor, int inputNum) { int weightIdx = threadIdx.x + threadIdx.y * inputNum; //int weightIdx = threadIdx.x + threadIdx.y * blockDim.x; weightList[weightIdx] += inputList[threadIdx.x] * delList[threadIdx.y] * learningFactor; }
ae87076c1bea7d9c37221ef0f7a485b8f97822e0.cu
#include "nne.cuh" #include "cuda.h" #include "cuda_runtime_api.h" #include "device_launch_parameters.h" #include <cstdlib> __global__ void nodeCal(float* inList, float* wList, float* outList); __global__ void nodeLog(float* outputList, float sigmoidConst); __global__ void nodeGradCal(float* wList, float* outputList, float* gradList); __global__ void nodeDelLog(float* inputList, float* gradList, float sigmoidConst); __global__ void nodeLearn(float *inputList, float *delList, float *weightList, float learningFactor, int inputNum); Node::Node() : output(0), input(0), localGrad(0) { inputWeightList.push_back(0); } Node::Node(int inputNum) : output(0), input(0), localGrad(0) { inputWeightList.push_back(0); for (int i = 0; i < inputNum; i++) { inputWeightList.push_back((float)rand() / RAND_MAX); } } Node::Node(std::vector<float>& inputWeightList, int nodeIndex, int inputWeightLength) : output(0), input(0), localGrad(0) { inputWeightLength++; int offset = nodeIndex * inputWeightLength; //inputWeightList.push_back(0); for (int i = 0; i < inputWeightLength; i++) { this->inputWeightList.push_back(inputWeightList[offset + i]); } } Node::~Node() {} Layer::Layer() : sigmoidConst(0.01) {} Layer::Layer(int nodeListLength, int inputWeightLength, float sigmoidConst){ Node* newNode; this->sigmoidConst = sigmoidConst; for (int i = 0; i < nodeListLength; i++) { newNode = new Node(inputWeightLength); nodeList.push_back(newNode); } } Layer::Layer(std::vector<float>& inputWeightList, int nodeListLength, int inputWeightLength, float sigmoidConst) { Node* newNode; this->sigmoidConst = sigmoidConst; for (int i = 0; i < nodeListLength; i++) { newNode = new Node(inputWeightList, i, inputWeightLength); nodeList.push_back(newNode); } } Layer::~Layer() { int length = nodeList.size(); for (int i = 0; i < length; i++) { delete nodeList[i]; } } void Layer::forwardCal(std::vector<float>& inputList) { int inputNum = inputList.size() + 1; int outputNum = nodeList.size(); std::vector<float> weightList; float* outputList = new float[outputNum]; float *dInputList, *dWeightList, *dOutputList; cudaMalloc(&dInputList, inputNum * sizeof(float)); cudaMalloc(&dWeightList, inputNum * outputNum * sizeof(float)); cudaMalloc(&dOutputList, outputNum * sizeof(float)); inputList.insert(inputList.begin(), 1); cudaMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), cudaMemcpyHostToDevice); for (int i = 0; i < outputNum; i++) { weightList.insert(weightList.end(), nodeList[i]->inputWeightList.begin(), nodeList[i]->inputWeightList.end()); } cudaMemcpy(dWeightList, weightList.data(), inputNum * outputNum * sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(dOutputList, outputList, outputNum * sizeof(float), cudaMemcpyHostToDevice); nodeCal <<<outputNum, inputNum, sizeof(float) * inputNum>>> (dInputList, dWeightList, dOutputList); cudaMemcpy(outputList, dOutputList, outputNum * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < outputNum; i++) { nodeList[i]->input = outputList[i]; } nodeLog <<<1, outputNum >>> (dOutputList, sigmoidConst); cudaMemcpy(outputList, dOutputList, outputNum * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(dInputList); cudaFree(dWeightList); cudaFree(dOutputList); for (int i = 0; i < outputNum; i++) { nodeList[i]->output = outputList[i]; } delete outputList; inputList.erase(inputList.begin()); } void Layer::forwardCal(Layer& bLayer){ std::vector<Node*> &bNodeList = bLayer.nodeList; int inputNum = bNodeList.size(); int outputNum = nodeList.size(); std::vector<float> inputList; std::vector<float> 
weightList; float* outputList = new float[outputNum]; float *dInputList, *dWeightList, *dOutputList; cudaMalloc(&dInputList, (inputNum + 1) * sizeof(float)); cudaMalloc(&dWeightList, (inputNum + 1) * outputNum * sizeof(float)); cudaMalloc(&dOutputList, outputNum * sizeof(float)); inputList.push_back(1); for (int i = 0; i < inputNum; i++) { inputList.push_back((*bNodeList[i]).output); } inputNum++; cudaMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), cudaMemcpyHostToDevice); for (int i = 0; i < outputNum; i++) { weightList.insert(weightList.end(), nodeList[i]->inputWeightList.begin(), nodeList[i]->inputWeightList.end()); } cudaMemcpy(dWeightList, weightList.data(), inputNum * outputNum * sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(dOutputList, outputList, outputNum * sizeof(float), cudaMemcpyHostToDevice); nodeCal <<<outputNum, inputNum, sizeof(float) * inputNum >>> (dInputList, dWeightList, dOutputList); cudaMemcpy(outputList, dOutputList, outputNum * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < outputNum; i++) { nodeList[i]->input = outputList[i]; } nodeLog <<<1, outputNum >>> (dOutputList, sigmoidConst); cudaMemcpy(outputList, dOutputList, outputNum * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(dInputList); cudaFree(dWeightList); cudaFree(dOutputList); for (int i = 0; i < outputNum; i++) { nodeList[i]->output = outputList[i]; } delete outputList; } void Layer::getGrad(Layer& fLayer) { int inputNum = nodeList.size(); int outputNum = fLayer.nodeList.size(); std::vector<Node*> &fNodeList = fLayer.nodeList; std::vector<float> inputList; std::vector<float> weightList; std::vector<float> outputList; float *gradList = new float[inputNum]; float *dInputList, *dWeightList, *dOutputList, *dGradList; cudaMalloc(&dInputList, inputNum * sizeof(float)); cudaMalloc(&dWeightList, inputNum * outputNum * sizeof(float)); cudaMalloc(&dOutputList, outputNum * sizeof(float)); cudaMalloc(&dGradList, inputNum * sizeof(float)); for (int i = 0; i < inputNum; i++) { inputList.push_back(nodeList[i]->input); } cudaMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), cudaMemcpyHostToDevice); for (int i = 0; i < outputNum; i++) { outputList.push_back(fNodeList[i]->localGrad); weightList.insert(weightList.end(), ++(fNodeList[i]->inputWeightList.begin()), fNodeList[i]->inputWeightList.end()); } cudaMemcpy(dOutputList, outputList.data(), outputNum * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dWeightList, weightList.data(), inputNum * outputNum * sizeof(float), cudaMemcpyHostToDevice); nodeGradCal <<<inputNum, outputNum, sizeof(float) * outputNum >>> (dWeightList, dOutputList, dGradList); cudaMemcpy(gradList, dGradList, inputNum * sizeof(float), cudaMemcpyDeviceToHost); nodeDelLog <<<1, inputNum >>> (dInputList, dGradList, sigmoidConst); cudaMemcpy(gradList, dGradList, inputNum * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(dInputList); cudaFree(dWeightList); cudaFree(dOutputList); cudaFree(dGradList); for (int i = 0; i < inputNum; i++) { nodeList[i]->localGrad = gradList[i]; } delete gradList; } float Layer::getGrad(std::vector<float>& answerList) { int inputNum = nodeList.size(); std::vector<float> inputList; std::vector<float> outputList; float *gradList = new float[inputNum]; float *dInputList, *dOutputList, *dGradList, mse = 0; cudaMalloc(&dInputList, inputNum * sizeof(float)); cudaMalloc(&dOutputList, inputNum * sizeof(float)); cudaMalloc(&dGradList, inputNum * sizeof(float)); for (int i = 0; i < inputNum; i++) { 
inputList.push_back(nodeList[i]->input); outputList.push_back(answerList[i] - nodeList[i]->output); } memcpy(gradList, outputList.data(), inputNum * sizeof(float)); cudaMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dGradList, gradList, inputNum * sizeof(float), cudaMemcpyHostToDevice); nodeDelLog <<<1, inputNum >>> (dInputList, dGradList, sigmoidConst); cudaMemcpy(gradList, dGradList, inputNum * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < inputNum; i++) { nodeList[i]->localGrad = gradList[i]; } cudaFree(dInputList); cudaFree(dOutputList); cudaFree(dGradList); delete gradList; for (int i = 0; i < inputNum; i++) { mse += outputList[i] * outputList[i]; } mse /= inputNum; return mse; } void Layer::learnWeight(Layer& bLayer, float learningFactor) { std::vector<Node*> &bNodeList = bLayer.nodeList; int inputNum = bNodeList.size(); int outputNum = nodeList.size(); std::vector<float> inputList; std::vector<float> delList; float *weightList = new float[(inputNum + 1) * outputNum]; float *dInputList, *dDelList, *dWeightList; dim3 threadGrid(inputNum + 1, outputNum); cudaMalloc(&dInputList, (inputNum + 1) * sizeof(float)); cudaMalloc(&dDelList, outputNum * sizeof(float)); cudaMalloc(&dWeightList, (inputNum + 1) * outputNum * sizeof(float)); inputList.push_back(1); for (int i = 0; i < inputNum; i++) { inputList.push_back(bNodeList[i]->output); } inputNum++; cudaMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), cudaMemcpyHostToDevice); for (int i = 0; i < outputNum; i++) { memcpy(weightList + i * inputNum, nodeList[i]->inputWeightList.data(), inputNum * sizeof(float)); delList.push_back(nodeList[i]->localGrad); } cudaMemcpy(dWeightList, weightList, inputNum * outputNum * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dDelList, delList.data(), outputNum * sizeof(float), cudaMemcpyHostToDevice); nodeLearn <<<1, threadGrid >>> (dInputList, dDelList, dWeightList, learningFactor, inputNum); cudaMemcpy(weightList, dWeightList, inputNum * outputNum * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < outputNum; i++) { for (int j = 0; j < inputNum; j++) { nodeList[i]->inputWeightList[j] = weightList[i * inputNum + j]; } } cudaFree(dInputList); cudaFree(dDelList); cudaFree(dWeightList); delete weightList; } void Layer::learnWeight(std::vector<float>& inputList, float learningFactor){ int inputNum = inputList.size() + 1; int outputNum = nodeList.size(); std::vector<float> delList; float *weightList = new float[inputNum * outputNum]; float *dInputList, *dDelList, *dWeightList; dim3 threadGrid(inputNum, outputNum); cudaMalloc(&dInputList, inputNum * sizeof(float)); cudaMalloc(&dDelList, outputNum * sizeof(float)); cudaMalloc(&dWeightList, inputNum * outputNum * sizeof(float)); inputList.insert(inputList.begin(), 1); cudaMemcpy(dInputList, inputList.data(), inputNum * sizeof(float), cudaMemcpyHostToDevice); for (int i = 0; i < outputNum; i++) { memcpy(weightList + i * inputNum, nodeList[i]->inputWeightList.data(), inputNum * sizeof(float)); delList.push_back(nodeList[i]->localGrad); } cudaMemcpy(dWeightList, weightList, inputNum * outputNum * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dDelList, delList.data(), outputNum * sizeof(float), cudaMemcpyHostToDevice); nodeLearn <<<1, threadGrid >>> (dInputList, dDelList, dWeightList, learningFactor, inputNum); cudaMemcpy(weightList, dWeightList, inputNum * outputNum * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < outputNum; i++) { for (int j = 0; j < 
inputNum; j++) { nodeList[i]->inputWeightList[j] = weightList[i * inputNum + j]; } } cudaFree(dInputList); cudaFree(dDelList); cudaFree(dWeightList); delete weightList; inputList.erase(inputList.begin()); } __global__ void nodeCal(float* inputList, float* weightList, float* outputList){ //int outputIdx = blockIdx.x * inputNum + threadIdx.x ; int outputIdx = blockIdx.x * blockDim.x + threadIdx.x; float result = 0; extern __shared__ float results[]; results[threadIdx.x] = inputList[threadIdx.x] * weightList[outputIdx]; __syncthreads(); //for (int i = 0; i < inputNum; i++) { for (int i = 0; i < blockDim.x; i++) { result += results[i]; } outputList[blockIdx.x] = result; } __global__ void nodeLog(float* outputList, float sigmoidConst) { outputList[threadIdx.x] = tanh(sigmoidConst * outputList[threadIdx.x]); } __global__ void nodeGradCal(float* wList, float* outputList, float* gradList) { int weightIdx = blockIdx.x + threadIdx.x * gridDim.x; extern __shared__ float results[]; float result = 0; results[threadIdx.x] = outputList[threadIdx.x] * wList[weightIdx]; __syncthreads(); for (int i = 0; i < blockDim.x; i++) { result += results[i]; } gradList[blockIdx.x] = result; } __global__ void nodeDelLog(float* inputList, float* gradList, float sigmoidConst) { float temp; temp = cosh(sigmoidConst * inputList[threadIdx.x]); temp *= temp; temp = sigmoidConst / temp; gradList[threadIdx.x] *= temp; } __global__ void nodeLearn(float *inputList, float *delList, float *weightList, float learningFactor, int inputNum) { int weightIdx = threadIdx.x + threadIdx.y * inputNum; //int weightIdx = threadIdx.x + threadIdx.y * blockDim.x; weightList[weightIdx] += inputList[threadIdx.x] * delList[threadIdx.y] * learningFactor; }
7b6b5614c0b75618e0fec7faa80a25ddf081bc4f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "uplo_erfc.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int unit = 1; const int bottom = 1; const REAL *a = NULL; hipMalloc(&a, XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; REAL *b = NULL; hipMalloc(&b, XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( uplo_erfc), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( uplo_erfc), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( uplo_erfc), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7b6b5614c0b75618e0fec7faa80a25ddf081bc4f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "uplo_erfc.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int unit = 1; const int bottom = 1; const REAL *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; REAL *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); uplo_erfc<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { uplo_erfc<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { uplo_erfc<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
6e21199263c0bdc5253542394dc13e69b3ef3177.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHTensorRandom.h" #include "THHDeviceUtils.cuh" #include "THHGeneral.h" #include "THHTensorCopy.h" #include "THHTensorMath.h" #include "THHReduceApplyUtils.cuh" #include "THHTensorRandom.cuh" #include "THHGenerator.hpp" #include "ATen/Config.h" #include "ATen/hip/_curand_mtgp32_host.h" #include <thrust/functional.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define MAX_NUM_BLOCKS 64 #define BLOCK_SIZE 256 THCGenerator* THCRandom_getGenerator(THCState* state); /* Sets up generator. Allocates but does not create the generator states. Not thread-safe. */ __host__ void initializeGenerator(THCState *state, THCGenerator* gen) { gen->state.gen_states = static_cast<struct hiprandStateMtgp32_t*>(THCudaMalloc(state, MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t))); gen->state.kernel_params = static_cast<mtgp32_kernel_params_t*>(THCudaMalloc(state, sizeof(mtgp32_kernel_params_t))); } /* Creates a new generator state given the seed. Not thread-safe. */ __host__ void createGeneratorState(THCGenerator* gen, uint64_t seed) { if (hiprandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->state.kernel_params) != HIPRAND_STATUS_SUCCESS) { THError("Creating MTGP constants failed."); } if (hiprandMakeMTGP32KernelState(gen->state.gen_states, mtgp32dc_params_fast_11213, gen->state.kernel_params, MAX_NUM_BLOCKS, seed) != HIPRAND_STATUS_SUCCESS) { THError("Creating MTGP kernel state failed."); } } __host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state) { THCGenerator* gen = THCRandom_getGenerator(state); std::lock_guard<std::mutex> lock(gen->mutex); // The RNG state comprises the MTPG32 states, the seed, and an offset used for Philox static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t); static const size_t seed_size = sizeof(gen->state.initial_seed); static const size_t offset_size = sizeof(gen->state.philox_seed_offset); static const size_t total_size = states_size + seed_size + offset_size; THByteTensor_resize1d(rng_state, total_size); THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(hipMemcpy(THByteTensor_data(rng_state), gen->state.gen_states, states_size, hipMemcpyDeviceToHost)); memcpy(THByteTensor_data(rng_state) + states_size, &gen->state.initial_seed, seed_size); memcpy(THByteTensor_data(rng_state) + states_size + seed_size, &gen->state.philox_seed_offset, offset_size); } __global__ void set_rngstate_kernel(hiprandStateMtgp32_t *state, mtgp32_kernel_params_t *kernel) { state[threadIdx.x].k = kernel; } __host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state) { THCGenerator* gen = THCRandom_getGenerator(state); std::lock_guard<std::mutex> lock(gen->mutex); static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t); static const size_t seed_size = sizeof(gen->state.initial_seed); static const size_t offset_size = sizeof(gen->state.philox_seed_offset); static const size_t total_size = states_size + seed_size + offset_size; bool no_philox_seed = false; if (THByteTensor_nElement(rng_state) == total_size - offset_size) { no_philox_seed = true; } else { THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); } THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); 
THCudaCheck(hipMemcpy(gen->state.gen_states, THByteTensor_data(rng_state), states_size, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( set_rngstate_kernel), dim3(1), dim3(MAX_NUM_BLOCKS), 0, THCState_getCurrentStream(state), gen->state.gen_states, gen->state.kernel_params); memcpy(&gen->state.initial_seed, THByteTensor_data(rng_state) + states_size, seed_size); if (!no_philox_seed) { memcpy(&gen->state.philox_seed_offset, THByteTensor_data(rng_state) + states_size + seed_size, offset_size); } else { gen->state.philox_seed_offset = 0; } } // Goes from (0, 1] to [0, 1). Note 1-x is not sufficient since for some floats // eps near 0, 1-eps will round to 1. template <typename T> __device__ inline T reverse_bounds(T value) { if (THCNumerics<T>::eq(value, ScalarConvert<int, T>::to(1))) { return ScalarConvert<int, T>::to(0); } return value; } __device__ inline at::Half half_uniform_scale_and_shift(float x, double a, double b) { at::Half width = ScalarConvert<double, at::Half>::to(b - a); at::Half start = ScalarConvert<double, at::Half>::to(a); at::Half scaled = THCNumerics<at::Half>::mul(reverse_bounds(ScalarConvert<float, at::Half>::to(x)), width); return THCNumerics<at::Half>::add(scaled, start); } #define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ T y = TRANSFORM; \ result[i] = y; \ } \ } \ } #define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1, ARG2) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ T y = TRANSFORM; \ result[i] = y; \ } \ } \ } template<typename T, typename U> struct is_same { static const bool value = false; }; template<typename T> struct is_same<T, T> { static const bool value = true; }; template<typename T, typename prob_type> __global__ void generate_bernoulli_tensor(hiprandStateMtgp32_t *state, int size, T *result, prob_type *probs) { int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { if (is_same<prob_type, double>::value) { double x = hiprand_uniform_double(&state[blockIdx.x]); if (i < size) result[i] = ScalarConvert<bool, T>::to(x <= probs[i]); } else { float x = hiprand_uniform(&state[blockIdx.x]); if (i < size) result[i] = ScalarConvert<bool, T>::to(x <= probs[i]); } } } // NOTE: hiprand_uniform is (0, 1] and we want [a, b) GENERATE_KERNEL2(generate_uniform, float, float a, float b, float, hiprand_uniform, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_uniform, float, double a, double b, float, hiprand_uniform, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, hiprand_uniform_double, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, hiprand_normal, (x * stdv) + mean) GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, 
hiprand_normal_double, (x * stdv) + mean) GENERATE_KERNEL1(generate_exponential, float, double lambda, float, hiprand_uniform, (float)(-1. / lambda * log(x))) GENERATE_KERNEL1(generate_exponential, double, double lambda, double, hiprand_uniform_double, (double)(-1. / lambda * log(x))) GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, hiprand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5)))) GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, hiprand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5)))) GENERATE_KERNEL2(generate_uniform, at::Half, double a, double b, float, hiprand_uniform, (half_uniform_scale_and_shift(x, a, b))) GENERATE_KERNEL2(generate_normal, at::Half, double mean, double stdv, float, hiprand_normal, (ScalarConvert<float, at::Half>::to((x * stdv) + mean))) GENERATE_KERNEL1(generate_exponential, at::Half, double lambda, float, hiprand_uniform, (ScalarConvert<float, at::Half>::to((float)(-1. / lambda * log(x))))) GENERATE_KERNEL2(generate_cauchy, at::Half, double median, double sigma, float, hiprand_uniform, (ScalarConvert<float, at::Half>::to((float)(median + sigma * tan(M_PI*(x-0.5)))))) #include "generic/THCTensorRandom.cu" #include "THHGenerateAllTypes.h" #undef GENERATE_KERNEL1 #undef GENERATE_KERNEL2
6e21199263c0bdc5253542394dc13e69b3ef3177.cu
#include "THCTensorRandom.h" #include "THCDeviceUtils.cuh" #include "THCGeneral.h" #include "THCTensorCopy.h" #include "THCTensorMath.h" #include "THCReduceApplyUtils.cuh" #include "THCTensorRandom.cuh" #include "THCGenerator.hpp" #include "ATen/Config.h" #include "ATen/cuda/_curand_mtgp32_host.h" #include <thrust/functional.h> #include <curand.h> #include <curand_kernel.h> #define MAX_NUM_BLOCKS 200 #define BLOCK_SIZE 256 THCGenerator* THCRandom_getGenerator(THCState* state); /* Sets up generator. Allocates but does not create the generator states. Not thread-safe. */ __host__ void initializeGenerator(THCState *state, THCGenerator* gen) { gen->state.gen_states = static_cast<struct curandStateMtgp32*>(THCudaMalloc(state, MAX_NUM_BLOCKS * sizeof(curandStateMtgp32))); gen->state.kernel_params = static_cast<mtgp32_kernel_params*>(THCudaMalloc(state, sizeof(mtgp32_kernel_params))); } /* Creates a new generator state given the seed. Not thread-safe. */ __host__ void createGeneratorState(THCGenerator* gen, uint64_t seed) { if (curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->state.kernel_params) != CURAND_STATUS_SUCCESS) { THError("Creating MTGP constants failed."); } if (curandMakeMTGP32KernelState(gen->state.gen_states, mtgp32dc_params_fast_11213, gen->state.kernel_params, MAX_NUM_BLOCKS, seed) != CURAND_STATUS_SUCCESS) { THError("Creating MTGP kernel state failed."); } } __host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state) { THCGenerator* gen = THCRandom_getGenerator(state); std::lock_guard<std::mutex> lock(gen->mutex); // The RNG state comprises the MTPG32 states, the seed, and an offset used for Philox static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32); static const size_t seed_size = sizeof(gen->state.initial_seed); static const size_t offset_size = sizeof(gen->state.philox_seed_offset); static const size_t total_size = states_size + seed_size + offset_size; THByteTensor_resize1d(rng_state, total_size); THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(cudaMemcpy(THByteTensor_data(rng_state), gen->state.gen_states, states_size, cudaMemcpyDeviceToHost)); memcpy(THByteTensor_data(rng_state) + states_size, &gen->state.initial_seed, seed_size); memcpy(THByteTensor_data(rng_state) + states_size + seed_size, &gen->state.philox_seed_offset, offset_size); } __global__ void set_rngstate_kernel(curandStateMtgp32 *state, mtgp32_kernel_params *kernel) { state[threadIdx.x].k = kernel; } __host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state) { THCGenerator* gen = THCRandom_getGenerator(state); std::lock_guard<std::mutex> lock(gen->mutex); static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32); static const size_t seed_size = sizeof(gen->state.initial_seed); static const size_t offset_size = sizeof(gen->state.philox_seed_offset); static const size_t total_size = states_size + seed_size + offset_size; bool no_philox_seed = false; if (THByteTensor_nElement(rng_state) == total_size - offset_size) { no_philox_seed = true; } else { THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); } THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(cudaMemcpy(gen->state.gen_states, THByteTensor_data(rng_state), states_size, cudaMemcpyHostToDevice)); set_rngstate_kernel<<<1, MAX_NUM_BLOCKS, 0, 
THCState_getCurrentStream(state)>>>( gen->state.gen_states, gen->state.kernel_params); memcpy(&gen->state.initial_seed, THByteTensor_data(rng_state) + states_size, seed_size); if (!no_philox_seed) { memcpy(&gen->state.philox_seed_offset, THByteTensor_data(rng_state) + states_size + seed_size, offset_size); } else { gen->state.philox_seed_offset = 0; } } // Goes from (0, 1] to [0, 1). Note 1-x is not sufficient since for some floats // eps near 0, 1-eps will round to 1. template <typename T> __device__ inline T reverse_bounds(T value) { if (THCNumerics<T>::eq(value, ScalarConvert<int, T>::to(1))) { return ScalarConvert<int, T>::to(0); } return value; } __device__ inline at::Half half_uniform_scale_and_shift(float x, double a, double b) { at::Half width = ScalarConvert<double, at::Half>::to(b - a); at::Half start = ScalarConvert<double, at::Half>::to(a); at::Half scaled = THCNumerics<at::Half>::mul(reverse_bounds(ScalarConvert<float, at::Half>::to(x)), width); return THCNumerics<at::Half>::add(scaled, start); } #define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ T y = TRANSFORM; \ result[i] = y; \ } \ } \ } #define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1, ARG2) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ T y = TRANSFORM; \ result[i] = y; \ } \ } \ } template<typename T, typename U> struct is_same { static const bool value = false; }; template<typename T> struct is_same<T, T> { static const bool value = true; }; template<typename T, typename prob_type> __global__ void generate_bernoulli_tensor(curandStateMtgp32 *state, int size, T *result, prob_type *probs) { int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { if (is_same<prob_type, double>::value) { double x = curand_uniform_double(&state[blockIdx.x]); if (i < size) result[i] = ScalarConvert<bool, T>::to(x <= probs[i]); } else { float x = curand_uniform(&state[blockIdx.x]); if (i < size) result[i] = ScalarConvert<bool, T>::to(x <= probs[i]); } } } // NOTE: curand_uniform is (0, 1] and we want [a, b) GENERATE_KERNEL2(generate_uniform, float, float a, float b, float, curand_uniform, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_uniform, float, double a, double b, float, curand_uniform, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, curand_uniform_double, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, curand_normal, (x * stdv) + mean) GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, curand_normal_double, (x * stdv) + mean) GENERATE_KERNEL1(generate_exponential, float, double lambda, float, curand_uniform, (float)(-1. 
/ lambda * log(x))) GENERATE_KERNEL1(generate_exponential, double, double lambda, double, curand_uniform_double, (double)(-1. / lambda * log(x))) GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, curand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5)))) GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, curand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5)))) GENERATE_KERNEL2(generate_uniform, at::Half, double a, double b, float, curand_uniform, (half_uniform_scale_and_shift(x, a, b))) GENERATE_KERNEL2(generate_normal, at::Half, double mean, double stdv, float, curand_normal, (ScalarConvert<float, at::Half>::to((x * stdv) + mean))) GENERATE_KERNEL1(generate_exponential, at::Half, double lambda, float, curand_uniform, (ScalarConvert<float, at::Half>::to((float)(-1. / lambda * log(x))))) GENERATE_KERNEL2(generate_cauchy, at::Half, double median, double sigma, float, curand_uniform, (ScalarConvert<float, at::Half>::to((float)(median + sigma * tan(M_PI*(x-0.5)))))) #include "generic/THCTensorRandom.cu" #include "THCGenerateAllTypes.h" #undef GENERATE_KERNEL1 #undef GENERATE_KERNEL2
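// ---------------------------------------------------------------------------
// A standalone sketch, not part of the surrounding files: a minimal CUDA
// program showing the pattern the GENERATE_KERNEL macros above expand to.
// One curandStateMtgp32 lives in global memory per block, every thread of a
// block draws from that shared state, and the loop bound is rounded up so all
// threads of a block call curand_uniform() the same number of times, which
// the MTGP32 device API expects. The names fill_uniform, N_BLOCKS and
// N_THREADS are illustrative; 200 blocks / 256 threads are the MTGP32 limits
// that MAX_NUM_BLOCKS / BLOCK_SIZE above stay within.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <curand_mtgp32_host.h>
#include <curand_mtgp32dc_p_11213.h>

#define N_BLOCKS 64    // must be <= 200 for MTGP32
#define N_THREADS 256  // must be <= 256 for MTGP32

__global__ void fill_uniform(curandStateMtgp32 *states, float *out, int n) {
  int idx = blockIdx.x * N_THREADS + threadIdx.x;
  // Round n up to a multiple of the block size so that every thread in a
  // block performs the same number of curand_uniform() calls.
  int rounded = ((n + N_THREADS - 1) / N_THREADS) * N_THREADS;
  for (int i = idx; i < rounded; i += N_THREADS * N_BLOCKS) {
    float x = curand_uniform(&states[blockIdx.x]);  // x is in (0, 1]
    if (i < n) out[i] = x;
  }
}

int main() {
  const int n = 1 << 20;
  curandStateMtgp32 *states;
  mtgp32_kernel_params *params;
  float *out;
  cudaMalloc((void**)&states, N_BLOCKS * sizeof(curandStateMtgp32));
  cudaMalloc((void**)&params, sizeof(mtgp32_kernel_params));
  cudaMalloc((void**)&out, n * sizeof(float));

  // The same two host-side setup calls used by createGeneratorState() above.
  curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, params);
  curandMakeMTGP32KernelState(states, mtgp32dc_params_fast_11213, params,
                              N_BLOCKS, /*seed=*/42ULL);

  fill_uniform<<<N_BLOCKS, N_THREADS>>>(states, out, n);
  cudaDeviceSynchronize();

  float first;
  cudaMemcpy(&first, out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("first sample: %f\n", first);

  cudaFree(out);
  cudaFree(params);
  cudaFree(states);
  return 0;
}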
c688b29fa34fb12afeff0e64166339ed0cbbe035.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2019, NVIDIA Corporation. All rights reserved. // // This work is made available under the Nvidia Source Code License-NC. // To view a copy of this license, visit // https://nvlabs.github.io/stylegan2/license.html #include <torch/types.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> static __host__ __device__ __forceinline__ int floor_div(int a, int b) { int c = a / b; if (c * b > a) { c--; } return c; } struct UpFirDn2DKernelParams { int up_x; int up_y; int down_x; int down_y; int pad_x0; int pad_x1; int pad_y0; int pad_y1; int major_dim; int in_h; int in_w; int minor_dim; int kernel_h; int kernel_w; int out_h; int out_w; int loop_major; int loop_x; }; template <typename scalar_t, int up_x, int up_y, int down_x, int down_y, int kernel_h, int kernel_w, int tile_out_h, int tile_out_w> __global__ void upfirdn2d_kernel(scalar_t* out, const scalar_t* input, const scalar_t* kernel, const UpFirDn2DKernelParams p) { const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1; const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1; __shared__ volatile float sk[kernel_h][kernel_w]; __shared__ volatile float sx[tile_in_h][tile_in_w]; int minor_idx = blockIdx.x; int tile_out_y = minor_idx / p.minor_dim; minor_idx -= tile_out_y * p.minor_dim; tile_out_y *= tile_out_h; int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w; int major_idx_base = blockIdx.z * p.loop_major; if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h | major_idx_base >= p.major_dim) { return; } for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w; tap_idx += blockDim.x) { int ky = tap_idx / kernel_w; int kx = tap_idx - ky * kernel_w; scalar_t v = 0.0; if (kx < p.kernel_w & ky < p.kernel_h) { v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)]; } sk[ky][kx] = v; } for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major & major_idx < p.major_dim; loop_major++, major_idx++) { for (int loop_x = 0, tile_out_x = tile_out_x_base; loop_x < p.loop_x & tile_out_x < p.out_w; loop_x++, tile_out_x += tile_out_w) { int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0; int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0; int tile_in_x = floor_div(tile_mid_x, up_x); int tile_in_y = floor_div(tile_mid_y, up_y); __syncthreads(); for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; in_idx += blockDim.x) { int rel_in_y = in_idx / tile_in_w; int rel_in_x = in_idx - rel_in_y * tile_in_w; int in_x = rel_in_x + tile_in_x; int in_y = rel_in_y + tile_in_y; scalar_t v = 0.0; if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) { v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx]; } sx[rel_in_y][rel_in_x] = v; } __syncthreads(); for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; out_idx += blockDim.x) { int rel_out_y = out_idx / tile_out_w; int rel_out_x = out_idx - rel_out_y * tile_out_w; int out_x = rel_out_x + tile_out_x; int out_y = rel_out_y + tile_out_y; int mid_x = tile_mid_x + rel_out_x * down_x; int mid_y = tile_mid_y + rel_out_y * down_y; int in_x = floor_div(mid_x, up_x); int in_y = floor_div(mid_y, up_y); int rel_in_x = in_x - tile_in_x; int rel_in_y = in_y - tile_in_y; int kernel_x = (in_x + 1) * up_x - mid_x - 1; int kernel_y = (in_y + 1) * up_y - mid_y - 1; 
scalar_t v = 0.0; #pragma unroll for (int y = 0; y < kernel_h / up_y; y++) #pragma unroll for (int x = 0; x < kernel_w / up_x; x++) v += sx[rel_in_y + y][rel_in_x + x] * sk[kernel_y + y * up_y][kernel_x + x * up_x]; if (out_x < p.out_w & out_y < p.out_h) { out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v; } } } } } torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel, int up_x, int up_y, int down_x, int down_y, int pad_x0, int pad_x1, int pad_y0, int pad_y1) { int curDevice = -1; hipGetDevice(&curDevice); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice); UpFirDn2DKernelParams p; auto x = input.contiguous(); auto k = kernel.contiguous(); p.major_dim = x.size(0); p.in_h = x.size(1); p.in_w = x.size(2); p.minor_dim = x.size(3); p.kernel_h = k.size(0); p.kernel_w = k.size(1); p.up_x = up_x; p.up_y = up_y; p.down_x = down_x; p.down_y = down_y; p.pad_x0 = pad_x0; p.pad_x1 = pad_x1; p.pad_y0 = pad_y0; p.pad_y1 = pad_y1; p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / p.down_y; p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / p.down_x; auto out = at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options()); int mode = -1; int tile_out_h; int tile_out_w; if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 1; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 3 && p.kernel_w <= 3) { mode = 2; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 3; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 2 && p.kernel_w <= 2) { mode = 4; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 5; tile_out_h = 8; tile_out_w = 32; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 2 && p.kernel_w <= 2) { mode = 6; tile_out_h = 8; tile_out_w = 32; } dim3 block_size; dim3 grid_size; if (tile_out_h > 0 && tile_out_w) { p.loop_major = (p.major_dim - 1) / 16384 + 1; p.loop_x = 1; block_size = dim3(32 * 8, 1, 1); grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim, (p.out_w - 1) / (p.loop_x * tile_out_w) + 1, (p.major_dim - 1) / p.loop_major + 1); } AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] { switch (mode) { case 1: hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64>), dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; case 2: hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64>), dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; case 3: hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64>), dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; case 4: hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64>), dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; case 5: hipLaunchKernelGGL(( 
upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>), dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; case 6: hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>), dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; } }); return out; }
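// ---------------------------------------------------------------------------
// A standalone sketch, not part of the surrounding files: host-only arithmetic
// behind upfirdn2d_op. It reproduces the out_h/out_w formula from the code
// above and shows why floor_div() (floor toward negative infinity) rather than
// the C '/' operator (truncation toward zero) is used to map a padded output
// coordinate back to an input sample: with pad_x0 > up_x - 1 the intermediate
// coordinate can go negative. All concrete values below are made-up examples.
// ---------------------------------------------------------------------------
#include <cstdio>

static int floor_div(int a, int b) {  // same helper as in the kernel above
  int c = a / b;
  if (c * b > a) {
    c--;
  }
  return c;
}

int main() {
  // Example: 2x upsampling of a 16x16 map with a 4x4 FIR kernel, pad 1/1.
  int in_h = 16, in_w = 16;
  int up = 2, down = 1, pad0 = 1, pad1 = 1, k = 4;

  int out_h = (in_h * up + pad0 + pad1 - k + down) / down;
  int out_w = (in_w * up + pad0 + pad1 - k + down) / down;
  printf("output: %d x %d\n", out_h, out_w);  // 31 x 31 for these numbers

  // Mapping output column 0 back to the upsampled/padded grid, as the kernel
  // does with tile_mid_x = out_x * down + up - 1 - pad0.
  int mid_x = 0 * down + up - 1 - pad0;  // 0 here; negative whenever pad0 > up - 1
  printf("floor_div(-3, 2) = %d, but -3 / 2 = %d\n", floor_div(-3, 2), -3 / 2);
  printf("in_x for out_x = 0: %d\n", floor_div(mid_x, up));
  return 0;
}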
c688b29fa34fb12afeff0e64166339ed0cbbe035.cu
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved. // // This work is made available under the Nvidia Source Code License-NC. // To view a copy of this license, visit // https://nvlabs.github.io/stylegan2/license.html #include <torch/types.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <cuda.h> #include <cuda_runtime.h> static __host__ __device__ __forceinline__ int floor_div(int a, int b) { int c = a / b; if (c * b > a) { c--; } return c; } struct UpFirDn2DKernelParams { int up_x; int up_y; int down_x; int down_y; int pad_x0; int pad_x1; int pad_y0; int pad_y1; int major_dim; int in_h; int in_w; int minor_dim; int kernel_h; int kernel_w; int out_h; int out_w; int loop_major; int loop_x; }; template <typename scalar_t, int up_x, int up_y, int down_x, int down_y, int kernel_h, int kernel_w, int tile_out_h, int tile_out_w> __global__ void upfirdn2d_kernel(scalar_t* out, const scalar_t* input, const scalar_t* kernel, const UpFirDn2DKernelParams p) { const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1; const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1; __shared__ volatile float sk[kernel_h][kernel_w]; __shared__ volatile float sx[tile_in_h][tile_in_w]; int minor_idx = blockIdx.x; int tile_out_y = minor_idx / p.minor_dim; minor_idx -= tile_out_y * p.minor_dim; tile_out_y *= tile_out_h; int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w; int major_idx_base = blockIdx.z * p.loop_major; if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h | major_idx_base >= p.major_dim) { return; } for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w; tap_idx += blockDim.x) { int ky = tap_idx / kernel_w; int kx = tap_idx - ky * kernel_w; scalar_t v = 0.0; if (kx < p.kernel_w & ky < p.kernel_h) { v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)]; } sk[ky][kx] = v; } for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major & major_idx < p.major_dim; loop_major++, major_idx++) { for (int loop_x = 0, tile_out_x = tile_out_x_base; loop_x < p.loop_x & tile_out_x < p.out_w; loop_x++, tile_out_x += tile_out_w) { int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0; int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0; int tile_in_x = floor_div(tile_mid_x, up_x); int tile_in_y = floor_div(tile_mid_y, up_y); __syncthreads(); for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; in_idx += blockDim.x) { int rel_in_y = in_idx / tile_in_w; int rel_in_x = in_idx - rel_in_y * tile_in_w; int in_x = rel_in_x + tile_in_x; int in_y = rel_in_y + tile_in_y; scalar_t v = 0.0; if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) { v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx]; } sx[rel_in_y][rel_in_x] = v; } __syncthreads(); for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; out_idx += blockDim.x) { int rel_out_y = out_idx / tile_out_w; int rel_out_x = out_idx - rel_out_y * tile_out_w; int out_x = rel_out_x + tile_out_x; int out_y = rel_out_y + tile_out_y; int mid_x = tile_mid_x + rel_out_x * down_x; int mid_y = tile_mid_y + rel_out_y * down_y; int in_x = floor_div(mid_x, up_x); int in_y = floor_div(mid_y, up_y); int rel_in_x = in_x - tile_in_x; int rel_in_y = in_y - tile_in_y; int kernel_x = (in_x + 1) * up_x - mid_x - 1; int kernel_y = (in_y + 1) * up_y - mid_y - 1; scalar_t v = 0.0; #pragma unroll for (int y = 0; y < kernel_h / up_y; y++) 
#pragma unroll for (int x = 0; x < kernel_w / up_x; x++) v += sx[rel_in_y + y][rel_in_x + x] * sk[kernel_y + y * up_y][kernel_x + x * up_x]; if (out_x < p.out_w & out_y < p.out_h) { out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v; } } } } } torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel, int up_x, int up_y, int down_x, int down_y, int pad_x0, int pad_x1, int pad_y0, int pad_y1) { int curDevice = -1; cudaGetDevice(&curDevice); cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); UpFirDn2DKernelParams p; auto x = input.contiguous(); auto k = kernel.contiguous(); p.major_dim = x.size(0); p.in_h = x.size(1); p.in_w = x.size(2); p.minor_dim = x.size(3); p.kernel_h = k.size(0); p.kernel_w = k.size(1); p.up_x = up_x; p.up_y = up_y; p.down_x = down_x; p.down_y = down_y; p.pad_x0 = pad_x0; p.pad_x1 = pad_x1; p.pad_y0 = pad_y0; p.pad_y1 = pad_y1; p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / p.down_y; p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / p.down_x; auto out = at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options()); int mode = -1; int tile_out_h; int tile_out_w; if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 1; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 3 && p.kernel_w <= 3) { mode = 2; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 3; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 2 && p.kernel_w <= 2) { mode = 4; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 4 && p.kernel_w <= 4) { mode = 5; tile_out_h = 8; tile_out_w = 32; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 2 && p.kernel_w <= 2) { mode = 6; tile_out_h = 8; tile_out_w = 32; } dim3 block_size; dim3 grid_size; if (tile_out_h > 0 && tile_out_w) { p.loop_major = (p.major_dim - 1) / 16384 + 1; p.loop_x = 1; block_size = dim3(32 * 8, 1, 1); grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim, (p.out_w - 1) / (p.loop_x * tile_out_w) + 1, (p.major_dim - 1) / p.loop_major + 1); } AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] { switch (mode) { case 1: upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>( out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; case 2: upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64><<<grid_size, block_size, 0, stream>>>( out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; case 3: upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>( out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; case 4: upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64><<<grid_size, block_size, 0, stream>>>( out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; case 5: upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32><<<grid_size, block_size, 0, stream>>>( out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; case 6: upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32><<<grid_size, 
block_size, 0, stream>>>( out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p ); break; } }); return out; }
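// ---------------------------------------------------------------------------
// A standalone sketch, not part of the surrounding files: one way upfirdn2d_op
// might be called from C++ once the translation unit above is compiled and
// linked (for example as a torch extension). The tensor layout is the one the
// kernel assumes: dim0 = major (batch*channels), dim1/dim2 = spatial,
// dim3 = minor. The shapes, padding values, and the forward declaration here
// are illustrative, not taken from the original project.
// ---------------------------------------------------------------------------
#include <torch/torch.h>
#include <cstdio>

// Matches the definition in the file above; declared here for the sketch.
torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
                           int up_x, int up_y, int down_x, int down_y,
                           int pad_x0, int pad_x1, int pad_y0, int pad_y1);

int main() {
  torch::NoGradGuard no_grad;
  auto opts = torch::dtype(torch::kFloat32).device(torch::kCUDA);

  // 4 feature maps of 16x16 in the (major, h, w, minor) layout.
  auto x = torch::randn({4, 16, 16, 1}, opts);
  auto k = torch::ones({4, 4}, opts) / 16.0;  // simple 4x4 averaging FIR taps

  // 2x upsample in both directions, no downsampling, pad 1 on every side:
  // this selects the "mode 3" specialization (up 2/2, down 1/1, kernel <= 4x4).
  auto y = upfirdn2d_op(x, k, 2, 2, 1, 1, 1, 1, 1, 1);

  // out_h = (16*2 + 1 + 1 - 4 + 1) / 1 = 31, and likewise for out_w.
  std::printf("out: %ld x %ld\n", (long)y.size(1), (long)y.size(2));
  return 0;
}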
14778d0b1fc6e1cfa0a58cbdb36021377981187a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <GL/glut.h> #include <GL/gl.h> #include <malloc.h> #include <signal.h> /****************************************************************************** Displays two grey scale images. On the left is an image that has come from an image processing pipeline, just after colour thresholding. On the right is the result of applying an edge detection convolution operator to the left image. This program performs that convolution. Things to note: - A single unsigned char stores a pixel intensity value. 0 is black, 256 is white. - The colour mode used is GL_LUMINANCE. This uses a single number to represent a pixel's intensity. In this case we want 256 shades of grey, which is best stored in eight bits, so GL_UNSIGNED_BYTE is specified as the pixel data type. To compile adapt the code below wo match your filenames: nvcc -o imagecuda imagecuda.cu -lglut -lGL -lm Dr Kevan Buckley, University of Wolverhampton, 2018 ******************************************************************************/ #define width 100 #define height 72 unsigned char results[width * height]; unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,0,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0, 0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0,255, 0,255,255,0,0,0,0,0,0,0,255,255,255,255,0,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,0,0,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,255,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 255,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 
0,0,0,255,0,0,255,0,0,0,0,255,255,255,255,255,255,255,255, 0,255,0,255,255,255,255,0,0,0,0,0,255,0,0,0,0,0,0, 0,0,0,255,0,0,255,255,255,255,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,255,255,255,255,0,255,255,255,255,255,255,255,255,0,0,0,0, 255,0,0,0,0,0,0,0,255,0,0,0,255,0,0,0,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,0,255,0,255, 255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,255,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,0,255,255,255,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,0,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,0,255,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,255,255,255,255,255,0,255,255,255,255,255, 255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255, 255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,0,0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,255,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,255,0,0,0,0,0,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255, 255,0,0,0,255,255,255,255,0,255,255,255,0,0,255,255,255,255,255, 
255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0,255,255, 0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,255,255, 255,0,0,0,0,255,0,0,0,0,0,0,255,255,0,0,0,0,255, 0,0,0,255,255,0,0,0,0,0,255,255,0,0,0,255,255,0,0, 255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,0,0,0, 0,0,0,255,255,0,0,0,0,0,0,255,255,255,255,0,0,0,0, 0,0,0,0,255,0,0,0,0,255,0,0,0,0,0,0,0,255,255, 0,0,0,0,255,0,0,0,255,0,0,0,0,0,0,0,255,0,0, 0,255,255,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,0,0,0, 0,255,0,0,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255, 255,255,0,255,255,255,0,0,0,255,0,0,0,255,255,0,0,0,255, 255,0,0,255,255,0,0,255,255,255,0,0,0,255,0,0,255,255,0, 0,0,255,0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 255,255,255,0,0,0,255,0,0,255,255,0,0,0,255,255,0,0,0, 0,0,255,255,255,255,255,255,255,255,0,0,0,0,255,0,0,0,255, 255,255,0,0,0,0,0,0,255,255,0,0,255,255,255,0,0,0,255, 0,0,255,255,255,0,0,255,0,0,0,255,255,0,0,255,255,255,255, 255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,255,0,0,255,255,0,0,0, 255,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,255, 255,255,0,0,255,255,255,255,255,0,0,0,0,255,255,0,0,255,255, 255,0,0,0,255,0,0,255,255,255,0,0,255,0,0,0,255,255,0, 0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,0,0, 255,255,0,0,0,255,0,0,255,255,0,255,255,255,255,255,0,0,0, 0,0,0,255,255,255,0,0,0,0,255,255,0,0,255,0,0,0,255, 0,0,0,0,0,255,255,0,0,255,0,0,0,0,0,0,0,255,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,0, 0,255,255,0,0,255,255,255,0,0,255,0,0,0,0,0,0,255,255, 255,255,0,0,0,255,255,255,255,255,0,0,0,0,0,0,255,0,0, 0,0,0,0,255,0,0,0,0,0,255,255,0,0,255,255,0,0,0, 0,0,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0, 0,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,0,0, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0, 255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,255,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,0,255,255,255, 255,255,255,0,0,255,255,255,255,255,255,255,0,0,255,255,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0, 0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,0,0,255,255, 255,0,0,0,0,0,0,255,0,0,255,0,0,255,255,255,0,0,255, 255,255,255,255,255,0,0,0,0,0,0,255,255,255,0,0,0,255,0, 0,0,0,0,0,255,255,0,0,0,0,255,0,0,255,255,0,0,0, 0,0,255,0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255, 255,255,0,0,255,255,0,0,255,0,0,0,255,0,0,255,255,0,0, 255,255,0,0,255,255,255,255,255,0,0,0,255,255,0,0,0,255,255, 0,0,255,255,0,0,255,255,0,0,255,255,0,0,255,255,255,0,0, 255,0,0,255,255,0,0,255,255,0,0,255,255,0,0,255,255,255,255, 255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255, 255,255,0,0,0,255,255,0,0,255,255,0,0,0,0,0,0,255,0, 0,255,255,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255, 0,0,0,255,255,0,0,255,255,255,0,0,0,0,0,255,255,0,0, 255,255,255,0,0,255,0,0,255,255,255,0,255,255,0,0,255,255,0, 0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,0,0,255,255,255,255,0, 0,0,0,255,255,0,0,255,0,0,255,255,0,0,255,255,255,255,255, 255,255,0,0,0,0,0,255,255,255,0,0,255,255,255,255,255,255,0, 
0,255,255,0,0,255,255,255,0,0,255,0,0,255,255,255,0,255,255, 0,0,255,255,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,0,0, 0,0,255,0,0,0,0,0,255,255,0,0,255,0,0,0,0,0,0, 255,255,255,255,255,255,0,0,0,0,0,255,255,255,0,0,0,0,255, 255,0,0,0,0,0,255,0,0,0,0,0,255,0,0,255,255,0,0, 0,0,0,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,0, 0,255,255,255,0,0,0,255,0,0,0,0,0,255,255,0,0,255,0, 0,0,0,0,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255, 0,0,0,0,255,255,255,0,0,0,255,255,255,0,0,0,0,255,255, 0,255,255,255,0,0,0,255,255,255,0,255,0,0,255,255,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255, 0,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};

__global__ void detect_edges(unsigned char *in, unsigned char *out) {
  int i;
  int n_pixels = width * height;

  for (i = 0; i < n_pixels; i++) {
    int x, y;        // the pixel of interest
    int b, d, f, h;  // the pixels adjacent to x,y used for the calculation
    int r;           // the result of the calculation

    y = i / width;
    x = i - (width * y);

    if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
      out[i] = 0;
    } else {
      b = i + width;
      d = i - 1;
      f = i + 1;
      h = i - width;

      r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1) +
          (in[h] * -1);

      if (r > 0) { // if the result is positive this is an edge pixel
        out[i] = 255;
      } else {
        out[i] = 0;
      }
    }
  }
}

void tidy_and_exit() {
  exit(0);
}

void sigint_callback(int signal_number){
  printf("\nInterrupt from keyboard\n");
  tidy_and_exit();
}

static void display() {
  glClear(GL_COLOR_BUFFER_BIT);
  glRasterPos4i(-1, -1, 0, 1);
  glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
  glRasterPos4i(0, -1, 0, 1);
  glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
  glFlush();
}

static void key_pressed(unsigned char key, int x, int y) {
  switch(key){
    case 27: // escape
      tidy_and_exit();
      break;
    default:
      printf("\nPress escape to exit\n");
      break;
  }
}

int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference) {
  long long int ds = finish->tv_sec - start->tv_sec;
  long long int dn = finish->tv_nsec - start->tv_nsec;

  if(dn < 0 ) {
    ds--;
    dn += 1000000000;
  }
  *difference = ds * 1000000000 + dn;
  return !(*difference > 0);
}

int main(int argc, char **argv) {
  unsigned char *d_results;
  unsigned char *d_image;

  hipMalloc((void**)&d_image, sizeof(unsigned char) * (width * height));
  hipMalloc((void**)&d_results, sizeof(unsigned char) * (width * height));
  hipMemcpy(d_image, &image, sizeof(unsigned char) * (width * height), hipMemcpyHostToDevice);

  signal(SIGINT, sigint_callback);

  struct timespec start, finish;
  long long int time_elapsed;

  clock_gettime(CLOCK_MONOTONIC, &start);

  hipLaunchKernelGGL((detect_edges), dim3(100), dim3(72), 0, 0, d_image, d_results);
  hipDeviceSynchronize();
  hipMemcpy(&results, d_results, sizeof(unsigned char) * (width*height), hipMemcpyDeviceToHost);

  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
         (time_elapsed/1.0e9));

  hipFree(d_image);
  hipFree(d_results);

  glutInit(&argc, argv);
  glutInitWindowSize(width * 2, height);
  glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
  glutCreateWindow("6CS005 Image Processing Coursework");
  glutDisplayFunc(display);
  glutKeyboardFunc(key_pressed);
  glClearColor(0.0, 1.0, 0.0, 1.0);
  glutMainLoop();

  tidy_and_exit();
  return 0;
}
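// ---------------------------------------------------------------------------
// A standalone sketch, not part of the files above or below: the same
// 4-neighbour Laplacian edge detector written so that each of the
// width*height threads handles exactly one pixel, instead of every thread
// looping over the whole image as detect_edges() above does. The grid/block
// shape and the all-zero test image are illustrative; the thresholding logic
// is copied from the original kernel.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

#define W 100
#define H 72

__global__ void detect_edges_per_pixel(const unsigned char *in, unsigned char *out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // one thread per pixel
  if (i >= W * H) return;

  int y = i / W;
  int x = i - y * W;
  if (x == 0 || y == 0 || x == W - 1 || y == H - 1) {
    out[i] = 0;  // border pixels cannot be convolved
    return;
  }
  // 4-neighbour Laplacian: centre*4 minus up, down, left and right neighbours.
  int r = in[i] * 4 - in[i + W] - in[i - 1] - in[i + 1] - in[i - W];
  out[i] = (r > 0) ? 255 : 0;
}

int main() {
  static unsigned char h_in[W * H];   // all-zero test image; replace with real data
  static unsigned char h_out[W * H];

  unsigned char *d_in, *d_out;
  cudaMalloc((void**)&d_in, W * H);
  cudaMalloc((void**)&d_out, W * H);
  cudaMemcpy(d_in, h_in, W * H, cudaMemcpyHostToDevice);

  int threads = 256;
  int blocks = (W * H + threads - 1) / threads;  // enough threads to cover every pixel
  detect_edges_per_pixel<<<blocks, threads>>>(d_in, d_out);
  cudaDeviceSynchronize();

  cudaMemcpy(h_out, d_out, W * H, cudaMemcpyDeviceToHost);
  printf("centre pixel result: %d\n", h_out[(H / 2) * W + W / 2]);

  cudaFree(d_in);   // free takes the device pointer value itself
  cudaFree(d_out);
  return 0;
}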
14778d0b1fc6e1cfa0a58cbdb36021377981187a.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <GL/glut.h> #include <GL/gl.h> #include <malloc.h> #include <signal.h> /****************************************************************************** Displays two grey scale images. On the left is an image that has come from an image processing pipeline, just after colour thresholding. On the right is the result of applying an edge detection convolution operator to the left image. This program performs that convolution. Things to note: - A single unsigned char stores a pixel intensity value. 0 is black, 256 is white. - The colour mode used is GL_LUMINANCE. This uses a single number to represent a pixel's intensity. In this case we want 256 shades of grey, which is best stored in eight bits, so GL_UNSIGNED_BYTE is specified as the pixel data type. To compile adapt the code below wo match your filenames: nvcc -o imagecuda imagecuda.cu -lglut -lGL -lm Dr Kevan Buckley, University of Wolverhampton, 2018 ******************************************************************************/ #define width 100 #define height 72 unsigned char results[width * height]; unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,0,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0, 0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0,255, 0,255,255,0,0,0,0,0,0,0,255,255,255,255,0,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,0,0,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,255,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 255,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,255,0,0,255,0,0,0,0,255,255,255,255,255,255,255,255, 0,255,0,255,255,255,255,0,0,0,0,0,255,0,0,0,0,0,0, 
0,0,0,255,0,0,255,255,255,255,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,255,255,255,255,0,255,255,255,255,255,255,255,255,0,0,0,0, 255,0,0,0,0,0,0,0,255,0,0,0,255,0,0,0,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,0,255,0,255, 255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,255,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,0,255,255,255,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,0,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,0,255,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,255,255,255,255,255,0,255,255,255,255,255, 255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255, 255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,0,0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,255,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,255,0,0,0,0,0,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255, 255,0,0,0,255,255,255,255,0,255,255,255,0,0,255,255,255,255,255, 255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0,255,255, 0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,255,255, 
255,0,0,0,0,255,0,0,0,0,0,0,255,255,0,0,0,0,255, 0,0,0,255,255,0,0,0,0,0,255,255,0,0,0,255,255,0,0, 255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,0,0,0, 0,0,0,255,255,0,0,0,0,0,0,255,255,255,255,0,0,0,0, 0,0,0,0,255,0,0,0,0,255,0,0,0,0,0,0,0,255,255, 0,0,0,0,255,0,0,0,255,0,0,0,0,0,0,0,255,0,0, 0,255,255,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,0,0,0, 0,255,0,0,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255, 255,255,0,255,255,255,0,0,0,255,0,0,0,255,255,0,0,0,255, 255,0,0,255,255,0,0,255,255,255,0,0,0,255,0,0,255,255,0, 0,0,255,0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 255,255,255,0,0,0,255,0,0,255,255,0,0,0,255,255,0,0,0, 0,0,255,255,255,255,255,255,255,255,0,0,0,0,255,0,0,0,255, 255,255,0,0,0,0,0,0,255,255,0,0,255,255,255,0,0,0,255, 0,0,255,255,255,0,0,255,0,0,0,255,255,0,0,255,255,255,255, 255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,255,0,0,255,255,0,0,0, 255,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,255, 255,255,0,0,255,255,255,255,255,0,0,0,0,255,255,0,0,255,255, 255,0,0,0,255,0,0,255,255,255,0,0,255,0,0,0,255,255,0, 0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,0,0, 255,255,0,0,0,255,0,0,255,255,0,255,255,255,255,255,0,0,0, 0,0,0,255,255,255,0,0,0,0,255,255,0,0,255,0,0,0,255, 0,0,0,0,0,255,255,0,0,255,0,0,0,0,0,0,0,255,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,0, 0,255,255,0,0,255,255,255,0,0,255,0,0,0,0,0,0,255,255, 255,255,0,0,0,255,255,255,255,255,0,0,0,0,0,0,255,0,0, 0,0,0,0,255,0,0,0,0,0,255,255,0,0,255,255,0,0,0, 0,0,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0, 0,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,0,0, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0, 255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,255,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,0,255,255,255, 255,255,255,0,0,255,255,255,255,255,255,255,0,0,255,255,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0, 0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,0,0,255,255, 255,0,0,0,0,0,0,255,0,0,255,0,0,255,255,255,0,0,255, 255,255,255,255,255,0,0,0,0,0,0,255,255,255,0,0,0,255,0, 0,0,0,0,0,255,255,0,0,0,0,255,0,0,255,255,0,0,0, 0,0,255,0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255, 255,255,0,0,255,255,0,0,255,0,0,0,255,0,0,255,255,0,0, 255,255,0,0,255,255,255,255,255,0,0,0,255,255,0,0,0,255,255, 0,0,255,255,0,0,255,255,0,0,255,255,0,0,255,255,255,0,0, 255,0,0,255,255,0,0,255,255,0,0,255,255,0,0,255,255,255,255, 255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255, 255,255,0,0,0,255,255,0,0,255,255,0,0,0,0,0,0,255,0, 0,255,255,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255, 0,0,0,255,255,0,0,255,255,255,0,0,0,0,0,255,255,0,0, 255,255,255,0,0,255,0,0,255,255,255,0,255,255,0,0,255,255,0, 0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,0,0,255,255,255,255,0, 0,0,0,255,255,0,0,255,0,0,255,255,0,0,255,255,255,255,255, 255,255,0,0,0,0,0,255,255,255,0,0,255,255,255,255,255,255,0, 0,255,255,0,0,255,255,255,0,0,255,0,0,255,255,255,0,255,255, 0,0,255,255,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,0,0, 
0,0,255,0,0,0,0,0,255,255,0,0,255,0,0,0,0,0,0, 255,255,255,255,255,255,0,0,0,0,0,255,255,255,0,0,0,0,255, 255,0,0,0,0,0,255,0,0,0,0,0,255,0,0,255,255,0,0, 0,0,0,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,0, 0,255,255,255,0,0,0,255,0,0,0,0,0,255,255,0,0,255,0, 0,0,0,0,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255, 0,0,0,0,255,255,255,0,0,0,255,255,255,0,0,0,0,255,255, 0,255,255,255,0,0,0,255,255,255,0,255,0,0,255,255,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255, 0,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }; __global__ void detect_edges(unsigned char *in, unsigned char *out) { int i; int n_pixels = width * height; for(i=0;i<n_pixels;i++) { int x, y; // the pixel of interest int b, d, f, h; // the pixels adjacent to x,y used for the calculation int r; // the result of calculate y = i / width; x = i - (width * y); if (x == 0 || y == 0 || x == width - 1 || y == height - 1) { out[i] = 0; } else { b = i + width; d = i - 1; f = i + 1; h = i - width; r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1) + (in[h] * -1); if (r > 0) { // if the result is positive this is an edge pixel out[i] = 255; } else { out[i] = 0; } } } } void tidy_and_exit() { exit(0); } void sigint_callback(int signal_number){ printf("\nInterrupt from keyboard\n"); tidy_and_exit(); } static void display() { glClear(GL_COLOR_BUFFER_BIT); glRasterPos4i(-1, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image); glRasterPos4i(0, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results); glFlush(); } static void key_pressed(unsigned char key, int x, int y) { switch(key){ case 27: // escape tidy_and_exit(); break; default: printf("\nPress escape to exit\n"); break; } } int time_difference(struct timespec *start, struct timespec *finish,long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char **argv) { unsigned char *d_results; unsigned char *d_image; cudaMalloc((void**)&d_image, sizeof(unsigned char) * (width * height)); cudaMalloc((void**)&d_results, sizeof(unsigned char) * (width * height)); cudaMemcpy(d_image,&image,sizeof(unsigned char) * (width * height),cudaMemcpyHostToDevice); signal(SIGINT, sigint_callback); struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); detect_edges<<<100,72>>>(d_image,d_results); cudaThreadSynchronize(); cudaMemcpy(&results, d_results, sizeof(unsigned char) * (width*height), cudaMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); cudaFree(&d_image); cudaFree(&d_results); glutInit(&argc, argv); glutInitWindowSize(width * 2,height); glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE); glutCreateWindow("6CS005 Image Progessing Courework"); glutDisplayFunc(display); glutKeyboardFunc(key_pressed); glClearColor(0.0, 1.0, 0.0, 1.0); glutMainLoop(); tidy_and_exit(); return 0; }
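/*
 * Note on the detect_edges kernel above: as written, every thread loops over all
 * width*height pixels, so the <<<100,72>>> launch makes all 7200 threads repeat the
 * same full-image computation (the output is still correct, just redundant). A
 * grid-stride loop is the usual way to split that work; the kernel below is only an
 * illustrative sketch, not the author's code, and it assumes the same width/height
 * globals and the same 4-neighbour Laplacian as the file above.
 */
__global__ void detect_edges_strided(const unsigned char *in, unsigned char *out) {
    int n_pixels = width * height;
    // Each thread starts at its global index and advances by the total thread count.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n_pixels;
         i += gridDim.x * blockDim.x) {
        int y = i / width;
        int x = i - width * y;
        if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
            out[i] = 0;  // image border: never an edge
        } else {
            // 4-neighbour Laplacian, same weights as the original kernel
            int r = 4 * in[i] - in[i + width] - in[i - 1] - in[i + 1] - in[i - width];
            out[i] = (r > 0) ? 255 : 0;
        }
    }
}
/* It would be launched the same way, e.g. detect_edges_strided<<<100,72>>>(d_image, d_results); */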
37c9d88ab760fcb91e31010830b396100329e1a3.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #define TILE_SIZE 4 #define chunk_size 2 /*The algorithm and code given in the main reference paper have been followed*/ /*All matrices stored and accessed in row major form*/ /*Function to perform rank-k update */ __device__ void ssyrk_tile(float* rA1, float* rA2) { int row = threadIdx.z; int column = threadIdx.y; if(column <= row) { float updatedValue = rA2[row * TILE_SIZE + column]; for(int k=0; k<TILE_SIZE; k++) { updatedValue -= rA1[row * TILE_SIZE + k] * rA1[column * TILE_SIZE + k]; } rA2[row * TILE_SIZE + column] = updatedValue; } } /*General Matrix Multiplication*/ __device__ void sgemm_tile(float* rA1, float* rA2, float* rA3) { int row = threadIdx.z; int column = threadIdx.y; float updatedValue = rA3[row * TILE_SIZE + column]; for(int i=0; i<TILE_SIZE; i++) { updatedValue -= rA1[row * TILE_SIZE + i] * rA2[column*TILE_SIZE + i]; } rA3[row * TILE_SIZE + column] = updatedValue; } /*Function to perform Cholesky Factorization for a tile*/ __device__ void spotrf_tile(float* t_A) { int t_x = threadIdx.y; int t_y = threadIdx.z; for(int k=0;k<TILE_SIZE;k++) { if(t_x==t_y && t_x==k) t_A[k*TILE_SIZE + k] = sqrtf(t_A[k*TILE_SIZE + k]); __syncthreads(); if(t_x<t_y && t_x == k && t_x<TILE_SIZE && t_y<TILE_SIZE) { t_A[t_y*TILE_SIZE + k]/= t_A[k*TILE_SIZE + k]; } __syncthreads(); if(t_x<=t_y && t_x>k && t_y>k && t_x<TILE_SIZE && t_y<TILE_SIZE) { t_A[t_y*TILE_SIZE + t_x]-= t_A[t_x*TILE_SIZE + k]*t_A[t_y*TILE_SIZE + k]; } __syncthreads(); } } /*Function to perform triangular solve for a tile */ __device__ void strsm_tile(float *t_A1, float *t_A2) { int tx = threadIdx.y; int ty = threadIdx.z; for(int i=0;i<TILE_SIZE;i++) { if(tx==0) { t_A2[ty*TILE_SIZE + i] /= t_A1[i*TILE_SIZE + i]; } __syncthreads(); if(tx>i && i<TILE_SIZE-1) { t_A2[ty*TILE_SIZE+tx] -= (t_A2[ty*TILE_SIZE + i]*t_A1[tx*TILE_SIZE + i]); } __syncthreads(); } } // __device__ void load_full_tile(int m, int n, float* g_in, float* arr, int N) // { // int i = m*TILE_SIZE + threadIdx.z; // int j = n*TILE_SIZE + threadIdx.y; // if(!(i<N && j<N)) return; // arr[threadIdx.z*TILE_SIZE + threadIdx.y] = g_in[i*N + j]; // __syncthreads(); // } __device__ void load_full_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) { int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int tz = threadIdx.z; //printf("%d %d %d \n",tx,ty,tz); int row = tile_y * TILE_SIZE + tz; // access row int column = tile_x * TILE_SIZE + ty; // access col if(ty < TILE_SIZE && tz < TILE_SIZE && tx<M) { int g_threadX = blockIdx.x * blockDim.x + threadIdx.x; int x = row*N + column; int global_id = ((g_threadX / chunk_size) *chunk_size)*N*N + x *chunk_size + (g_threadX % chunk_size); s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx] = (row < N && column < N) ? g_mem[ global_id] : 0; // we need to think about access expression of global memory. //M: Total number of matrices. 
N:dim of matrix } __syncthreads(); } // __device__ void load_full_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) // { // int tx = threadIdx.x; // local threadid in x // int ty = threadIdx.y; // local threadid in y // int tz = threadIdx.z; // //printf("%d %d %d \n",tx,ty,tz); // int row = tile_y * TILE_SIZE + tz; // access row // int column = tile_x * TILE_SIZE + ty; // access col // if(ty < TILE_SIZE && tz < TILE_SIZE && tx<M) // { // s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx] = (row < N && column < N) ? g_mem[ blockIdx.x * blockDim.x + tx + row * N * M + column*M] : 0; // we need to think about access expression of global memory. //M: Total number of matrices. N:dim of matrix // } // __syncthreads(); // } // __device__ void store_full_tile(int m, int n, float* g_in, float* arr, int N) // { // int i = m*TILE_SIZE + threadIdx.z; // int j = n*TILE_SIZE + threadIdx.y; // if(!(i<N && j<N)) return; // g_in[i*N + j] = arr[threadIdx.z*TILE_SIZE + threadIdx.y]; // __syncthreads(); // } __device__ void store_full_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) { int tx = threadIdx.y; // local threadid in x int ty = threadIdx.z; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { int g_threadX = blockIdx.x * blockDim.x + threadIdx.x; int x = row*N + column; int global_id = ((g_threadX / chunk_size) *chunk_size)*N*N + x *chunk_size + (g_threadX % chunk_size); g_mem[global_id] = (tx < TILE_SIZE && ty < TILE_SIZE) ? s_mem[ty * TILE_SIZE + tx + shared_size_single_matrix*threadIdx.x] : 0; } __syncthreads(); } __device__ void load_lower_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) { // int i = m*TILE_SIZE + threadIdx.z; // int j = n*TILE_SIZE + threadIdx.y; // if(!(i<N && j<N)) return; // if(threadIdx.y<=threadIdx.z) // arr[threadIdx.z*TILE_SIZE + threadIdx.y] = g_in[i*N + j]; // else arr[threadIdx.z*TILE_SIZE + threadIdx.y] = 0.0; // __syncthreads(); int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int tz = threadIdx.z; //printf("%d %d %d \n",tx,ty,tz); int row = tile_y * TILE_SIZE + tz; // access row int column = tile_x * TILE_SIZE + ty; // access col if(ty < TILE_SIZE && tz < TILE_SIZE && tx<M) { int g_threadX = blockIdx.x * blockDim.x + threadIdx.x; int x = row*N + column; int global_id = ((g_threadX / chunk_size) *chunk_size)*N*N + x *chunk_size + (g_threadX % chunk_size); if(threadIdx.y<=threadIdx.z) s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx] = (row < N && column < N) ? g_mem[ global_id] : 0; // we need to think about access expression of global memory. //M: Total number of matrices. 
N:dim of matrix else s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx] = 0.0; } __syncthreads(); } // __device__ void store_lower_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) // { // // int i = m*TILE_SIZE + threadIdx.z; // // int j = n*TILE_SIZE + threadIdx.y; // // if(!(i<N && j<N)) return; // // if(threadIdx.y<=threadIdx.z) // // g_in[i*N + j] = arr[threadIdx.z*TILE_SIZE + threadIdx.y]; // // else g_in[i*N + j] = 0.0; // // __syncthreads(); // int tx = threadIdx.x; // local threadid in x // int ty = threadIdx.y; // local threadid in y // int tz = threadIdx.z; // //printf("%d %d %d \n",tx,ty,tz); // int row = tile_y * TILE_SIZE + tz; // access row // int column = tile_x * TILE_SIZE + ty; // access col // if(ty < TILE_SIZE && tz < TILE_SIZE && tx<M) // { // if(threadIdx.y<=threadIdx.z) // g_mem[ blockIdx.x * blockDim.x + tx + row * N * M + column*M] = s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx]; // we need to think about access expression of global memory. //M: Total number of matrices. N:dim of matrix // else // g_mem[ blockIdx.x * blockDim.x + tx + row * N * M + column*M] = 0.0; // } // __syncthreads(); // } __device__ void store_lower_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) { int tx = threadIdx.y; // local threadid in x int ty = threadIdx.z; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { int g_threadX = blockIdx.x * blockDim.x + threadIdx.x; int x = row*N + column; int global_id = ((g_threadX / chunk_size) *chunk_size)*N*N + x *chunk_size + (g_threadX % chunk_size); g_mem[global_id] = (tx < TILE_SIZE && ty < TILE_SIZE && column <= row) ? s_mem[ty * TILE_SIZE + tx + shared_size_single_matrix*threadIdx.x] : 0; } __syncthreads(); } // __device__ void store_lower_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) // { // int tx = threadIdx.y; // local threadid in x // int ty = threadIdx.z; // local threadid in y // int row = tile_y * TILE_SIZE + ty; // access row // int column = tile_x * TILE_SIZE + tx; // access col // if(row < N && column < N) // { // g_mem[blockIdx.x * blockDim.x + threadIdx.x + row * N * M + column*M] = (tx < TILE_SIZE && ty < TILE_SIZE && column <= row) ? 
s_mem[ty * TILE_SIZE + tx + shared_size_single_matrix*threadIdx.x] : 0; // } // __syncthreads(); // } __device__ void store_zeros(float *g_mem, int tile_y, int tile_x, int N, int M) { int tx = threadIdx.y; // local threadid in x int ty = threadIdx.z; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { int g_threadX = blockIdx.x * blockDim.x + threadIdx.x; int x = row*N + column; int global_id = ((g_threadX / chunk_size) *chunk_size)*N*N + x *chunk_size + (g_threadX % chunk_size); g_mem[global_id] = 0; } __syncthreads(); } // __device__ void store_zeros(float *g_mem, int tile_y, int tile_x, int N, int M) // { // int tx = threadIdx.y; // local threadid in x // int ty = threadIdx.z; // local threadid in y // int row = tile_y * TILE_SIZE + ty; // access row // int column = tile_x * TILE_SIZE + tx; // access col // if(row < N && column < N) // { // // printf("hello %d %d\n", threadIdx.x ,blockIdx.x * blockDim.x + threadIdx.x + row * N * M + column*M); // g_mem[blockIdx.x * blockDim.x + threadIdx.x + row * N * M + column*M] = 0; // } // __syncthreads(); // } __device__ void print_matrix(float* h_A,int num_of_matrices,int dim_of_matrix) { for (int matrix_index = 0; matrix_index < num_of_matrices; matrix_index++) { for (int row = 0; row < dim_of_matrix; row++) { for (int column = 0; column < dim_of_matrix; column++) { //write_element = h_A[matrix_index * dim_of_matrix * dim_of_matrix + row * dim_of_matrix + column]; int global_id = row * dim_of_matrix * num_of_matrices + column * num_of_matrices + matrix_index; float write_element = h_A[global_id] ; printf("%0.2f ", write_element); } printf("\n"); } printf("\n"); } } __global__ void launch_kernel(float* d_mat, int N, int M , int shared_size_single_matrix) { extern __shared__ float shared_mem[]; float *rA1, *rA2, *rA3; rA1 = shared_mem; rA2 = shared_mem + TILE_SIZE*TILE_SIZE; rA3 = shared_mem + 2*TILE_SIZE*TILE_SIZE; int tx = threadIdx.x; int nn,kk,mm; int num_tiles = (N/TILE_SIZE) + ((N%TILE_SIZE)!=0); for(kk=0; kk<num_tiles; kk++) { for(nn=0; nn<kk; nn++) { // load_full_tile(kk,nn,d_mat,rA3,N); load_full_tile(d_mat , rA3, kk,nn,N,M, shared_size_single_matrix); for(mm=0; mm<nn; mm++) { // load_full_tile(kk,mm,d_mat,rA1,N); load_full_tile(d_mat , rA1, kk,mm,N,M, shared_size_single_matrix); // load_full_tile(nn,mm,d_mat,rA2,N); load_full_tile(d_mat , rA2, nn,mm,N,M, shared_size_single_matrix); sgemm_tile(&rA1[tx*shared_size_single_matrix],&rA2[tx*shared_size_single_matrix],&rA3[tx*shared_size_single_matrix]); __syncthreads(); } // load_lower_tile(nn,nn,d_mat,rA1,N); load_lower_tile(d_mat , rA1, nn,nn,N,M, shared_size_single_matrix); strsm_tile(&rA1[tx*shared_size_single_matrix], &rA3[tx*shared_size_single_matrix]); __syncthreads(); store_full_tile(d_mat , rA3, kk,nn,N,M, shared_size_single_matrix); } // load_lower_tile(kk,kk,d_mat,rA1,N); load_lower_tile(d_mat , rA1, kk,kk,N,M, shared_size_single_matrix); for(nn=0; nn<kk; nn++) { // load_full_tile(kk,nn,d_mat,rA2,N); load_full_tile(d_mat , rA2, kk,nn,N,M, shared_size_single_matrix); ssyrk_tile(&rA2[tx*shared_size_single_matrix],&rA1[tx*shared_size_single_matrix]); __syncthreads(); } spotrf_tile(&rA1[tx*shared_size_single_matrix]); //__syncthreads(); // store_lower_tile(kk,kk,d_mat,rA1,N); store_lower_tile(d_mat , rA1, kk,kk,N,M, shared_size_single_matrix); } rA1[threadIdx.z*TILE_SIZE + threadIdx.y] = 0.0; __syncthreads(); for(kk=0; kk<num_tiles; kk++) for(nn=kk+1; nn<num_tiles; nn++) { if(kk < nn) { 
// printf("hii\n %d %d", kk, nn); store_zeros(d_mat, kk, nn, N, M); } else { store_full_tile(d_mat , rA1, kk,nn,N,M, shared_size_single_matrix); } // if(threadIdx.x==0 && threadIdx.y==0 && threadIdx.z==0) // print_matrix(d_mat,M,N); } } int main() { // READ FROM THE INPUT FILE FILE *fptr; fptr = fopen("./dataset/size4_256matrices.txt", "r"); int num_of_matrices, dim_of_matrix; fscanf(fptr, "%d", &num_of_matrices); fscanf(fptr, "%d", &dim_of_matrix); float read_element; float* h_A = NULL; int numElements = num_of_matrices * dim_of_matrix * dim_of_matrix; size_t size = numElements * sizeof(float); hipDeviceProp_t devp; hipGetDeviceProperties(&devp, 0); h_A = (float *)malloc(size); int global_id = 0; for(int p = 0; p < (num_of_matrices/chunk_size); p++) { for (int matrix_index = 0; matrix_index < chunk_size; matrix_index++) { for (int row = 0; row < dim_of_matrix; row++) { for (int column = 0; column < dim_of_matrix; column++) { fscanf(fptr, "%f", &read_element); int x = row*dim_of_matrix + column; global_id = (p*chunk_size)*dim_of_matrix*dim_of_matrix + x*chunk_size + matrix_index; h_A[global_id] = read_element; } } } } printf("\nRead from the input file successfully!\n"); fclose(fptr); printf("\nPrinting the host-side input array read from the input file:\n"); for (int i = 0; i < numElements; i++) { printf("%f ", h_A[i]); } printf("\n\n"); // COPY TO DEVICE hipError_t err = hipSuccess; float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } else { printf("Copied the h_A to device side successfully!\n\n"); } // LAUNCH KERNEL int num_of_matrices_per_block = min(min(1024/(TILE_SIZE * TILE_SIZE) , num_of_matrices), chunk_size); dim3 grid(num_of_matrices / num_of_matrices_per_block , 1, 1); dim3 block(num_of_matrices_per_block, TILE_SIZE, TILE_SIZE); // no of tiles in a column int INPUT_SIZE = dim_of_matrix; int no_of_tiles = (INPUT_SIZE / TILE_SIZE) + (INPUT_SIZE % TILE_SIZE != 0); // ceil of (INPUT_SIZE / TILE_SIZE) hipLaunchKernelGGL(( launch_kernel), dim3(grid), dim3(block), num_of_matrices_per_block * 3 * TILE_SIZE * TILE_SIZE * sizeof(float), 0, d_A, dim_of_matrix, num_of_matrices ,3 * TILE_SIZE * TILE_SIZE); // hipMemcpy(h_mat, d_mat, size, hipMemcpyDeviceToHost); // print_matrix(h_mat, N, N); err = hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } else { printf("\nCopied d_A to host side successfully!\n"); } printf("\nPrinting the output of cudememcopyDeviceToHost, i.e. 
the host-side array returned from device side:\n"); for (int i = 0; i < numElements; i++) { printf("%f ", h_A[i]); } err = hipFree(d_A); if(err != hipSuccess) { fprintf(stderr, "\nFailed to free device matrix M (error code %s)\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipDeviceReset(); if(err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipError_t cudaerr = hipDeviceSynchronize(); if (cudaerr != hipSuccess) { printf("kernel launch failed with error \"%s\".\n", hipGetErrorString(cudaerr)); } FILE *fptr1; fptr1 = fopen("./output_c.txt", "w+"); float write_element; fprintf(fptr1, "%d\n", num_of_matrices); fprintf(fptr1, "%d\n", dim_of_matrix); for(int p = 0; p < (num_of_matrices/chunk_size); p++) { for (int matrix_index = 0; matrix_index < chunk_size; matrix_index++) { for (int row = 0; row < dim_of_matrix; row++) { for (int column = 0; column < dim_of_matrix; column++) { // fscanf(fptr, "%f", &read_element); int x = row*dim_of_matrix + column; global_id = (p*chunk_size)*dim_of_matrix*dim_of_matrix + x*chunk_size + matrix_index; write_element = h_A[global_id]; fprintf(fptr1, "%0.2f ", write_element); // printf("At pos %d we get %0.2f\n", global_id, h_A[global_id]); // printf("%0.2f \n ", h_A[global_id]); } fprintf(fptr1, "\n"); } // fprintf(fptr1, "\n"); } } fclose(fptr1); free(h_A); printf("\n\nAll tasks completed successfully!\n\n"); return 0; }
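/*
 * Note on the storage layout used by the batched Cholesky code above: matrices are
 * grouped in sets of chunk_size, and the elements of the matrices within a group are
 * interleaved so that consecutive threads of a block touch consecutive global
 * addresses (coalesced loads/stores). The host-side helper below is a sketch of that
 * index map, mirroring the global_id expression used in load_full_tile,
 * store_full_tile and main(); the function name is ours, not part of the file.
 */
static inline int chunked_index(int matrix, int row, int col, int N) {
    int group_base = (matrix / chunk_size) * chunk_size * N * N;  // start of this group of matrices
    int x = row * N + col;                                        // row-major offset within one matrix
    return group_base + x * chunk_size + (matrix % chunk_size);   // group elements interleaved
}
/*
 * Example: with chunk_size == 2 and N == 4, element (0,1) of matrix 3 maps to
 * 32 + 1*2 + 1 = 35, directly next to the same element of matrix 2 at index 34.
 */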
37c9d88ab760fcb91e31010830b396100329e1a3.cu
#include <cuda_runtime.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #define TILE_SIZE 4 #define chunk_size 2 /*The algorithm and code given in the main reference paper have been followed*/ /*All matrices stored and accessed in row major form*/ /*Function to perform rank-k update */ __device__ void ssyrk_tile(float* rA1, float* rA2) { int row = threadIdx.z; int column = threadIdx.y; if(column <= row) { float updatedValue = rA2[row * TILE_SIZE + column]; for(int k=0; k<TILE_SIZE; k++) { updatedValue -= rA1[row * TILE_SIZE + k] * rA1[column * TILE_SIZE + k]; } rA2[row * TILE_SIZE + column] = updatedValue; } } /*General Matrix Multiplication*/ __device__ void sgemm_tile(float* rA1, float* rA2, float* rA3) { int row = threadIdx.z; int column = threadIdx.y; float updatedValue = rA3[row * TILE_SIZE + column]; for(int i=0; i<TILE_SIZE; i++) { updatedValue -= rA1[row * TILE_SIZE + i] * rA2[column*TILE_SIZE + i]; } rA3[row * TILE_SIZE + column] = updatedValue; } /*Function to perform Cholesky Factorization for a tile*/ __device__ void spotrf_tile(float* t_A) { int t_x = threadIdx.y; int t_y = threadIdx.z; for(int k=0;k<TILE_SIZE;k++) { if(t_x==t_y && t_x==k) t_A[k*TILE_SIZE + k] = sqrtf(t_A[k*TILE_SIZE + k]); __syncthreads(); if(t_x<t_y && t_x == k && t_x<TILE_SIZE && t_y<TILE_SIZE) { t_A[t_y*TILE_SIZE + k]/= t_A[k*TILE_SIZE + k]; } __syncthreads(); if(t_x<=t_y && t_x>k && t_y>k && t_x<TILE_SIZE && t_y<TILE_SIZE) { t_A[t_y*TILE_SIZE + t_x]-= t_A[t_x*TILE_SIZE + k]*t_A[t_y*TILE_SIZE + k]; } __syncthreads(); } } /*Function to perform triangular solve for a tile */ __device__ void strsm_tile(float *t_A1, float *t_A2) { int tx = threadIdx.y; int ty = threadIdx.z; for(int i=0;i<TILE_SIZE;i++) { if(tx==0) { t_A2[ty*TILE_SIZE + i] /= t_A1[i*TILE_SIZE + i]; } __syncthreads(); if(tx>i && i<TILE_SIZE-1) { t_A2[ty*TILE_SIZE+tx] -= (t_A2[ty*TILE_SIZE + i]*t_A1[tx*TILE_SIZE + i]); } __syncthreads(); } } // __device__ void load_full_tile(int m, int n, float* g_in, float* arr, int N) // { // int i = m*TILE_SIZE + threadIdx.z; // int j = n*TILE_SIZE + threadIdx.y; // if(!(i<N && j<N)) return; // arr[threadIdx.z*TILE_SIZE + threadIdx.y] = g_in[i*N + j]; // __syncthreads(); // } __device__ void load_full_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) { int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int tz = threadIdx.z; //printf("%d %d %d \n",tx,ty,tz); int row = tile_y * TILE_SIZE + tz; // access row int column = tile_x * TILE_SIZE + ty; // access col if(ty < TILE_SIZE && tz < TILE_SIZE && tx<M) { int g_threadX = blockIdx.x * blockDim.x + threadIdx.x; int x = row*N + column; int global_id = ((g_threadX / chunk_size) *chunk_size)*N*N + x *chunk_size + (g_threadX % chunk_size); s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx] = (row < N && column < N) ? g_mem[ global_id] : 0; // we need to think about access expression of global memory. //M: Total number of matrices. 
N:dim of matrix } __syncthreads(); } // __device__ void load_full_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) // { // int tx = threadIdx.x; // local threadid in x // int ty = threadIdx.y; // local threadid in y // int tz = threadIdx.z; // //printf("%d %d %d \n",tx,ty,tz); // int row = tile_y * TILE_SIZE + tz; // access row // int column = tile_x * TILE_SIZE + ty; // access col // if(ty < TILE_SIZE && tz < TILE_SIZE && tx<M) // { // s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx] = (row < N && column < N) ? g_mem[ blockIdx.x * blockDim.x + tx + row * N * M + column*M] : 0; // we need to think about access expression of global memory. //M: Total number of matrices. N:dim of matrix // } // __syncthreads(); // } // __device__ void store_full_tile(int m, int n, float* g_in, float* arr, int N) // { // int i = m*TILE_SIZE + threadIdx.z; // int j = n*TILE_SIZE + threadIdx.y; // if(!(i<N && j<N)) return; // g_in[i*N + j] = arr[threadIdx.z*TILE_SIZE + threadIdx.y]; // __syncthreads(); // } __device__ void store_full_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) { int tx = threadIdx.y; // local threadid in x int ty = threadIdx.z; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { int g_threadX = blockIdx.x * blockDim.x + threadIdx.x; int x = row*N + column; int global_id = ((g_threadX / chunk_size) *chunk_size)*N*N + x *chunk_size + (g_threadX % chunk_size); g_mem[global_id] = (tx < TILE_SIZE && ty < TILE_SIZE) ? s_mem[ty * TILE_SIZE + tx + shared_size_single_matrix*threadIdx.x] : 0; } __syncthreads(); } __device__ void load_lower_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) { // int i = m*TILE_SIZE + threadIdx.z; // int j = n*TILE_SIZE + threadIdx.y; // if(!(i<N && j<N)) return; // if(threadIdx.y<=threadIdx.z) // arr[threadIdx.z*TILE_SIZE + threadIdx.y] = g_in[i*N + j]; // else arr[threadIdx.z*TILE_SIZE + threadIdx.y] = 0.0; // __syncthreads(); int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int tz = threadIdx.z; //printf("%d %d %d \n",tx,ty,tz); int row = tile_y * TILE_SIZE + tz; // access row int column = tile_x * TILE_SIZE + ty; // access col if(ty < TILE_SIZE && tz < TILE_SIZE && tx<M) { int g_threadX = blockIdx.x * blockDim.x + threadIdx.x; int x = row*N + column; int global_id = ((g_threadX / chunk_size) *chunk_size)*N*N + x *chunk_size + (g_threadX % chunk_size); if(threadIdx.y<=threadIdx.z) s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx] = (row < N && column < N) ? g_mem[ global_id] : 0; // we need to think about access expression of global memory. //M: Total number of matrices. 
N:dim of matrix else s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx] = 0.0; } __syncthreads(); } // __device__ void store_lower_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) // { // // int i = m*TILE_SIZE + threadIdx.z; // // int j = n*TILE_SIZE + threadIdx.y; // // if(!(i<N && j<N)) return; // // if(threadIdx.y<=threadIdx.z) // // g_in[i*N + j] = arr[threadIdx.z*TILE_SIZE + threadIdx.y]; // // else g_in[i*N + j] = 0.0; // // __syncthreads(); // int tx = threadIdx.x; // local threadid in x // int ty = threadIdx.y; // local threadid in y // int tz = threadIdx.z; // //printf("%d %d %d \n",tx,ty,tz); // int row = tile_y * TILE_SIZE + tz; // access row // int column = tile_x * TILE_SIZE + ty; // access col // if(ty < TILE_SIZE && tz < TILE_SIZE && tx<M) // { // if(threadIdx.y<=threadIdx.z) // g_mem[ blockIdx.x * blockDim.x + tx + row * N * M + column*M] = s_mem[tz * TILE_SIZE + ty + shared_size_single_matrix*tx]; // we need to think about access expression of global memory. //M: Total number of matrices. N:dim of matrix // else // g_mem[ blockIdx.x * blockDim.x + tx + row * N * M + column*M] = 0.0; // } // __syncthreads(); // } __device__ void store_lower_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) { int tx = threadIdx.y; // local threadid in x int ty = threadIdx.z; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { int g_threadX = blockIdx.x * blockDim.x + threadIdx.x; int x = row*N + column; int global_id = ((g_threadX / chunk_size) *chunk_size)*N*N + x *chunk_size + (g_threadX % chunk_size); g_mem[global_id] = (tx < TILE_SIZE && ty < TILE_SIZE && column <= row) ? s_mem[ty * TILE_SIZE + tx + shared_size_single_matrix*threadIdx.x] : 0; } __syncthreads(); } // __device__ void store_lower_tile(float *g_mem, float *s_mem, int tile_y, int tile_x, int N, int M, int shared_size_single_matrix) // { // int tx = threadIdx.y; // local threadid in x // int ty = threadIdx.z; // local threadid in y // int row = tile_y * TILE_SIZE + ty; // access row // int column = tile_x * TILE_SIZE + tx; // access col // if(row < N && column < N) // { // g_mem[blockIdx.x * blockDim.x + threadIdx.x + row * N * M + column*M] = (tx < TILE_SIZE && ty < TILE_SIZE && column <= row) ? 
s_mem[ty * TILE_SIZE + tx + shared_size_single_matrix*threadIdx.x] : 0; // } // __syncthreads(); // } __device__ void store_zeros(float *g_mem, int tile_y, int tile_x, int N, int M) { int tx = threadIdx.y; // local threadid in x int ty = threadIdx.z; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { int g_threadX = blockIdx.x * blockDim.x + threadIdx.x; int x = row*N + column; int global_id = ((g_threadX / chunk_size) *chunk_size)*N*N + x *chunk_size + (g_threadX % chunk_size); g_mem[global_id] = 0; } __syncthreads(); } // __device__ void store_zeros(float *g_mem, int tile_y, int tile_x, int N, int M) // { // int tx = threadIdx.y; // local threadid in x // int ty = threadIdx.z; // local threadid in y // int row = tile_y * TILE_SIZE + ty; // access row // int column = tile_x * TILE_SIZE + tx; // access col // if(row < N && column < N) // { // // printf("hello %d %d\n", threadIdx.x ,blockIdx.x * blockDim.x + threadIdx.x + row * N * M + column*M); // g_mem[blockIdx.x * blockDim.x + threadIdx.x + row * N * M + column*M] = 0; // } // __syncthreads(); // } __device__ void print_matrix(float* h_A,int num_of_matrices,int dim_of_matrix) { for (int matrix_index = 0; matrix_index < num_of_matrices; matrix_index++) { for (int row = 0; row < dim_of_matrix; row++) { for (int column = 0; column < dim_of_matrix; column++) { //write_element = h_A[matrix_index * dim_of_matrix * dim_of_matrix + row * dim_of_matrix + column]; int global_id = row * dim_of_matrix * num_of_matrices + column * num_of_matrices + matrix_index; float write_element = h_A[global_id] ; printf("%0.2f ", write_element); } printf("\n"); } printf("\n"); } } __global__ void launch_kernel(float* d_mat, int N, int M , int shared_size_single_matrix) { extern __shared__ float shared_mem[]; float *rA1, *rA2, *rA3; rA1 = shared_mem; rA2 = shared_mem + TILE_SIZE*TILE_SIZE; rA3 = shared_mem + 2*TILE_SIZE*TILE_SIZE; int tx = threadIdx.x; int nn,kk,mm; int num_tiles = (N/TILE_SIZE) + ((N%TILE_SIZE)!=0); for(kk=0; kk<num_tiles; kk++) { for(nn=0; nn<kk; nn++) { // load_full_tile(kk,nn,d_mat,rA3,N); load_full_tile(d_mat , rA3, kk,nn,N,M, shared_size_single_matrix); for(mm=0; mm<nn; mm++) { // load_full_tile(kk,mm,d_mat,rA1,N); load_full_tile(d_mat , rA1, kk,mm,N,M, shared_size_single_matrix); // load_full_tile(nn,mm,d_mat,rA2,N); load_full_tile(d_mat , rA2, nn,mm,N,M, shared_size_single_matrix); sgemm_tile(&rA1[tx*shared_size_single_matrix],&rA2[tx*shared_size_single_matrix],&rA3[tx*shared_size_single_matrix]); __syncthreads(); } // load_lower_tile(nn,nn,d_mat,rA1,N); load_lower_tile(d_mat , rA1, nn,nn,N,M, shared_size_single_matrix); strsm_tile(&rA1[tx*shared_size_single_matrix], &rA3[tx*shared_size_single_matrix]); __syncthreads(); store_full_tile(d_mat , rA3, kk,nn,N,M, shared_size_single_matrix); } // load_lower_tile(kk,kk,d_mat,rA1,N); load_lower_tile(d_mat , rA1, kk,kk,N,M, shared_size_single_matrix); for(nn=0; nn<kk; nn++) { // load_full_tile(kk,nn,d_mat,rA2,N); load_full_tile(d_mat , rA2, kk,nn,N,M, shared_size_single_matrix); ssyrk_tile(&rA2[tx*shared_size_single_matrix],&rA1[tx*shared_size_single_matrix]); __syncthreads(); } spotrf_tile(&rA1[tx*shared_size_single_matrix]); //__syncthreads(); // store_lower_tile(kk,kk,d_mat,rA1,N); store_lower_tile(d_mat , rA1, kk,kk,N,M, shared_size_single_matrix); } rA1[threadIdx.z*TILE_SIZE + threadIdx.y] = 0.0; __syncthreads(); for(kk=0; kk<num_tiles; kk++) for(nn=kk+1; nn<num_tiles; nn++) { if(kk < nn) { 
// printf("hii\n %d %d", kk, nn); store_zeros(d_mat, kk, nn, N, M); } else { store_full_tile(d_mat , rA1, kk,nn,N,M, shared_size_single_matrix); } // if(threadIdx.x==0 && threadIdx.y==0 && threadIdx.z==0) // print_matrix(d_mat,M,N); } } int main() { // READ FROM THE INPUT FILE FILE *fptr; fptr = fopen("./dataset/size4_256matrices.txt", "r"); int num_of_matrices, dim_of_matrix; fscanf(fptr, "%d", &num_of_matrices); fscanf(fptr, "%d", &dim_of_matrix); float read_element; float* h_A = NULL; int numElements = num_of_matrices * dim_of_matrix * dim_of_matrix; size_t size = numElements * sizeof(float); cudaDeviceProp devp; cudaGetDeviceProperties(&devp, 0); h_A = (float *)malloc(size); int global_id = 0; for(int p = 0; p < (num_of_matrices/chunk_size); p++) { for (int matrix_index = 0; matrix_index < chunk_size; matrix_index++) { for (int row = 0; row < dim_of_matrix; row++) { for (int column = 0; column < dim_of_matrix; column++) { fscanf(fptr, "%f", &read_element); int x = row*dim_of_matrix + column; global_id = (p*chunk_size)*dim_of_matrix*dim_of_matrix + x*chunk_size + matrix_index; h_A[global_id] = read_element; } } } } printf("\nRead from the input file successfully!\n"); fclose(fptr); printf("\nPrinting the host-side input array read from the input file:\n"); for (int i = 0; i < numElements; i++) { printf("%f ", h_A[i]); } printf("\n\n"); // COPY TO DEVICE cudaError_t err = cudaSuccess; float *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } else { printf("Copied the h_A to device side successfully!\n\n"); } // LAUNCH KERNEL int num_of_matrices_per_block = min(min(1024/(TILE_SIZE * TILE_SIZE) , num_of_matrices), chunk_size); dim3 grid(num_of_matrices / num_of_matrices_per_block , 1, 1); dim3 block(num_of_matrices_per_block, TILE_SIZE, TILE_SIZE); // no of tiles in a column int INPUT_SIZE = dim_of_matrix; int no_of_tiles = (INPUT_SIZE / TILE_SIZE) + (INPUT_SIZE % TILE_SIZE != 0); // ceil of (INPUT_SIZE / TILE_SIZE) launch_kernel<<<grid, block, num_of_matrices_per_block * 3 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,3 * TILE_SIZE * TILE_SIZE); // cudaMemcpy(h_mat, d_mat, size, cudaMemcpyDeviceToHost); // print_matrix(h_mat, N, N); err = cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } else { printf("\nCopied d_A to host side successfully!\n"); } printf("\nPrinting the output of cudememcopyDeviceToHost, i.e. 
the host-side array returned from device side:\n"); for (int i = 0; i < numElements; i++) { printf("%f ", h_A[i]); } err = cudaFree(d_A); if(err != cudaSuccess) { fprintf(stderr, "\nFailed to free device matrix M (error code %s)\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaDeviceReset(); if(err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaError_t cudaerr = cudaDeviceSynchronize(); if (cudaerr != cudaSuccess) { printf("kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr)); } FILE *fptr1; fptr1 = fopen("./output_c.txt", "w+"); float write_element; fprintf(fptr1, "%d\n", num_of_matrices); fprintf(fptr1, "%d\n", dim_of_matrix); for(int p = 0; p < (num_of_matrices/chunk_size); p++) { for (int matrix_index = 0; matrix_index < chunk_size; matrix_index++) { for (int row = 0; row < dim_of_matrix; row++) { for (int column = 0; column < dim_of_matrix; column++) { // fscanf(fptr, "%f", &read_element); int x = row*dim_of_matrix + column; global_id = (p*chunk_size)*dim_of_matrix*dim_of_matrix + x*chunk_size + matrix_index; write_element = h_A[global_id]; fprintf(fptr1, "%0.2f ", write_element); // printf("At pos %d we get %0.2f\n", global_id, h_A[global_id]); // printf("%0.2f \n ", h_A[global_id]); } fprintf(fptr1, "\n"); } // fprintf(fptr1, "\n"); } } fclose(fptr1); free(h_A); printf("\n\nAll tasks completed successfully!\n\n"); return 0; }
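/*
 * A host-side sanity check for the factorization above (not part of the original
 * program): after d_A is copied back, the lower-triangular factor L stored for each
 * matrix should satisfy L * L^T ~= the original SPD input. The accessor mirrors the
 * chunked global_id expression used in main(); the function name and tolerance are
 * ours, and A_in is assumed to hold the input matrices in the same layout
 * (<math.h> is already included by the file).
 */
static int check_cholesky(const float *L_out, const float *A_in,
                          int matrix, int N, float tol) {
    for (int i = 0; i < N; i++) {
        for (int j = 0; j <= i; j++) {
            float acc = 0.0f;
            for (int k = 0; k <= j; k++) {  // (L * L^T)(i,j) only needs k <= j for lower-triangular L
                int gik = (matrix / chunk_size) * chunk_size * N * N + (i * N + k) * chunk_size + (matrix % chunk_size);
                int gjk = (matrix / chunk_size) * chunk_size * N * N + (j * N + k) * chunk_size + (matrix % chunk_size);
                acc += L_out[gik] * L_out[gjk];
            }
            int gij = (matrix / chunk_size) * chunk_size * N * N + (i * N + j) * chunk_size + (matrix % chunk_size);
            if (fabsf(acc - A_in[gij]) > tol) return 0;  // mismatch at (i, j)
        }
    }
    return 1;  // lower triangle reproduced within tolerance
}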
4e345fdcd33c677273ea91645e31b73fda738ca4.hip
// !!! This is a file automatically generated by hipify!!! #include "hipsparse.h" #include <hip/hip_runtime.h> #include "utils.h" #include "time.h" int main(int argc, char *argv[]) { /*********************************************** * initialize program's input parameters * ***********************************************/ double alpha = 1; double beta = 0; double norm = 0; hipsparseHandle_t handle = 0; hipsparseMatDescr_t descr = 0; hipsparseCreate(&handle); hipsparseCreateMatDescr(&descr); h_vec_t<double> h_distance_1; int num_feat_1 = atoi(argv[2]); ReadMatrix(h_distance_1, argv[1], num_feat_1); #ifdef ACCELERATE std::cout << "CUDA" << std::endl; d_vec_t<double> d_distance_1 = h_distance_1; #endif h_vec_t<double> h_distance_2; int num_feat_2 = atoi(argv[4]); ReadMatrix(h_distance_2, argv[3], num_feat_2); #ifdef ACCELERATE d_vec_t<double> d_distance_2 = h_distance_2; #endif h_vec_t<double> h_distance_3; int num_feat_3 = atoi(argv[6]); ReadMatrix(h_distance_3, argv[5], num_feat_3); #ifdef ACCELERATE d_vec_t<double> d_distance_3 = h_distance_3; #endif int num_iters = 20; if (10 == argc) num_iters = atoi(argv[9]); /************************************************** * construct affinity COO matrix * ***************************************************/ double *distance1 = raw_pointer_cast(h_distance_1.data()); double *distance2 = raw_pointer_cast(h_distance_2.data()); double *distance3 = raw_pointer_cast(h_distance_3.data()); const clock_t begin_time = clock(); stdvec_tuple_t aff_coo = AffinityOrigCoo(distance1, distance2, distance3, num_feat_1, num_feat_2, num_feat_3); // h_vec_t<double> value; // h_vec_t<int> column; // h_vec_t<int> row; d_vec_t<double> d_value(get<0>(aff_coo)); d_vec_t<int> d_column(get<1>(aff_coo)); d_vec_t<int> d_row(get<2>(aff_coo)); d_vec_t<int> d_csr_row(num_feat_1 * num_feat_2 + 1); hipsparseXcoo2csr(handle, raw_pointer_cast(d_row.data()), d_row.size(), num_feat_1 * num_feat_2, raw_pointer_cast(d_csr_row.data()), HIPSPARSE_INDEX_BASE_ZERO); std::cout << "affinity runtime: " << float(clock() - begin_time) / CLOCKS_PER_SEC * 1000 << std::endl; // for (int i = 0; i < get<0>(aff_coo).size(); ++i) { // std::cout << "values: " << get<0>(aff_coo)[i] // << " columns: " << get<1>(aff_coo)[i] // << " rows: " << get<2>(aff_coo)[i] << std::endl; //} // std::cout << "affinity" << std::endl; // std::cout << "values " // << " " // << "columns" // << " " // << "rows" << std::endl; // for (int i = 0; i < d_value.size(); ++i) { // std::cout << d_value[i] << " " << d_column[i] << " " << d_row[i] // << std::endl; // } // std::cout << std::endl; /************************************************ * initialize eigen vectors * ************************************************/ int len_eigen_vec = num_feat_1 * num_feat_2 * num_feat_3; d_vec_t<double> d_eigen_new(len_eigen_vec); fill(d_eigen_new.begin(), d_eigen_new.end(), 0); d_vec_t<double> d_eigen_old(len_eigen_vec); norm = 1.0 / sqrt(len_eigen_vec); fill(d_eigen_old.begin(), d_eigen_old.end(), norm); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); /************************************************ * computing eigen vector * ************************************************/ const clock_t begin_time2 = clock(); for (int i = 0; i < num_iters; ++i) { hipsparseDcsrmv( handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, len_eigen_vec, len_eigen_vec, d_value.size(), &alpha, descr, raw_pointer_cast(d_value.data()), raw_pointer_cast(d_csr_row.data()), raw_pointer_cast(d_column.data()), 
raw_pointer_cast(d_eigen_old.data()), &beta, raw_pointer_cast(d_eigen_new.data())); double init = 0; norm = std::sqrt(transform_reduce(d_eigen_new.begin(), d_eigen_new.end(), square(), init, thrust::plus<double>())); transform(d_eigen_new.begin(), d_eigen_new.end(), d_eigen_old.begin(), division(norm)); fill(d_eigen_new.begin(), d_eigen_new.end(), 0); } std::cout << "Eigen runtime: " << float(clock() - begin_time2) / CLOCKS_PER_SEC * 1000 << std::endl; // std::cout << "eigen values" << std::endl; // for (int i = 0; i < d_eigen_old.size(); i++) { // std::cout << "eigen new value = " << d_eigen_new[i] << " "; // std::cout << "eigen old value = " << d_eigen_old[i] << std::endl; // } hipsparseDestroyMatDescr(descr); descr = 0; // destroy handle hipsparseDestroy(handle); handle = 0; return (0); }
4e345fdcd33c677273ea91645e31b73fda738ca4.cu
#include "cusparse.h" #include <cuda_runtime.h> #include "utils.h" #include "time.h" int main(int argc, char *argv[]) { /*********************************************** * initialize program's input parameters * ***********************************************/ double alpha = 1; double beta = 0; double norm = 0; cusparseHandle_t handle = 0; cusparseMatDescr_t descr = 0; cusparseCreate(&handle); cusparseCreateMatDescr(&descr); h_vec_t<double> h_distance_1; int num_feat_1 = atoi(argv[2]); ReadMatrix(h_distance_1, argv[1], num_feat_1); #ifdef ACCELERATE std::cout << "CUDA" << std::endl; d_vec_t<double> d_distance_1 = h_distance_1; #endif h_vec_t<double> h_distance_2; int num_feat_2 = atoi(argv[4]); ReadMatrix(h_distance_2, argv[3], num_feat_2); #ifdef ACCELERATE d_vec_t<double> d_distance_2 = h_distance_2; #endif h_vec_t<double> h_distance_3; int num_feat_3 = atoi(argv[6]); ReadMatrix(h_distance_3, argv[5], num_feat_3); #ifdef ACCELERATE d_vec_t<double> d_distance_3 = h_distance_3; #endif int num_iters = 20; if (10 == argc) num_iters = atoi(argv[9]); /************************************************** * construct affinity COO matrix * ***************************************************/ double *distance1 = raw_pointer_cast(h_distance_1.data()); double *distance2 = raw_pointer_cast(h_distance_2.data()); double *distance3 = raw_pointer_cast(h_distance_3.data()); const clock_t begin_time = clock(); stdvec_tuple_t aff_coo = AffinityOrigCoo(distance1, distance2, distance3, num_feat_1, num_feat_2, num_feat_3); // h_vec_t<double> value; // h_vec_t<int> column; // h_vec_t<int> row; d_vec_t<double> d_value(get<0>(aff_coo)); d_vec_t<int> d_column(get<1>(aff_coo)); d_vec_t<int> d_row(get<2>(aff_coo)); d_vec_t<int> d_csr_row(num_feat_1 * num_feat_2 + 1); cusparseXcoo2csr(handle, raw_pointer_cast(d_row.data()), d_row.size(), num_feat_1 * num_feat_2, raw_pointer_cast(d_csr_row.data()), CUSPARSE_INDEX_BASE_ZERO); std::cout << "affinity runtime: " << float(clock() - begin_time) / CLOCKS_PER_SEC * 1000 << std::endl; // for (int i = 0; i < get<0>(aff_coo).size(); ++i) { // std::cout << "values: " << get<0>(aff_coo)[i] // << " columns: " << get<1>(aff_coo)[i] // << " rows: " << get<2>(aff_coo)[i] << std::endl; //} // std::cout << "affinity" << std::endl; // std::cout << "values " // << " " // << "columns" // << " " // << "rows" << std::endl; // for (int i = 0; i < d_value.size(); ++i) { // std::cout << d_value[i] << " " << d_column[i] << " " << d_row[i] // << std::endl; // } // std::cout << std::endl; /************************************************ * initialize eigen vectors * ************************************************/ int len_eigen_vec = num_feat_1 * num_feat_2 * num_feat_3; d_vec_t<double> d_eigen_new(len_eigen_vec); fill(d_eigen_new.begin(), d_eigen_new.end(), 0); d_vec_t<double> d_eigen_old(len_eigen_vec); norm = 1.0 / sqrt(len_eigen_vec); fill(d_eigen_old.begin(), d_eigen_old.end(), norm); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); /************************************************ * computing eigen vector * ************************************************/ const clock_t begin_time2 = clock(); for (int i = 0; i < num_iters; ++i) { cusparseDcsrmv( handle, CUSPARSE_OPERATION_NON_TRANSPOSE, len_eigen_vec, len_eigen_vec, d_value.size(), &alpha, descr, raw_pointer_cast(d_value.data()), raw_pointer_cast(d_csr_row.data()), raw_pointer_cast(d_column.data()), raw_pointer_cast(d_eigen_old.data()), &beta, 
raw_pointer_cast(d_eigen_new.data())); double init = 0; norm = std::sqrt(transform_reduce(d_eigen_new.begin(), d_eigen_new.end(), square(), init, thrust::plus<double>())); transform(d_eigen_new.begin(), d_eigen_new.end(), d_eigen_old.begin(), division(norm)); fill(d_eigen_new.begin(), d_eigen_new.end(), 0); } std::cout << "Eigen runtime: " << float(clock() - begin_time2) / CLOCKS_PER_SEC * 1000 << std::endl; // std::cout << "eigen values" << std::endl; // for (int i = 0; i < d_eigen_old.size(); i++) { // std::cout << "eigen new value = " << d_eigen_new[i] << " "; // std::cout << "eigen old value = " << d_eigen_old[i] << std::endl; // } cusparseDestroyMatDescr(descr); descr = 0; // destroy handle cusparseDestroy(handle); handle = 0; return (0); }
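The power-iteration loop above normalizes d_eigen_new with transform_reduce and transform, relying on `square` and `division` functors that come from the project's utils.h, which is not reproduced here. A minimal sketch of what those functors would have to look like for the calls to compile, assuming plain double data; the names and members are assumptions, not the actual header:

// Hypothetical stand-ins for the utils.h functors used by the loop above.
struct square {
    // x -> x*x, applied element-wise by transform_reduce to accumulate the squared norm
    __host__ __device__ double operator()(double x) const { return x * x; }
};

struct division {
    double denom;  // the L2 norm obtained from transform_reduce
    explicit division(double d) : denom(d) {}
    // x -> x/denom, applied element-wise by transform to rescale the iterate
    __host__ __device__ double operator()(double x) const { return x / denom; }
};

With functors of this shape, d_eigen_old ends up holding the unit-norm iterate and d_eigen_new is zeroed for the next multiply, which matches the loop body above.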
d6ae4dd7fef8e131fac928f61bc54451a285d8ec.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;

__global__ void add( int a, int b, int *c )
{
    *c = a + b;
}

int main( void )
{
    int c;
    int *dev_c;
    hipMalloc( &dev_c, sizeof(int) );
    hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, 2, 7, dev_c );
    hipMemcpy( &c, dev_c, sizeof(int), hipMemcpyDeviceToHost );
    //printf( "2 + 7 = %d\n",c);
    cout << "2 + 7 =" << c;
    hipFree( dev_c );
    return 0;
}
d6ae4dd7fef8e131fac928f61bc54451a285d8ec.cu
#include <iostream>
using namespace std;

__global__ void add( int a, int b, int *c )
{
    *c = a + b;
}

int main( void )
{
    int c;
    int *dev_c;
    cudaMalloc( &dev_c, sizeof(int) );
    add<<<1,1>>>( 2, 7, dev_c );
    cudaMemcpy( &c, dev_c, sizeof(int), cudaMemcpyDeviceToHost );
    //printf( "2 + 7 = %d\n",c);
    cout << "2 + 7 =" << c;
    cudaFree( dev_c );
    return 0;
}
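The toy add example above (both versions) checks no API return codes. A hedged variant of the .cu program with basic error checking added around the allocation, launch, and copy; the CHECK macro is an illustration, not part of the original:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with a message if a CUDA runtime call fails.
#define CHECK(call)                                                         \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

__global__ void add(int a, int b, int *c) { *c = a + b; }

int main(void)
{
    int c = 0;
    int *dev_c = nullptr;
    CHECK(cudaMalloc(&dev_c, sizeof(int)));
    add<<<1, 1>>>(2, 7, dev_c);
    CHECK(cudaGetLastError());   // catches launch-configuration errors
    CHECK(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
    printf("2 + 7 = %d\n", c);
    CHECK(cudaFree(dev_c));
    return 0;
}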
f946785963d46923f5f66426b668b93dcac0d92a.hip
// !!! This is a file automatically generated by hipify!!! #include <unistd.h> #include <algorithm> #include <cstdlib> #include <limits> #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #define STB_IMAGE_IMPLEMENTATION #include "../../utils/stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "../../utils/stb_image_write.h" #include "../../utils/cycletimer.h" #define MAX_INTENSITY 256 #define NCHUNK 400 #define CHANNEL_NUM 1 #define BLOCK_SIDE 5 #define SCAN_BLOCK_DIM 256 #include "inclusiveScan.cu_inl" #define LOG2_WARP_SIZE 5U #define WARP_SIZE (1U << LOG2_WARP_SIZE) // modified from https://stackoverflow.com/questions/17399119/cant-we-use-atomic-operations-for-floating-point-variables-in-cuda __device__ static float atomicMax(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(fmaxf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } __global__ void set_histograms(uint8_t* old_img, uint8_t* new_img, int* histograms, int* sum_vals, int img_width, int img_height){ int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(i>=img_width || j>=img_height) return; int cur_color = old_img[img_width * j + i]; atomicAdd(&histograms[cur_color], 1); atomicAdd(&sum_vals[cur_color], cur_color); } //TODO convert this to the variable __global__ void otsu_single(int* global_threshold, uint8_t* old_img, uint8_t* new_img, int* histograms, int* sum_vals, int img_width, int img_height){ __shared__ int input_histogram[SCAN_BLOCK_DIM]; __shared__ int output_histogram[SCAN_BLOCK_DIM]; __shared__ int scratch_histogram[2 * SCAN_BLOCK_DIM]; __shared__ int input_sum[SCAN_BLOCK_DIM]; __shared__ int output_sum[SCAN_BLOCK_DIM]; __shared__ int scratch_sum[2 * SCAN_BLOCK_DIM]; __shared__ float max[1]; __shared__ int threshold; int id = blockIdx.x * blockDim.x + threadIdx.x; if(id==0){ max[0] = 0; } //__syncthreads(); input_histogram[id] = histograms[id]; input_sum[id] = sum_vals[id]; //printf("HERE %d", max[0]); __syncthreads(); sharedMemInclusiveScan(id, input_histogram, output_histogram, scratch_histogram, input_sum, output_sum, scratch_sum, SCAN_BLOCK_DIM); __syncthreads(); float p1_num = output_histogram[id] + 0.001; float p2_num = output_histogram[SCAN_BLOCK_DIM-1] - p1_num + 0.002; int total_sum = output_sum[SCAN_BLOCK_DIM-1]; int p1_sum = output_sum[id]; int p2_sum = total_sum - p1_sum; float p1_mu = ((float)(input_sum[id]))/p1_num; float p2_mu = ((float)p2_sum)/p2_num; float mu_diff = (p1_mu - p2_mu)/256; float var = p1_num * p2_num * mu_diff * mu_diff; atomicMax(max, var); if(var==max[0]){ threshold = id; } __syncthreads(); *global_threshold = threshold; } __global__ void set_val(int* threshold, uint8_t* old_img, uint8_t* new_img, int img_width, int img_height){ int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int id = j*img_width + i; if(i>=img_width && j>=img_width) return; //printf("HEREH %d", *threshold); if(old_img[id] > *threshold){ new_img[id] = 255; } else{ new_img[id] = 0; } } __global__ void set_histograms_zero(int* histograms,int* sum_vals){ int i = blockIdx.x * blockDim.x + threadIdx.x; histograms[i] = 0; sum_vals[i]=0; } void set_otsu(uint8_t* &old_img, int width, int height, int block_side, uint8_t* new_img_device, uint8_t* old_img_device, int* histograms, int* sum_vals) { int* 
global_threshold_device; uint8_t* new_img = (uint8_t*)malloc(sizeof(uint8_t) * height * width * CHANNEL_NUM); hipMalloc(&histograms, sizeof(int) * MAX_INTENSITY); hipMalloc(&sum_vals, sizeof(int) * MAX_INTENSITY ); hipMalloc(&new_img_device, sizeof(uint8_t) * height * width*CHANNEL_NUM ); hipMalloc(&old_img_device, sizeof(uint8_t) * height * width*CHANNEL_NUM ); hipMalloc(&global_threshold_device, sizeof(int)); float start_time_exc = currentSeconds(); dim3 threadsPerBlock(block_side, block_side); dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y); for(int i=0; i< NCHUNK; i++){ hipMemcpy(old_img_device, old_img, sizeof(uint8_t) * height * width*CHANNEL_NUM, hipMemcpyHostToDevice); int* threshold = (int*)malloc(sizeof(int)); //(uint8_t* old_img, uint8_t* new_img, int* histograms, int* sum_vals, int img_width, int img_height) hipLaunchKernelGGL(( set_histograms), dim3(gridDim), dim3(threadsPerBlock), 0, 0, old_img_device, new_img_device, histograms, sum_vals, width, height); //dim3 threadsPerBlock(MAX_INTENSITY); //dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y); hipLaunchKernelGGL(( otsu_single), dim3(1), dim3(MAX_INTENSITY), 0, 0, global_threshold_device, old_img_device, new_img_device, histograms, sum_vals, width, height); hipLaunchKernelGGL(( set_val), dim3(gridDim), dim3(threadsPerBlock), 0, 0, global_threshold_device, old_img_device, new_img_device, width, height); hipMemcpy(new_img, new_img_device, sizeof(uint8_t) * height * width * CHANNEL_NUM, hipMemcpyDeviceToHost); printf("OK"); } //stbi_write_png("cs_test1_out.png", width, height, CHANNEL_NUM, new_img, width*CHANNEL_NUM); float end_time = currentSeconds(); hipFree(histograms); hipFree(sum_vals); hipFree(new_img_device); hipFree(old_img_device); float duration_exc = end_time - start_time_exc; fprintf(stdout, "Time Without Startup: %f\n", duration_exc); } void set_otsu_streamed(uint8_t* &old_img, int width, int height, int block_side, uint8_t* new_img_device, uint8_t* old_img_device, int* histograms, int* sum_vals) { hipStream_t stream[NCHUNK]; int i; int* global_threshold_device; int* threshold = (int*)malloc(sizeof(int)); dim3 threadsPerBlock(block_side, block_side); dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y); int col_di = MAX_INTENSITY*sizeof(int); int img_di = height * width*CHANNEL_NUM; uint8_t* new_img = (uint8_t*)malloc(NCHUNK* sizeof(uint8_t) * height * width * CHANNEL_NUM); /*for(i=0; i<NCHUNK; i++){ new_img[i] = (uint8_t*)malloc(); } */ hipMalloc(&global_threshold_device, NCHUNK * sizeof(int)); int col_shift, img_shift; hipMalloc(&histograms, NCHUNK * sizeof(int) * MAX_INTENSITY); hipMalloc(&sum_vals, NCHUNK * sizeof(int) * MAX_INTENSITY ); hipMalloc(&new_img_device, NCHUNK * sizeof(uint8_t) * height * width*CHANNEL_NUM ); hipMalloc(&old_img_device, NCHUNK * sizeof(uint8_t) * height * width*CHANNEL_NUM ); for(i = 0; i < NCHUNK;i++){ hipStreamCreate(&stream[i]); } float start_time_exc = currentSeconds(); col_shift = 0; img_shift = 0; for(i=0; i<NCHUNK; i++){ img_shift += img_di; hipMemcpyAsync(old_img_device + img_shift , old_img, sizeof(uint8_t) * height * width*CHANNEL_NUM, hipMemcpyHostToDevice, stream[i]); } col_shift = 0; img_shift = 0; for(i=0;i<NCHUNK;i++) { col_shift += col_di; img_shift += img_di; hipLaunchKernelGGL(( set_histograms_zero), dim3(1), dim3(MAX_INTENSITY), 0, stream[i], histograms + col_shift, sum_vals+col_shift); 
hipLaunchKernelGGL(( set_histograms), dim3(gridDim), dim3(threadsPerBlock), 0, stream[i], old_img_device + img_shift, new_img_device+img_shift, histograms+col_shift, sum_vals+col_shift, width, height); hipLaunchKernelGGL(( otsu_single), dim3(1), dim3(MAX_INTENSITY), 0, stream[i], global_threshold_device + i*sizeof(int), old_img_device+ img_shift, new_img_device+img_shift, histograms+col_shift, sum_vals+col_shift, width, height); hipLaunchKernelGGL(( set_val), dim3(gridDim), dim3(threadsPerBlock), 0, stream[i], global_threshold_device+ i*sizeof(int), old_img_device+ img_shift, new_img_device+img_shift, width, height); } img_shift = 0; for(i=0;i<NCHUNK;i++) { img_shift += img_di; hipMemcpyAsync(new_img+img_shift, new_img_device + img_shift, sizeof(uint8_t) * height * width * CHANNEL_NUM, hipMemcpyDeviceToHost, stream[i]); } for(i=0; i<NCHUNK; i++) { hipStreamSynchronize(stream[i]); } for(i=0;i<NCHUNK;i++) { printf("OK"); } float end_time = currentSeconds(); for(i=0;i<NCHUNK;i++) { stbi_write_png("cs_test1_out.png", width, height, CHANNEL_NUM, new_img +img_di , width*CHANNEL_NUM); } float duration_exc = end_time - start_time_exc; fprintf(stdout, "Time Without Startup: %f\n", duration_exc); } // eg: ./edge /afs/andrew.cmu.edu/usr12/sbali/private/proj/images/building.jpg 32 <type> // NOTE shared doesn't support arg block size it is just a place holder here int main(int argc, char **argv){ const char *img_file = argv[1]; int block_side = atoi(argv[2]); int width, height, bpp; uint8_t* old_img_device; uint8_t* new_img_device; int* histograms, *sum_vals; uint8_t* old_img = stbi_load(img_file, &width, &height, &bpp, CHANNEL_NUM); //hipMalloc(&kernel_device, sizeof(float) * 9); //hipMemcpy(kernel_device, HSOBEL, sizeof(float) * 9, hipMemcpyHostToDevice); char type = argv[3][0]; //set_otsu_streamed(old_img, width, height, block_side, new_img_device, old_img_device, histograms, sum_vals); if(type=='s'){ set_otsu_streamed(old_img, width, height, block_side, new_img_device, old_img_device, histograms, sum_vals); } else{ set_otsu(old_img, width, height, block_side, new_img_device, old_img_device, histograms, sum_vals); } hipFree(histograms); hipFree(sum_vals); hipFree(new_img_device); hipFree(old_img_device); return 1; }
f946785963d46923f5f66426b668b93dcac0d92a.cu
#include <unistd.h> #include <algorithm> #include <cstdlib> #include <limits> #include <vector> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #define STB_IMAGE_IMPLEMENTATION #include "../../utils/stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "../../utils/stb_image_write.h" #include "../../utils/cycletimer.h" #define MAX_INTENSITY 256 #define NCHUNK 400 #define CHANNEL_NUM 1 #define BLOCK_SIDE 5 #define SCAN_BLOCK_DIM 256 #include "inclusiveScan.cu_inl" #define LOG2_WARP_SIZE 5U #define WARP_SIZE (1U << LOG2_WARP_SIZE) // modified from https://stackoverflow.com/questions/17399119/cant-we-use-atomic-operations-for-floating-point-variables-in-cuda __device__ static float atomicMax(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(fmaxf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } __global__ void set_histograms(uint8_t* old_img, uint8_t* new_img, int* histograms, int* sum_vals, int img_width, int img_height){ int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(i>=img_width || j>=img_height) return; int cur_color = old_img[img_width * j + i]; atomicAdd(&histograms[cur_color], 1); atomicAdd(&sum_vals[cur_color], cur_color); } //TODO convert this to the variable __global__ void otsu_single(int* global_threshold, uint8_t* old_img, uint8_t* new_img, int* histograms, int* sum_vals, int img_width, int img_height){ __shared__ int input_histogram[SCAN_BLOCK_DIM]; __shared__ int output_histogram[SCAN_BLOCK_DIM]; __shared__ int scratch_histogram[2 * SCAN_BLOCK_DIM]; __shared__ int input_sum[SCAN_BLOCK_DIM]; __shared__ int output_sum[SCAN_BLOCK_DIM]; __shared__ int scratch_sum[2 * SCAN_BLOCK_DIM]; __shared__ float max[1]; __shared__ int threshold; int id = blockIdx.x * blockDim.x + threadIdx.x; if(id==0){ max[0] = 0; } //__syncthreads(); input_histogram[id] = histograms[id]; input_sum[id] = sum_vals[id]; //printf("HERE %d", max[0]); __syncthreads(); sharedMemInclusiveScan(id, input_histogram, output_histogram, scratch_histogram, input_sum, output_sum, scratch_sum, SCAN_BLOCK_DIM); __syncthreads(); float p1_num = output_histogram[id] + 0.001; float p2_num = output_histogram[SCAN_BLOCK_DIM-1] - p1_num + 0.002; int total_sum = output_sum[SCAN_BLOCK_DIM-1]; int p1_sum = output_sum[id]; int p2_sum = total_sum - p1_sum; float p1_mu = ((float)(input_sum[id]))/p1_num; float p2_mu = ((float)p2_sum)/p2_num; float mu_diff = (p1_mu - p2_mu)/256; float var = p1_num * p2_num * mu_diff * mu_diff; atomicMax(max, var); if(var==max[0]){ threshold = id; } __syncthreads(); *global_threshold = threshold; } __global__ void set_val(int* threshold, uint8_t* old_img, uint8_t* new_img, int img_width, int img_height){ int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int id = j*img_width + i; if(i>=img_width && j>=img_width) return; //printf("HEREH %d", *threshold); if(old_img[id] > *threshold){ new_img[id] = 255; } else{ new_img[id] = 0; } } __global__ void set_histograms_zero(int* histograms,int* sum_vals){ int i = blockIdx.x * blockDim.x + threadIdx.x; histograms[i] = 0; sum_vals[i]=0; } void set_otsu(uint8_t* &old_img, int width, int height, int block_side, uint8_t* new_img_device, uint8_t* old_img_device, int* histograms, int* sum_vals) { int* global_threshold_device; uint8_t* new_img = (uint8_t*)malloc(sizeof(uint8_t) * height * 
width * CHANNEL_NUM); cudaMalloc(&histograms, sizeof(int) * MAX_INTENSITY); cudaMalloc(&sum_vals, sizeof(int) * MAX_INTENSITY ); cudaMalloc(&new_img_device, sizeof(uint8_t) * height * width*CHANNEL_NUM ); cudaMalloc(&old_img_device, sizeof(uint8_t) * height * width*CHANNEL_NUM ); cudaMalloc(&global_threshold_device, sizeof(int)); float start_time_exc = currentSeconds(); dim3 threadsPerBlock(block_side, block_side); dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y); for(int i=0; i< NCHUNK; i++){ cudaMemcpy(old_img_device, old_img, sizeof(uint8_t) * height * width*CHANNEL_NUM, cudaMemcpyHostToDevice); int* threshold = (int*)malloc(sizeof(int)); //(uint8_t* old_img, uint8_t* new_img, int* histograms, int* sum_vals, int img_width, int img_height) set_histograms<<<gridDim, threadsPerBlock>>>(old_img_device, new_img_device, histograms, sum_vals, width, height); //dim3 threadsPerBlock(MAX_INTENSITY); //dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y); otsu_single<<<1, MAX_INTENSITY>>>(global_threshold_device, old_img_device, new_img_device, histograms, sum_vals, width, height); set_val<<<gridDim, threadsPerBlock>>>(global_threshold_device, old_img_device, new_img_device, width, height); cudaMemcpy(new_img, new_img_device, sizeof(uint8_t) * height * width * CHANNEL_NUM, cudaMemcpyDeviceToHost); printf("OK"); } //stbi_write_png("cs_test1_out.png", width, height, CHANNEL_NUM, new_img, width*CHANNEL_NUM); float end_time = currentSeconds(); cudaFree(histograms); cudaFree(sum_vals); cudaFree(new_img_device); cudaFree(old_img_device); float duration_exc = end_time - start_time_exc; fprintf(stdout, "Time Without Startup: %f\n", duration_exc); } void set_otsu_streamed(uint8_t* &old_img, int width, int height, int block_side, uint8_t* new_img_device, uint8_t* old_img_device, int* histograms, int* sum_vals) { cudaStream_t stream[NCHUNK]; int i; int* global_threshold_device; int* threshold = (int*)malloc(sizeof(int)); dim3 threadsPerBlock(block_side, block_side); dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y); int col_di = MAX_INTENSITY*sizeof(int); int img_di = height * width*CHANNEL_NUM; uint8_t* new_img = (uint8_t*)malloc(NCHUNK* sizeof(uint8_t) * height * width * CHANNEL_NUM); /*for(i=0; i<NCHUNK; i++){ new_img[i] = (uint8_t*)malloc(); } */ cudaMalloc(&global_threshold_device, NCHUNK * sizeof(int)); int col_shift, img_shift; cudaMalloc(&histograms, NCHUNK * sizeof(int) * MAX_INTENSITY); cudaMalloc(&sum_vals, NCHUNK * sizeof(int) * MAX_INTENSITY ); cudaMalloc(&new_img_device, NCHUNK * sizeof(uint8_t) * height * width*CHANNEL_NUM ); cudaMalloc(&old_img_device, NCHUNK * sizeof(uint8_t) * height * width*CHANNEL_NUM ); for(i = 0; i < NCHUNK;i++){ cudaStreamCreate(&stream[i]); } float start_time_exc = currentSeconds(); col_shift = 0; img_shift = 0; for(i=0; i<NCHUNK; i++){ img_shift += img_di; cudaMemcpyAsync(old_img_device + img_shift , old_img, sizeof(uint8_t) * height * width*CHANNEL_NUM, cudaMemcpyHostToDevice, stream[i]); } col_shift = 0; img_shift = 0; for(i=0;i<NCHUNK;i++) { col_shift += col_di; img_shift += img_di; set_histograms_zero<<<1, MAX_INTENSITY, 0, stream[i]>>>(histograms + col_shift, sum_vals+col_shift); set_histograms<<<gridDim, threadsPerBlock, 0, stream[i]>>>(old_img_device + img_shift, new_img_device+img_shift, histograms+col_shift, sum_vals+col_shift, width, height); otsu_single<<<1, MAX_INTENSITY, 0, 
stream[i]>>>(global_threshold_device + i*sizeof(int), old_img_device+ img_shift, new_img_device+img_shift, histograms+col_shift, sum_vals+col_shift, width, height); set_val<<<gridDim, threadsPerBlock, 0, stream[i]>>>(global_threshold_device+ i*sizeof(int), old_img_device+ img_shift, new_img_device+img_shift, width, height); } img_shift = 0; for(i=0;i<NCHUNK;i++) { img_shift += img_di; cudaMemcpyAsync(new_img+img_shift, new_img_device + img_shift, sizeof(uint8_t) * height * width * CHANNEL_NUM, cudaMemcpyDeviceToHost, stream[i]); } for(i=0; i<NCHUNK; i++) { cudaStreamSynchronize(stream[i]); } for(i=0;i<NCHUNK;i++) { printf("OK"); } float end_time = currentSeconds(); for(i=0;i<NCHUNK;i++) { stbi_write_png("cs_test1_out.png", width, height, CHANNEL_NUM, new_img +img_di , width*CHANNEL_NUM); } float duration_exc = end_time - start_time_exc; fprintf(stdout, "Time Without Startup: %f\n", duration_exc); } // eg: ./edge /afs/andrew.cmu.edu/usr12/sbali/private/proj/images/building.jpg 32 <type> // NOTE shared doesn't support arg block size it is just a place holder here int main(int argc, char **argv){ const char *img_file = argv[1]; int block_side = atoi(argv[2]); int width, height, bpp; uint8_t* old_img_device; uint8_t* new_img_device; int* histograms, *sum_vals; uint8_t* old_img = stbi_load(img_file, &width, &height, &bpp, CHANNEL_NUM); //cudaMalloc(&kernel_device, sizeof(float) * 9); //cudaMemcpy(kernel_device, HSOBEL, sizeof(float) * 9, cudaMemcpyHostToDevice); char type = argv[3][0]; //set_otsu_streamed(old_img, width, height, block_side, new_img_device, old_img_device, histograms, sum_vals); if(type=='s'){ set_otsu_streamed(old_img, width, height, block_side, new_img_device, old_img_device, histograms, sum_vals); } else{ set_otsu(old_img, width, height, block_side, new_img_device, old_img_device, histograms, sum_vals); } cudaFree(histograms); cudaFree(sum_vals); cudaFree(new_img_device); cudaFree(old_img_device); return 1; }
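One detail worth flagging in the streamed path of both versions above: `global_threshold_device` is an `int*`, so offsetting it by `i*sizeof(int)` advances four ints per chunk instead of one and walks past the NCHUNK-int allocation; likewise `col_di` is a byte count applied in `int*` arithmetic, and `img_shift`/`col_shift` are incremented before their first use, so chunk 0's slots are skipped and the last chunk overruns its buffers. A sketch (CUDA syntax, matching the .cu version, loop context assumed from the code above) of per-chunk offsets computed element-wise:

// Inside the per-chunk loop: element-wise offsets for chunk i.
// Pointer arithmetic already scales by the element type, so no sizeof() here.
int    col_shift = i * MAX_INTENSITY;                        // ints into histograms / sum_vals
size_t img_shift = (size_t)i * height * width * CHANNEL_NUM; // uint8_t elements into the images

set_histograms_zero<<<1, MAX_INTENSITY, 0, stream[i]>>>(histograms + col_shift, sum_vals + col_shift);
set_histograms<<<gridDim, threadsPerBlock, 0, stream[i]>>>(old_img_device + img_shift, new_img_device + img_shift,
                                                           histograms + col_shift, sum_vals + col_shift, width, height);
otsu_single<<<1, MAX_INTENSITY, 0, stream[i]>>>(global_threshold_device + i,   // not + i*sizeof(int)
                                                old_img_device + img_shift, new_img_device + img_shift,
                                                histograms + col_shift, sum_vals + col_shift, width, height);
set_val<<<gridDim, threadsPerBlock, 0, stream[i]>>>(global_threshold_device + i,
                                                    old_img_device + img_shift, new_img_device + img_shift, width, height);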
bdf7f4049fef336ea74280e6c20c86a0f0b199ac.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <hip/hip_runtime.h>

static const int ThreadsPerBlock = 512;

static __global__ void fractal(const int width, const int start_frame, const int gpu_frames, unsigned char* const pic)
{
  // todo: use the GPU to compute the requested frames (base the code on the previous project)
}

unsigned char* GPU_Init(const int gpu_frames, const int width)
{
  unsigned char* d_pic;
  if (hipSuccess != hipMalloc((void **)&d_pic, gpu_frames * width * width * sizeof(unsigned char))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);}
  return d_pic;
}

void GPU_Exec(const int start_frame, const int gpu_frames, const int width, unsigned char* d_pic)
{
  // todo: launch the kernel with ThreadsPerBlock and the appropriate number of blocks (do not wait for the kernel to finish)
  // Assumes one thread per output pixel over the gpu_frames frames held in d_pic.
  const int total = gpu_frames * width * width;
  hipLaunchKernelGGL(fractal, dim3((total + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, width, start_frame, gpu_frames, d_pic);
}

void GPU_Fini(const int gpu_frames, const int width, unsigned char* pic, unsigned char* d_pic)
{
  // todo: copy the result from the device to the host and free the device memory
}
bdf7f4049fef336ea74280e6c20c86a0f0b199ac.cu
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda.h>

static const int ThreadsPerBlock = 512;

static __global__ void fractal(const int width, const int start_frame, const int gpu_frames, unsigned char* const pic)
{
  // todo: use the GPU to compute the requested frames (base the code on the previous project)
}

unsigned char* GPU_Init(const int gpu_frames, const int width)
{
  unsigned char* d_pic;
  if (cudaSuccess != cudaMalloc((void **)&d_pic, gpu_frames * width * width * sizeof(unsigned char))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);}
  return d_pic;
}

void GPU_Exec(const int start_frame, const int gpu_frames, const int width, unsigned char* d_pic)
{
  // todo: launch the kernel with ThreadsPerBlock and the appropriate number of blocks (do not wait for the kernel to finish)
  // Assumes one thread per output pixel over the gpu_frames frames held in d_pic.
  const int total = gpu_frames * width * width;
  fractal<<<(total + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(width, start_frame, gpu_frames, d_pic);
}

void GPU_Fini(const int gpu_frames, const int width, unsigned char* pic, unsigned char* d_pic)
{
  // todo: copy the result from the device to the host and free the device memory
}
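Of the three todo stubs above, GPU_Fini is fully described by its comment (copy the computed frames back, then free the device buffer). A minimal sketch under that description, mirroring the error-handling style of GPU_Init; this is an illustration, not the assignment's reference solution:

void GPU_Fini(const int gpu_frames, const int width, unsigned char* pic, unsigned char* d_pic)
{
  // Copy the gpu_frames computed frames back into the host buffer, then release the device buffer.
  if (cudaSuccess != cudaMemcpy(pic, d_pic, (size_t)gpu_frames * width * width * sizeof(unsigned char), cudaMemcpyDeviceToHost)) {
    fprintf(stderr, "ERROR: could not copy frames from device\n"); exit(-1);
  }
  cudaFree(d_pic);
}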
83e8a64538b9b01e8a105aa9ecb5c76f2f686fdd.hip
// !!! This is a file automatically generated by hipify!!! // Chiranjit Mukherjee ([email protected]) #include <cstdlib> #include <fstream> #include <iostream> #include <stdio.h> #include <time.h> #include <math.h> #include <string.h> #define SSS // If uncommented, runs the Stochastic Shotgun Search //#define MCMC // If uncommented, runs the Markov chain Monte Carlo // Note: Need at lease one of the above two uncommented //#define CUDA // If uncommented, runs CUDA kernels on GPU #define MAXL 10 // Maximum number of mixture components that can be accommodated #ifdef SSS // SSS- runtime parameters #define maxLocalWastedIterations (n+p) // In paper, C #define climbDownStepSize 10 // In paper, D #define maxLocalJumpCount 10 // In paper, R #define MAXNGLOBALJUMP 2 // In paper, S / (C * R) // SSS- parameters for lists of models saved #define sizeOfFeatureSelectionList 20 // In paper, M #define sizeOfBestList 100 // Number of highest-score models to keep track of // SSS- #define LOCALMOVE_SFACTOR 0.001 #define GLOBALJUMP_SFACTOR 0.01 #define G_TO_XI int(L*p*(p-1)/(2*n)) // In paper, g #define XI_TO_SM 10 // In paper, h #define LOOKFORWARD 5 // In paper, f #define RGMS_T 2 // In paper, t // number of chains parameters #define N_INIT 3 // Number of points of initial models provided by the user in folder DATA/ #define TRY_EACH_INIT 1 // Number of times to restart from each given initial point #define N_RANDOM_RESTART 3 // Number of times to restart from random random initial points #define N_MODES_LIST_RESTART 3 // Number of times to start from #define maxNmodes ((TRY_EACH_INIT*N_INIT+N_RANDOM_RESTART+N_MODES_LIST_RESTART)+1) #endif #ifdef MCMC // MCMC- runtime parameters #define BURNIN 20000 // Burn-in #define N_ITR 100000 // Number of iterations to run after burn-in #ifdef CUDA #undef CUDA #endif #endif #define PI 3.1415926 #define log_2 0.693147180559945 #define log_pi_over_4 0.286182471462350 #define log_2_pi 1.837877066409345 #define NEG_INF -999999.0 #define myBool bool #define myInt short // Using short interger #define myIntFactor 2 #define intFactor 4 //#define Real double #define Real float // Using floating-point #define ISFLOAT 10 using namespace std; #include <gsl/gsl_integration.h> #include <gsl/gsl_sf.h> #define GSL_INTEGRATION_GRIDSIZE 1000 gsl_integration_workspace * w; gsl_function F; #include <gsl/gsl_randist.h> #define RANDOMSEED calendar_time // Define hyperparameters for the prior distribution of (mu, K | G) #define N0 0.01 #define DELTA0 3 #define JEFFREYS_PRIOR gsl_rng *rnd; #ifdef CUDA #include <hip/hip_runtime.h> #include <cutil.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #define BLOCKSIZ 32 #define SYNC __syncthreads() typedef struct { hipStream_t delete_stream; hipStream_t add_stream; myInt* d_in_delete; myInt* d_in_add; myInt* d_which_delete; myInt* d_which_add; myInt* h_in_delete; myInt* h_in_add; myInt* which_delete; myInt* which_add; int n_add, n_delete; } MGPUstuff; #else typedef struct { } MGPUstuff; #endif MGPUstuff* device; int n_devices; // Include source files #include "utilities.cpp" #ifndef GRAPH_CPP #include "graph.cpp" #endif #ifndef GWISH_CPP #include "gwish.cpp" #endif #ifndef DPMIXGGM_CPP #include "DPmixGGM.cpp" #endif #ifndef LISTS_CPP #include "DPmixGGM_Lists.cpp" #endif #ifndef SSSMOVES_CPP #include "DPmixGGM_SSSmoves.cpp" #endif #ifdef MCMC #include "DPmixGGM_MCMCmoves.cpp" #endif //////////////////////////////////////////////////////////////// START OF MAIN 
/////////////////////////////////////////////////////////////// int main (int argc, char *argv[]) { // declarations and initialisations int i,j,l,q,r,t; int L = 2; long int k; Real score; char initID[] = {'1','2','3'}; clock_t start, now; double cpu_time; // Initializing gsl random variate generators and integration tools const gsl_rng_type *T; time_t calendar_time; gsl_rng_env_setup(); T = gsl_rng_default; rnd = gsl_rng_alloc (T); calendar_time = time(NULL); gsl_rng_set(rnd,RANDOMSEED); #ifdef SSS unsigned long int seedset[maxNmodes]; for(i=0; i<maxNmodes; i++) { seedset[i] = gsl_rng_get (rnd); } #endif w = gsl_integration_workspace_alloc (GSL_INTEGRATION_GRIDSIZE); // DATA INPUT char datafile[50] = ""; strcpy(datafile,"DATA/"); strcat(datafile,argv[1]); strcat(datafile,".txt"); ifstream data(datafile); int n, p; data >> n; data >> p; printf("%d %d\n",n,p); Real *X = new Real[n*p]; for(i=0; i<n; i++) { for(j=0; j<p; j++) { data >> X[p*i+j]; } }; data.close(); // more declarations and initialisations int ee = p*(p-1)/2; ////////////////////////////////////////////////////////////// START OF SSS /////////////////////////////////////////////////////////////// #ifdef SSS // OUTPUT FILES char outfile[100] = ""; strcpy(outfile,"RES/"); #ifndef CUDA strcat(outfile,argv[1]); strcat(outfile,"_modes_CPU.txt"); ofstream outmodes(outfile); outmodes << n << " " << p << endl; #ifndef CUDA strcpy(outfile,"RES/"); strcat(outfile,argv[1]); strcat(outfile,"_best_CPU.txt"); ofstream outbest(outfile); outbest << n << " " << p << endl; #endif #else strcat(outfile,argv[1]); strcat(outfile,"_modes_GPU.txt"); ofstream outmodes(outfile); outmodes << n << " " << p << endl; #ifndef CUDA strcpy(outfile,"RES/"); strcat(outfile,argv[1]); strcat(outfile,"_best_GPU.txt"); ofstream outbest(outfile); outbest << n << " " << p << endl; #endif #endif // Initialisations State initstates[N_INIT+N_RANDOM_RESTART]; int* initstateID = new int[maxNmodes]; for(i=0; i<N_INIT; i++) { strcpy(datafile,"DATA/"); strcat(datafile,argv[1]); strcat(datafile,"_init"); strncat(datafile,&initID[i],1); strcat(datafile,".txt"); ifstream initfile(datafile); initstates[i] = new DPmixGGM(X,L,n,p,0.1,initfile); initfile.close(); initstateID[i] = i; } State state = new DPmixGGM(initstates[0]); State localBestState = new DPmixGGM(state); State globalBestState = new DPmixGGM(state); List featureList = new DPmixGGMlist (sizeOfFeatureSelectionList, n, p); List modesList = new DPmixGGMlist (maxNmodes, n, p); #ifdef CUDA List bestList = (List) NULL; #else List bestList = new DPmixGGMlist (sizeOfBestList, n, p); #endif ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef CUDA myInt req_GPU = atoi(argv[2]); hipGetDeviceCount(&n_devices); n_devices = ((n_devices <= req_GPU) ? n_devices : req_GPU); device = new MGPUstuff[n_devices]; size_t size_temp; hipError_t e1; for(r=0; r<n_devices; r++) { hipSetDevice(r); hipStreamCreate(&(device[r].delete_stream)); hipStreamCreate(&(device[r].add_stream)); size_temp = sizeof(myInt)*(3+p+p*p+2*ee); e1 = hipMalloc((void**) &(device[r].d_in_delete), size_temp); if(e1 != hipSuccess) { cout << "Error." << endl; exit(0); } size_temp = sizeof(myInt)*(3+4*p+2*p*p+2*ee); e1 = hipMalloc((void**) &(device[r].d_in_add), size_temp); if(e1 != hipSuccess) { cout << "Error." << endl; exit(0); } size_temp = sizeof(myInt)*ee; e1 = hipMalloc((void**) &(device[r].d_which_delete), size_temp); if(e1 != hipSuccess) { cout << "Error." 
<< endl; exit(0); } size_temp = sizeof(myInt)*ee; e1 = hipMalloc((void**) &(device[r].d_which_add), size_temp); if(e1 != hipSuccess) { cout << "Error." << endl; exit(0); } device[r].h_in_delete = new myInt[3+p+p*p+2*ee]; device[r].h_in_add = new myInt[4+4*p+2*p*p+2*ee]; device[r].which_delete = new myInt[ee]; device[r].which_add = new myInt[ee]; } #endif // more declarations and initialisations bool globalMoveFlag = 0; myInt nmodes = 1; Real localBestScore = NEG_INF, globalBestScore = NEG_INF; gsl_rng_set(rnd,seedset[nmodes-1]); int wastedIterations = 0; int localJumpCount = 0, globalJumpCount = 0; int num_cases; long int num_allModels = 0; // initial xi scan num_cases += updateAllXis (1, state, bestList); L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } printf("%ld %d %.4f %.4f %.4f %d %.4f %d %ld\n",k,state->L,score,localBestScore,globalBestScore,nmodes,cpu_time,num_cases,num_allModels); // start the stopwatch start = clock(); k = 0; while(nmodes<=maxNmodes) { k++; num_cases = 0; // LOCAL MOVES /////////////////////////////////////////////////////////////////////////////////////////////////////////////// if((k%G_TO_XI)) { num_cases += updateOneEdgeInEveryG (state->L, NULL, 0, state->graphlist, state->pll, NULL, state, bestList); } else { j = k/G_TO_XI; if(j%XI_TO_SM) { if(state->L>1) { num_cases += updateAllXis (1, state, bestList); num_cases += Merge (state, bestList, LOOKFORWARD, 0); } } else { num_cases += splitMerge(state, featureList, bestList, LOOKFORWARD, LOCALMOVE_SFACTOR, 0, 1, RGMS_T); } } // LOCAL MOVES /////////////////////////////////////////////////////////////////////////////////////////////////////////////// // MODE BREAK MOVES ////////////////////////////////////////////////////////////////////////////////////////////////////////// if ((wastedIterations > maxLocalWastedIterations) && (localJumpCount < maxLocalJumpCount)) { wastedIterations = 0; localJumpCount++; state->CopyState(localBestState); // local graph jump for(i=0; i<localJumpCount; i++) { num_cases += updateOneEdgeInEveryG (state->L, NULL, (i+1)*climbDownStepSize, state->graphlist, state->pll, NULL, state, bestList); } } // MODE BREAK MOVES ////////////////////////////////////////////////////////////////////////////////////////////////////////// // GLOBAL JUMP MOVES ///////////////////////////////////////////////////////////////////////////////////////////////////////// if(wastedIterations > maxLocalJumpCount*maxLocalWastedIterations) { if(globalJumpCount==MAXNGLOBALJUMP) { globalMoveFlag = 1; } else { wastedIterations = 0; globalJumpCount++; state->CopyState(globalBestState); localBestScore = NEG_INF; num_cases += globalJumpAllG (1, 1, LOOKFORWARD, GLOBALJUMP_SFACTOR, state, featureList, bestList); // larger graph jump state->plp = state->partitionlogPrior (state->L,state->xi,state->alpha); for(l=0; l<state->L; l++) { state->pll[l] = state->cluster_k_loglikelihood (l,state->xi,state->graphlist[l]); } } } // GLOBAL JUMP MOVES ///////////////////////////////////////////////////////////////////////////////////////////////////////// // SEARCH RESTART //////////////////////////////////////////////////////////////////////////////////////////////////////////// if(globalMoveFlag) { globalMoveFlag = 0; modesList->UpdateList(globalBestState); nmodes++; gsl_rng_set(rnd,seedset[nmodes-1]); start = clock(); k = 0; localBestScore = NEG_INF; globalBestScore = NEG_INF; featureList->FlushList(state); #ifndef CUDA if(nmodes>maxNmodes) { break; } #else if(nmodes>maxNmodes-1) { break; } #endif if 
(nmodes <= TRY_EACH_INIT*N_INIT) // analyse prescribed starting points { delete state; delete localBestState; delete globalBestState; strcpy(datafile,"DATA/"); strcat(datafile,argv[1]); strcat(datafile,"_init"); strncat(datafile,&initID[(nmodes-1)%N_INIT],1); strcat(datafile,".txt"); ifstream initfile(datafile); state = new DPmixGGM(X,L,n,p,0.1,initfile); initfile.close(); localBestState = new DPmixGGM(state); globalBestState = new DPmixGGM(state); num_cases += updateAllXis (1, state, bestList); L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } } else if (nmodes <= (TRY_EACH_INIT*N_INIT+N_RANDOM_RESTART) && (N_RANDOM_RESTART>0)) // analyse renadom starting points { randomRestart (rand_myInt(MAXL-1)+2, state, 0.1); initstates[N_INIT-1+nmodes-TRY_EACH_INIT*N_INIT] = new DPmixGGM(state); initstateID[nmodes] = N_INIT-1+nmodes-TRY_EACH_INIT*N_INIT; num_cases += updateAllXis (1, state, bestList); L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } } else if (nmodes <= (TRY_EACH_INIT*N_INIT+N_RANDOM_RESTART+N_MODES_LIST_RESTART)) { int maxI; Real maxScore = NEG_INF; for(i=0; i<(nmodes-1); i++) { if(modesList->score_list[i]>maxScore) { maxScore = modesList->score_list[i]; maxI = i; } }; //state->CopyState(initstates[1]); state->CopyState(initstates[initstateID[maxI]]); localBestState->CopyState(state); globalBestState->CopyState(state); num_cases += updateAllXis (1, state, bestList); L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } } #ifndef CUDA else { bestList = new DPmixGGMlist (sizeOfBestList, n, p); int maxI; Real maxScore = NEG_INF; for(i=0; i<(nmodes-1); i++) { if(modesList->score_list[i]>maxScore) { maxScore = modesList->score_list[i]; maxI = i; } } gsl_rng_set(rnd,seedset[maxI]); //state->CopyState(initstates[1]); state->CopyState(initstates[initstateID[maxI]]); localBestState->CopyState(state); globalBestState->CopyState(state); num_cases += updateAllXis (1, state, bestList); L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } } #endif } // SEARCH RESTART //////////////////////////////////////////////////////////////////////////////////////////////////////////// // SCORE RECORDING /////////////////////////////////////////////////////////////////////////////////////////////////////////// L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } now = clock(); cpu_time = ((double) (now-start))/CLOCKS_PER_SEC; num_allModels += num_cases; printf("%ld %d %.4f %.4f %.4f %d %.4f %d %ld\n",k,state->L,score,localBestScore,globalBestScore,nmodes,cpu_time,num_cases,num_allModels); if(score > localBestScore) { localBestScore = score; wastedIterations = 0; localJumpCount = 0; localBestState->CopyState(state); } else { wastedIterations++; } if(score > globalBestScore) { globalBestScore = score; wastedIterations = 0; globalJumpCount = 0; globalBestState->CopyState(state); featureList->UpdateList(state); } else { wastedIterations++; } // SCORE RECORDING /////////////////////////////////////////////////////////////////////////////////////////////////////////// } // writing the lists modesList->WriteList (outmodes); #ifndef CUDA bestList->WriteList (outbest); #endif // cleanups #ifdef CUDA for(r=0; r<n_devices; r++) { hipSetDevice(r); hipFree(device[r].d_in_add); hipFree(device[r].d_in_delete); hipFree(device[r].d_which_add); hipFree(device[r].d_which_delete); delete[] device[r].h_in_delete; delete[] device[r].h_in_add; delete[] device[r].which_delete; delete[] 
device[r].which_add; } delete[] device; #endif outmodes.close(); #ifndef CUDA outbest.close(); #endif #endif ////////////////////////////////////////////////////////////// END OF SSS /////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////// START OF MCMC /////////////////////////////////////////////////////////////// #ifdef MCMC // OUTPUT FILES char outfile[100] = ""; strcpy(outfile,"RES/"); strcat(outfile,argv[1]); strcat(outfile,"_MAP"); strcat(outfile,argv[2]); strcat(outfile,".txt"); ofstream outMAP(outfile); strcpy(outfile,"RES/"); strcat(outfile,argv[1]); strcat(outfile,"_MCMCall"); strcat(outfile,argv[2]); strcat(outfile,".txt"); ofstream outMCMCall(outfile); outMAP << n << " " << p << endl; outMCMCall << n << " " << p << endl; // more declarations and initialisations int *cluster_mat = new int[n*n]; for(i=0; i<n*n; i++) { cluster_mat[i] = 0; }; int* edge_mat = new int[n*ee]; for(i=0; i<n*ee; i++) { edge_mat[i] = 0; } List MAPList = new DPmixGGMlist (1, n, p); Real lastBestScore = NEG_INF; State state; // start the stopwatch start = clock(); // Initializations strcpy(datafile,"DATA/"); strcat(datafile,argv[1]); strcat(datafile,"_init"); strncat(datafile,argv[2],1); strcat(datafile,".txt"); ifstream initfile(datafile); state = new DPmixGGM(X,L,n,p,0.1,initfile); initfile.close(); for(k=0; k<(BURNIN+N_ITR); k++) { MCMCUpdateXi(state); MCMCUpdateG (state); score = state->partitionlogPrior(state->L, state->xi, state->alpha); for(l=0; l<state->L; l++) { score += state->pll[l]; } now = clock(); cpu_time = ((double) (now-start))/CLOCKS_PER_SEC; printf("%ld %d %.4f %.4f\n",k,state->L,score,cpu_time); if(k>=BURNIN) { for(i=0; i<n; i++) { q = state->xi[i]; for(j=0; j<n; j++) { r = state->xi[j]; cluster_mat[i*n+j] += (q==r); } } for(i=0; i<n; i++) { t = 0; for(q=0; q<p-1; q++) { for(r=q+1; r<p; r++) { edge_mat[i*ee+t] += state->graphlist[state->xi[i]]->Edge[q][r]; t++; } } } if(score>lastBestScore) { MAPList->UpdateList(state); } } } MAPList->WriteList (outMAP); for(i=0; i<n*n; i++ ) { outMCMCall << Real(cluster_mat[i])/Real(N_ITR) << " "; }; outMCMCall << endl; for(i=0; i<n; i++) { for(j=0; j<ee; j++) { outMCMCall << Real(edge_mat[i*ee+j])/Real(N_ITR) << " "; }; outMCMCall << endl; } delete[] cluster_mat; delete[] edge_mat; outMAP.close(); outMCMCall.close(); #endif ////////////////////////////////////////////////////////////// END OF MCMC /////////////////////////////////////////////////////////////// // cleanups gsl_rng_free (rnd); delete[] X; gsl_integration_workspace_free (w); }
83e8a64538b9b01e8a105aa9ecb5c76f2f686fdd.cu
// Chiranjit Mukherjee ([email protected]) #include <cstdlib> #include <fstream> #include <iostream> #include <stdio.h> #include <time.h> #include <math.h> #include <string.h> #define SSS // If uncommented, runs the Stochastic Shotgun Search //#define MCMC // If uncommented, runs the Markov chain Monte Carlo // Note: Need at lease one of the above two uncommented //#define CUDA // If uncommented, runs CUDA kernels on GPU #define MAXL 10 // Maximum number of mixture components that can be accommodated #ifdef SSS // SSS- runtime parameters #define maxLocalWastedIterations (n+p) // In paper, C #define climbDownStepSize 10 // In paper, D #define maxLocalJumpCount 10 // In paper, R #define MAXNGLOBALJUMP 2 // In paper, S / (C * R) // SSS- parameters for lists of models saved #define sizeOfFeatureSelectionList 20 // In paper, M #define sizeOfBestList 100 // Number of highest-score models to keep track of // SSS- #define LOCALMOVE_SFACTOR 0.001 #define GLOBALJUMP_SFACTOR 0.01 #define G_TO_XI int(L*p*(p-1)/(2*n)) // In paper, g #define XI_TO_SM 10 // In paper, h #define LOOKFORWARD 5 // In paper, f #define RGMS_T 2 // In paper, t // number of chains parameters #define N_INIT 3 // Number of points of initial models provided by the user in folder DATA/ #define TRY_EACH_INIT 1 // Number of times to restart from each given initial point #define N_RANDOM_RESTART 3 // Number of times to restart from random random initial points #define N_MODES_LIST_RESTART 3 // Number of times to start from #define maxNmodes ((TRY_EACH_INIT*N_INIT+N_RANDOM_RESTART+N_MODES_LIST_RESTART)+1) #endif #ifdef MCMC // MCMC- runtime parameters #define BURNIN 20000 // Burn-in #define N_ITR 100000 // Number of iterations to run after burn-in #ifdef CUDA #undef CUDA #endif #endif #define PI 3.1415926 #define log_2 0.693147180559945 #define log_pi_over_4 0.286182471462350 #define log_2_pi 1.837877066409345 #define NEG_INF -999999.0 #define myBool bool #define myInt short // Using short interger #define myIntFactor 2 #define intFactor 4 //#define Real double #define Real float // Using floating-point #define ISFLOAT 10 using namespace std; #include <gsl/gsl_integration.h> #include <gsl/gsl_sf.h> #define GSL_INTEGRATION_GRIDSIZE 1000 gsl_integration_workspace * w; gsl_function F; #include <gsl/gsl_randist.h> #define RANDOMSEED calendar_time // Define hyperparameters for the prior distribution of (mu, K | G) #define N0 0.01 #define DELTA0 3 #define JEFFREYS_PRIOR gsl_rng *rnd; #ifdef CUDA #include <cuda.h> #include <cutil.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #define BLOCKSIZ 32 #define SYNC __syncthreads() typedef struct { cudaStream_t delete_stream; cudaStream_t add_stream; myInt* d_in_delete; myInt* d_in_add; myInt* d_which_delete; myInt* d_which_add; myInt* h_in_delete; myInt* h_in_add; myInt* which_delete; myInt* which_add; int n_add, n_delete; } MGPUstuff; #else typedef struct { } MGPUstuff; #endif MGPUstuff* device; int n_devices; // Include source files #include "utilities.cpp" #ifndef GRAPH_CPP #include "graph.cpp" #endif #ifndef GWISH_CPP #include "gwish.cpp" #endif #ifndef DPMIXGGM_CPP #include "DPmixGGM.cpp" #endif #ifndef LISTS_CPP #include "DPmixGGM_Lists.cpp" #endif #ifndef SSSMOVES_CPP #include "DPmixGGM_SSSmoves.cpp" #endif #ifdef MCMC #include "DPmixGGM_MCMCmoves.cpp" #endif //////////////////////////////////////////////////////////////// START OF MAIN /////////////////////////////////////////////////////////////// int main (int argc, char *argv[]) { // 
declarations and initialisations int i,j,l,q,r,t; int L = 2; long int k; Real score; char initID[] = {'1','2','3'}; clock_t start, now; double cpu_time; // Initializing gsl random variate generators and integration tools const gsl_rng_type *T; time_t calendar_time; gsl_rng_env_setup(); T = gsl_rng_default; rnd = gsl_rng_alloc (T); calendar_time = time(NULL); gsl_rng_set(rnd,RANDOMSEED); #ifdef SSS unsigned long int seedset[maxNmodes]; for(i=0; i<maxNmodes; i++) { seedset[i] = gsl_rng_get (rnd); } #endif w = gsl_integration_workspace_alloc (GSL_INTEGRATION_GRIDSIZE); // DATA INPUT char datafile[50] = ""; strcpy(datafile,"DATA/"); strcat(datafile,argv[1]); strcat(datafile,".txt"); ifstream data(datafile); int n, p; data >> n; data >> p; printf("%d %d\n",n,p); Real *X = new Real[n*p]; for(i=0; i<n; i++) { for(j=0; j<p; j++) { data >> X[p*i+j]; } }; data.close(); // more declarations and initialisations int ee = p*(p-1)/2; ////////////////////////////////////////////////////////////// START OF SSS /////////////////////////////////////////////////////////////// #ifdef SSS // OUTPUT FILES char outfile[100] = ""; strcpy(outfile,"RES/"); #ifndef CUDA strcat(outfile,argv[1]); strcat(outfile,"_modes_CPU.txt"); ofstream outmodes(outfile); outmodes << n << " " << p << endl; #ifndef CUDA strcpy(outfile,"RES/"); strcat(outfile,argv[1]); strcat(outfile,"_best_CPU.txt"); ofstream outbest(outfile); outbest << n << " " << p << endl; #endif #else strcat(outfile,argv[1]); strcat(outfile,"_modes_GPU.txt"); ofstream outmodes(outfile); outmodes << n << " " << p << endl; #ifndef CUDA strcpy(outfile,"RES/"); strcat(outfile,argv[1]); strcat(outfile,"_best_GPU.txt"); ofstream outbest(outfile); outbest << n << " " << p << endl; #endif #endif // Initialisations State initstates[N_INIT+N_RANDOM_RESTART]; int* initstateID = new int[maxNmodes]; for(i=0; i<N_INIT; i++) { strcpy(datafile,"DATA/"); strcat(datafile,argv[1]); strcat(datafile,"_init"); strncat(datafile,&initID[i],1); strcat(datafile,".txt"); ifstream initfile(datafile); initstates[i] = new DPmixGGM(X,L,n,p,0.1,initfile); initfile.close(); initstateID[i] = i; } State state = new DPmixGGM(initstates[0]); State localBestState = new DPmixGGM(state); State globalBestState = new DPmixGGM(state); List featureList = new DPmixGGMlist (sizeOfFeatureSelectionList, n, p); List modesList = new DPmixGGMlist (maxNmodes, n, p); #ifdef CUDA List bestList = (List) NULL; #else List bestList = new DPmixGGMlist (sizeOfBestList, n, p); #endif ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef CUDA myInt req_GPU = atoi(argv[2]); cudaGetDeviceCount(&n_devices); n_devices = ((n_devices <= req_GPU) ? n_devices : req_GPU); device = new MGPUstuff[n_devices]; size_t size_temp; cudaError_t e1; for(r=0; r<n_devices; r++) { cudaSetDevice(r); cudaStreamCreate(&(device[r].delete_stream)); cudaStreamCreate(&(device[r].add_stream)); size_temp = sizeof(myInt)*(3+p+p*p+2*ee); e1 = cudaMalloc((void**) &(device[r].d_in_delete), size_temp); if(e1 != cudaSuccess) { cout << "Error." << endl; exit(0); } size_temp = sizeof(myInt)*(3+4*p+2*p*p+2*ee); e1 = cudaMalloc((void**) &(device[r].d_in_add), size_temp); if(e1 != cudaSuccess) { cout << "Error." << endl; exit(0); } size_temp = sizeof(myInt)*ee; e1 = cudaMalloc((void**) &(device[r].d_which_delete), size_temp); if(e1 != cudaSuccess) { cout << "Error." 
<< endl; exit(0); } size_temp = sizeof(myInt)*ee; e1 = cudaMalloc((void**) &(device[r].d_which_add), size_temp); if(e1 != cudaSuccess) { cout << "Error." << endl; exit(0); } device[r].h_in_delete = new myInt[3+p+p*p+2*ee]; device[r].h_in_add = new myInt[4+4*p+2*p*p+2*ee]; device[r].which_delete = new myInt[ee]; device[r].which_add = new myInt[ee]; } #endif // more declarations and initialisations bool globalMoveFlag = 0; myInt nmodes = 1; Real localBestScore = NEG_INF, globalBestScore = NEG_INF; gsl_rng_set(rnd,seedset[nmodes-1]); int wastedIterations = 0; int localJumpCount = 0, globalJumpCount = 0; int num_cases; long int num_allModels = 0; // initial xi scan num_cases += updateAllXis (1, state, bestList); L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } printf("%ld %d %.4f %.4f %.4f %d %.4f %d %ld\n",k,state->L,score,localBestScore,globalBestScore,nmodes,cpu_time,num_cases,num_allModels); // start the stopwatch start = clock(); k = 0; while(nmodes<=maxNmodes) { k++; num_cases = 0; // LOCAL MOVES /////////////////////////////////////////////////////////////////////////////////////////////////////////////// if((k%G_TO_XI)) { num_cases += updateOneEdgeInEveryG (state->L, NULL, 0, state->graphlist, state->pll, NULL, state, bestList); } else { j = k/G_TO_XI; if(j%XI_TO_SM) { if(state->L>1) { num_cases += updateAllXis (1, state, bestList); num_cases += Merge (state, bestList, LOOKFORWARD, 0); } } else { num_cases += splitMerge(state, featureList, bestList, LOOKFORWARD, LOCALMOVE_SFACTOR, 0, 1, RGMS_T); } } // LOCAL MOVES /////////////////////////////////////////////////////////////////////////////////////////////////////////////// // MODE BREAK MOVES ////////////////////////////////////////////////////////////////////////////////////////////////////////// if ((wastedIterations > maxLocalWastedIterations) && (localJumpCount < maxLocalJumpCount)) { wastedIterations = 0; localJumpCount++; state->CopyState(localBestState); // local graph jump for(i=0; i<localJumpCount; i++) { num_cases += updateOneEdgeInEveryG (state->L, NULL, (i+1)*climbDownStepSize, state->graphlist, state->pll, NULL, state, bestList); } } // MODE BREAK MOVES ////////////////////////////////////////////////////////////////////////////////////////////////////////// // GLOBAL JUMP MOVES ///////////////////////////////////////////////////////////////////////////////////////////////////////// if(wastedIterations > maxLocalJumpCount*maxLocalWastedIterations) { if(globalJumpCount==MAXNGLOBALJUMP) { globalMoveFlag = 1; } else { wastedIterations = 0; globalJumpCount++; state->CopyState(globalBestState); localBestScore = NEG_INF; num_cases += globalJumpAllG (1, 1, LOOKFORWARD, GLOBALJUMP_SFACTOR, state, featureList, bestList); // larger graph jump state->plp = state->partitionlogPrior (state->L,state->xi,state->alpha); for(l=0; l<state->L; l++) { state->pll[l] = state->cluster_k_loglikelihood (l,state->xi,state->graphlist[l]); } } } // GLOBAL JUMP MOVES ///////////////////////////////////////////////////////////////////////////////////////////////////////// // SEARCH RESTART //////////////////////////////////////////////////////////////////////////////////////////////////////////// if(globalMoveFlag) { globalMoveFlag = 0; modesList->UpdateList(globalBestState); nmodes++; gsl_rng_set(rnd,seedset[nmodes-1]); start = clock(); k = 0; localBestScore = NEG_INF; globalBestScore = NEG_INF; featureList->FlushList(state); #ifndef CUDA if(nmodes>maxNmodes) { break; } #else if(nmodes>maxNmodes-1) { break; } #endif if 
(nmodes <= TRY_EACH_INIT*N_INIT) // analyse prescribed starting points { delete state; delete localBestState; delete globalBestState; strcpy(datafile,"DATA/"); strcat(datafile,argv[1]); strcat(datafile,"_init"); strncat(datafile,&initID[(nmodes-1)%N_INIT],1); strcat(datafile,".txt"); ifstream initfile(datafile); state = new DPmixGGM(X,L,n,p,0.1,initfile); initfile.close(); localBestState = new DPmixGGM(state); globalBestState = new DPmixGGM(state); num_cases += updateAllXis (1, state, bestList); L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } } else if (nmodes <= (TRY_EACH_INIT*N_INIT+N_RANDOM_RESTART) && (N_RANDOM_RESTART>0)) // analyse renadom starting points { randomRestart (rand_myInt(MAXL-1)+2, state, 0.1); initstates[N_INIT-1+nmodes-TRY_EACH_INIT*N_INIT] = new DPmixGGM(state); initstateID[nmodes] = N_INIT-1+nmodes-TRY_EACH_INIT*N_INIT; num_cases += updateAllXis (1, state, bestList); L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } } else if (nmodes <= (TRY_EACH_INIT*N_INIT+N_RANDOM_RESTART+N_MODES_LIST_RESTART)) { int maxI; Real maxScore = NEG_INF; for(i=0; i<(nmodes-1); i++) { if(modesList->score_list[i]>maxScore) { maxScore = modesList->score_list[i]; maxI = i; } }; //state->CopyState(initstates[1]); state->CopyState(initstates[initstateID[maxI]]); localBestState->CopyState(state); globalBestState->CopyState(state); num_cases += updateAllXis (1, state, bestList); L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } } #ifndef CUDA else { bestList = new DPmixGGMlist (sizeOfBestList, n, p); int maxI; Real maxScore = NEG_INF; for(i=0; i<(nmodes-1); i++) { if(modesList->score_list[i]>maxScore) { maxScore = modesList->score_list[i]; maxI = i; } } gsl_rng_set(rnd,seedset[maxI]); //state->CopyState(initstates[1]); state->CopyState(initstates[initstateID[maxI]]); localBestState->CopyState(state); globalBestState->CopyState(state); num_cases += updateAllXis (1, state, bestList); L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } } #endif } // SEARCH RESTART //////////////////////////////////////////////////////////////////////////////////////////////////////////// // SCORE RECORDING /////////////////////////////////////////////////////////////////////////////////////////////////////////// L = state->L; score = state->plp; for(l=0; l<L; l++) { score += state->pll[l]; } now = clock(); cpu_time = ((double) (now-start))/CLOCKS_PER_SEC; num_allModels += num_cases; printf("%ld %d %.4f %.4f %.4f %d %.4f %d %ld\n",k,state->L,score,localBestScore,globalBestScore,nmodes,cpu_time,num_cases,num_allModels); if(score > localBestScore) { localBestScore = score; wastedIterations = 0; localJumpCount = 0; localBestState->CopyState(state); } else { wastedIterations++; } if(score > globalBestScore) { globalBestScore = score; wastedIterations = 0; globalJumpCount = 0; globalBestState->CopyState(state); featureList->UpdateList(state); } else { wastedIterations++; } // SCORE RECORDING /////////////////////////////////////////////////////////////////////////////////////////////////////////// } // writing the lists modesList->WriteList (outmodes); #ifndef CUDA bestList->WriteList (outbest); #endif // cleanups #ifdef CUDA for(r=0; r<n_devices; r++) { cudaSetDevice(r); cudaFree(device[r].d_in_add); cudaFree(device[r].d_in_delete); cudaFree(device[r].d_which_add); cudaFree(device[r].d_which_delete); delete[] device[r].h_in_delete; delete[] device[r].h_in_add; delete[] device[r].which_delete; 
delete[] device[r].which_add; } delete[] device; #endif outmodes.close(); #ifndef CUDA outbest.close(); #endif #endif ////////////////////////////////////////////////////////////// END OF SSS /////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////// START OF MCMC /////////////////////////////////////////////////////////////// #ifdef MCMC // OUTPUT FILES char outfile[100] = ""; strcpy(outfile,"RES/"); strcat(outfile,argv[1]); strcat(outfile,"_MAP"); strcat(outfile,argv[2]); strcat(outfile,".txt"); ofstream outMAP(outfile); strcpy(outfile,"RES/"); strcat(outfile,argv[1]); strcat(outfile,"_MCMCall"); strcat(outfile,argv[2]); strcat(outfile,".txt"); ofstream outMCMCall(outfile); outMAP << n << " " << p << endl; outMCMCall << n << " " << p << endl; // more declarations and initialisations int *cluster_mat = new int[n*n]; for(i=0; i<n*n; i++) { cluster_mat[i] = 0; }; int* edge_mat = new int[n*ee]; for(i=0; i<n*ee; i++) { edge_mat[i] = 0; } List MAPList = new DPmixGGMlist (1, n, p); Real lastBestScore = NEG_INF; State state; // start the stopwatch start = clock(); // Initializations strcpy(datafile,"DATA/"); strcat(datafile,argv[1]); strcat(datafile,"_init"); strncat(datafile,argv[2],1); strcat(datafile,".txt"); ifstream initfile(datafile); state = new DPmixGGM(X,L,n,p,0.1,initfile); initfile.close(); for(k=0; k<(BURNIN+N_ITR); k++) { MCMCUpdateXi(state); MCMCUpdateG (state); score = state->partitionlogPrior(state->L, state->xi, state->alpha); for(l=0; l<state->L; l++) { score += state->pll[l]; } now = clock(); cpu_time = ((double) (now-start))/CLOCKS_PER_SEC; printf("%ld %d %.4f %.4f\n",k,state->L,score,cpu_time); if(k>=BURNIN) { for(i=0; i<n; i++) { q = state->xi[i]; for(j=0; j<n; j++) { r = state->xi[j]; cluster_mat[i*n+j] += (q==r); } } for(i=0; i<n; i++) { t = 0; for(q=0; q<p-1; q++) { for(r=q+1; r<p; r++) { edge_mat[i*ee+t] += state->graphlist[state->xi[i]]->Edge[q][r]; t++; } } } if(score>lastBestScore) { MAPList->UpdateList(state); } } } MAPList->WriteList (outMAP); for(i=0; i<n*n; i++ ) { outMCMCall << Real(cluster_mat[i])/Real(N_ITR) << " "; }; outMCMCall << endl; for(i=0; i<n; i++) { for(j=0; j<ee; j++) { outMCMCall << Real(edge_mat[i*ee+j])/Real(N_ITR) << " "; }; outMCMCall << endl; } delete[] cluster_mat; delete[] edge_mat; outMAP.close(); outMCMCall.close(); #endif ////////////////////////////////////////////////////////////// END OF MCMC /////////////////////////////////////////////////////////////// // cleanups gsl_rng_free (rnd); delete[] X; gsl_integration_workspace_free (w); }
9e0f2e77461fe543705b25dbccc05fc9a7c1e348.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated s Tue Dec 17 13:18:45 2013 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_s #define NB 16 //////////////////////////////////////////////////////////////////////////////// // grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd. // lower indicates blocks in lower triangle of grid, including diagonal. // lower blocks cover left side of matrix, including diagonal. // upper blocks swap block indices (x,y) and shift by grid width (or width-1) // to cover right side of matrix. // [ A00 A01 A02 ] [ A00 . . | . . ] // [ A10 A11 A12 ] [ A10 A11 . | . . ] // grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ] // [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ] // [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ] // // See stranspose_inplace_even for description of threads. __global__ void stranspose_inplace_odd( int n, float *matrix, int lda ) { __shared__ float sA[ NB ][ NB+1 ]; __shared__ float sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x >= blockIdx.y); int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1)); int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y )); ii *= NB; jj *= NB; float *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { float *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } //////////////////////////////////////////////////////////////////////////////// // grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even. // lower indicates blocks in strictly lower triangle of grid, excluding diagonal. // lower blocks shift up by one to cover left side of matrix including diagonal. // upper blocks swap block indices (x,y) and shift by grid width // to cover right side of matrix. // [ A00 A01 ] [ A10 . | . . ] // [ A10 A11 ] [ A20 A21 | . . ] // grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ] // [ A30 A31 ] [ A40 A41 | A01 A11 ] // [ A40 A41 ] // // Each block is NB x NB threads. // For non-diagonal block A, block B is symmetric block. // Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed, // syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j). // Threads outside the matrix do not touch memory. __global__ void stranspose_inplace_even( int n, float *matrix, int lda ) { __shared__ float sA[ NB ][ NB+1 ]; __shared__ float sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x > blockIdx.y); int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y)); int jj = (lower ? 
(blockIdx.y ) : (blockIdx.x + gridDim.y)); ii *= NB; jj *= NB; float *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { float *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } //////////////////////////////////////////////////////////////////////////////// extern "C" void magmablas_stranspose_inplace( magma_int_t n, float *A, magma_int_t lda ) { dim3 threads( NB, NB ); int nblock = (n + NB - 1)/NB; // need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix. // block assignment differs depending on whether nblock is odd or even. if( nblock % 2 == 1 ) { dim3 grid( nblock, (nblock+1)/2 ); hipLaunchKernelGGL(( stranspose_inplace_odd), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda ); } else { dim3 grid( nblock+1, nblock/2 ); hipLaunchKernelGGL(( stranspose_inplace_even), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda ); } }
9e0f2e77461fe543705b25dbccc05fc9a7c1e348.cu
/* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated s Tue Dec 17 13:18:45 2013 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_s #define NB 16 //////////////////////////////////////////////////////////////////////////////// // grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd. // lower indicates blocks in lower triangle of grid, including diagonal. // lower blocks cover left side of matrix, including diagonal. // upper blocks swap block indices (x,y) and shift by grid width (or width-1) // to cover right side of matrix. // [ A00 A01 A02 ] [ A00 . . | . . ] // [ A10 A11 A12 ] [ A10 A11 . | . . ] // grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ] // [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ] // [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ] // // See stranspose_inplace_even for description of threads. __global__ void stranspose_inplace_odd( int n, float *matrix, int lda ) { __shared__ float sA[ NB ][ NB+1 ]; __shared__ float sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x >= blockIdx.y); int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1)); int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y )); ii *= NB; jj *= NB; float *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { float *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } //////////////////////////////////////////////////////////////////////////////// // grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even. // lower indicates blocks in strictly lower triangle of grid, excluding diagonal. // lower blocks shift up by one to cover left side of matrix including diagonal. // upper blocks swap block indices (x,y) and shift by grid width // to cover right side of matrix. // [ A00 A01 ] [ A10 . | . . ] // [ A10 A11 ] [ A20 A21 | . . ] // grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ] // [ A30 A31 ] [ A40 A41 | A01 A11 ] // [ A40 A41 ] // // Each block is NB x NB threads. // For non-diagonal block A, block B is symmetric block. // Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed, // syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j). // Threads outside the matrix do not touch memory. __global__ void stranspose_inplace_even( int n, float *matrix, int lda ) { __shared__ float sA[ NB ][ NB+1 ]; __shared__ float sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x > blockIdx.y); int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y)); int jj = (lower ? 
(blockIdx.y ) : (blockIdx.x + gridDim.y)); ii *= NB; jj *= NB; float *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { float *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } //////////////////////////////////////////////////////////////////////////////// extern "C" void magmablas_stranspose_inplace( magma_int_t n, float *A, magma_int_t lda ) { dim3 threads( NB, NB ); int nblock = (n + NB - 1)/NB; // need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix. // block assignment differs depending on whether nblock is odd or even. if( nblock % 2 == 1 ) { dim3 grid( nblock, (nblock+1)/2 ); stranspose_inplace_odd<<< grid, threads, 0, magma_stream >>>( n, A, lda ); } else { dim3 grid( nblock+1, nblock/2 ); stranspose_inplace_even<<< grid, threads, 0, magma_stream >>>( n, A, lda ); } }
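The two kernels above fold the thread-block grid so that only about half the blocks are launched for the in-place transpose. The sketch below is a simplified standalone variant (an illustration, not the MAGMA entry point): it launches a full nblock x nblock grid and lets upper-triangle blocks exit early, which shows the padded shared-tile swap without the odd/even grid arithmetic. Launch with dim3 grid(nblock, nblock) and dim3 threads(NB, NB).

#define NB 16

// Simplified in-place transpose of an n x n column-major matrix (lda >= n).
// One block per tile pair; blocks strictly above the diagonal do nothing.
__global__ void transpose_inplace_naive(int n, float *A, int lda)
{
    __shared__ float sA[NB][NB + 1];   // +1 padding avoids shared-memory bank conflicts
    __shared__ float sB[NB][NB + 1];

    if (blockIdx.x > blockIdx.y) return;          // upper-triangle blocks idle

    int i  = threadIdx.x, j = threadIdx.y;
    int ii = blockIdx.x * NB;                     // column offset of the lower tile
    int jj = blockIdx.y * NB;                     // row offset of the lower tile

    float *a = A + (jj + i) + (ii + j) * lda;     // tile at (row jj, col ii), on/below diagonal
    float *b = A + (ii + i) + (jj + j) * lda;     // mirrored tile at (row ii, col jj)

    bool inA = (jj + i < n) && (ii + j < n);
    bool inB = (ii + i < n) && (jj + j < n);
    if (inA) sA[j][i] = *a;                       // stage both tiles, transposed, in shared memory
    if (inB) sB[j][i] = *b;
    __syncthreads();
    if (inA) *a = sB[i][j];                       // write each tile back with the other's data
    if (inB) *b = sA[i][j];
}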
fb628a4d7c0994460b1cfbca64577cff055a1140.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_field.cu" extern "C" { __global__ void field_init(int m){ cuda_field_init(m); } __global__ void make_unit(cuda_field_element* B, int n){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; int thidY = (blockIdx.y * blockDim.y) + threadIdx.y; if(thidX < n && thidY < n){ B[thidX*n+thidY] = (thidX == thidY); } } __global__ void find_nonzero(cuda_field_element* A, int n, int i, int* k){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; if(i < thidX && thidX < n) if(A[thidX*n+i] != 0) *k = thidX; } __global__ void swap(cuda_field_element* M, int n, int i, int k){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; if(thidX < n){ cuda_field_element v = M[i*n+thidX]; M[i*n+thidX] = M[k*n+thidX]; M[k*n+thidX] = v; } } __global__ void fix_row(cuda_field_element* M, int n, int i, cuda_field_element mul){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; if(thidX < n){ M[i*n+thidX] = M[i*n+thidX] * mul; } } __global__ void update_column(cuda_field_element* A, cuda_field_element* i_th_column, int n, int i){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; if(thidX < n){ i_th_column[thidX] = A[thidX*n + i]; } } __global__ void fix_column(cuda_field_element* M, cuda_field_element* i_th_column, int n, int i){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; int thidY = (blockIdx.y * blockDim.y) + threadIdx.y; //__shared__ cuda_field_element P[32]; if(thidY != i && thidX < n && thidY < n){ //P[threadIdx.x] = M[i*n+thidX]; M[thidY*n+thidX] -= i_th_column[thidY]*M[i*n+thidX];//P[threadIdx.x]; } } }
fb628a4d7c0994460b1cfbca64577cff055a1140.cu
#include "cuda_field.cu" extern "C" { __global__ void field_init(int m){ cuda_field_init(m); } __global__ void make_unit(cuda_field_element* B, int n){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; int thidY = (blockIdx.y * blockDim.y) + threadIdx.y; if(thidX < n && thidY < n){ B[thidX*n+thidY] = (thidX == thidY); } } __global__ void find_nonzero(cuda_field_element* A, int n, int i, int* k){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; if(i < thidX && thidX < n) if(A[thidX*n+i] != 0) *k = thidX; } __global__ void swap(cuda_field_element* M, int n, int i, int k){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; if(thidX < n){ cuda_field_element v = M[i*n+thidX]; M[i*n+thidX] = M[k*n+thidX]; M[k*n+thidX] = v; } } __global__ void fix_row(cuda_field_element* M, int n, int i, cuda_field_element mul){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; if(thidX < n){ M[i*n+thidX] = M[i*n+thidX] * mul; } } __global__ void update_column(cuda_field_element* A, cuda_field_element* i_th_column, int n, int i){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; if(thidX < n){ i_th_column[thidX] = A[thidX*n + i]; } } __global__ void fix_column(cuda_field_element* M, cuda_field_element* i_th_column, int n, int i){ int thidX = (blockIdx.x * blockDim.x) + threadIdx.x; int thidY = (blockIdx.y * blockDim.y) + threadIdx.y; //__shared__ cuda_field_element P[32]; if(thidY != i && thidX < n && thidY < n){ //P[threadIdx.x] = M[i*n+thidX]; M[thidY*n+thidX] -= i_th_column[thidY]*M[i*n+thidX];//P[threadIdx.x]; } } }
ca3e2cce8910ed17bb1163ea37805a94a1daa93c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdlib.h> #define N (2048*2048) #define THREADS_PER_BLOCK 512 using namespace std; __global__ void mykernal() { // Run on device and called by host } __global__ void add(int *a, int *b, int *c, int n) { // *c = *a + *b; int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < n) c[index] = a[index] + b[index]; } void random_ints(int* x, int size) { for (int i = 0; i < size; i++) { x[i] = rand() % 10; } } int main() { int nDevices; hipGetDeviceCount(&nDevices); cout << nDevices << endl; for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } int *a, *b, *c; int *d_a, *d_b, *d_c; int size = N * sizeof(int); hipMalloc((void**)&d_a, size); hipMalloc((void**)&d_b, size); hipMalloc((void**)&d_c, size); a = (int *)malloc(size); random_ints(a, N); b = (int *)malloc(size); random_ints(b, N); c = (int *)malloc(size); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( add), dim3(N/THREADS_PER_BLOCK),dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c, N); hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost); for (int i = 0;i < 10; i++) { printf("a[%d]=%d , b[%d]=%d, c[%d]=%d\n",i,a[i],i,b[i],i,c[i]); } free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
ca3e2cce8910ed17bb1163ea37805a94a1daa93c.cu
#include <iostream> #include <stdlib.h> #define N (2048*2048) #define THREADS_PER_BLOCK 512 using namespace std; __global__ void mykernal() { // Run on device and called by host } __global__ void add(int *a, int *b, int *c, int n) { // *c = *a + *b; int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < n) c[index] = a[index] + b[index]; } void random_ints(int* x, int size) { for (int i = 0; i < size; i++) { x[i] = rand() % 10; } } int main() { int nDevices; cudaGetDeviceCount(&nDevices); cout << nDevices << endl; for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } int *a, *b, *c; int *d_a, *d_b, *d_c; int size = N * sizeof(int); cudaMalloc((void**)&d_a, size); cudaMalloc((void**)&d_b, size); cudaMalloc((void**)&d_c, size); a = (int *)malloc(size); random_ints(a, N); b = (int *)malloc(size); random_ints(b, N); c = (int *)malloc(size); cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); add<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N); cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost); for (int i = 0;i < 10; i++) { printf("a[%d]=%d , b[%d]=%d, c[%d]=%d\n",i,a[i],i,b[i],i,c[i]); } free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
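Neither version above checks any runtime-API return codes, and N (2048*2048) happens to be an exact multiple of THREADS_PER_BLOCK, so the launch covers every element; with an arbitrary N the block count would be (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK. A small hardening sketch for the CUDA variant (an addition, not in the file above):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Report-and-exit wrapper for CUDA runtime calls.
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,               \
                    cudaGetErrorString(err_));                               \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

// Usage against the code above:
//   CUDA_CHECK(cudaMalloc((void**)&d_a, size));
//   CUDA_CHECK(cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice));
//   add<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);
//   CUDA_CHECK(cudaGetLastError());   // catches bad launch configurations
//   CUDA_CHECK(cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost));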
aacc2e8726aca6e1960caa5db88f566e9603cd5b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define COALESCED_NUM 16 #define blockDimX 256 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 16 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define C(y,x) C[(y)*WIDTH_C+(x)] __global__ void vectormul(float * A, float * B, float * C, int width) { __shared__ float shared_0[16]; float sum_0; float sum_1; float sum_2; float sum_3; float sum_4; float sum_5; float sum_6; float sum_7; float sum_8; float sum_9; float sum_10; float sum_11; float sum_12; float sum_13; float sum_14; float sum_15; float a_0; float a_1; float a_2; float a_3; float a_4; float a_5; float a_6; float a_7; float a_8; float a_9; float a_10; float a_11; float a_12; float a_13; float a_14; float a_15; float b; sum_0=0; sum_1=0; sum_2=0; sum_3=0; sum_4=0; sum_5=0; sum_6=0; sum_7=0; sum_8=0; sum_9=0; sum_10=0; sum_11=0; sum_12=0; sum_13=0; sum_14=0; sum_15=0; if ((tidx<16)) { { shared_0[(tidx+0)]=A[(coalesced_idy+tidx)]; } } __syncthreads(); a_0=shared_0[((((bidy*16)+tidy)+0)+(( - 1)*coalesced_idy))]; a_1=shared_0[((((bidy*16)+tidy)+1)+(( - 1)*coalesced_idy))]; a_2=shared_0[((((bidy*16)+tidy)+2)+(( - 1)*coalesced_idy))]; a_3=shared_0[((((bidy*16)+tidy)+3)+(( - 1)*coalesced_idy))]; a_4=shared_0[((((bidy*16)+tidy)+4)+(( - 1)*coalesced_idy))]; a_5=shared_0[((((bidy*16)+tidy)+5)+(( - 1)*coalesced_idy))]; a_6=shared_0[((((bidy*16)+tidy)+6)+(( - 1)*coalesced_idy))]; a_7=shared_0[((((bidy*16)+tidy)+7)+(( - 1)*coalesced_idy))]; a_8=shared_0[((((bidy*16)+tidy)+8)+(( - 1)*coalesced_idy))]; a_9=shared_0[((((bidy*16)+tidy)+9)+(( - 1)*coalesced_idy))]; a_10=shared_0[((((bidy*16)+tidy)+10)+(( - 1)*coalesced_idy))]; a_11=shared_0[((((bidy*16)+tidy)+11)+(( - 1)*coalesced_idy))]; a_12=shared_0[((((bidy*16)+tidy)+12)+(( - 1)*coalesced_idy))]; a_13=shared_0[((((bidy*16)+tidy)+13)+(( - 1)*coalesced_idy))]; a_14=shared_0[((((bidy*16)+tidy)+14)+(( - 1)*coalesced_idy))]; a_15=shared_0[((((bidy*16)+tidy)+15)+(( - 1)*coalesced_idy))]; __syncthreads(); __syncthreads(); { b=B[idx]; } sum_0+=(a_0*b); sum_1+=(a_1*b); sum_2+=(a_2*b); sum_3+=(a_3*b); sum_4+=(a_4*b); sum_5+=(a_5*b); sum_6+=(a_6*b); sum_7+=(a_7*b); sum_8+=(a_8*b); sum_9+=(a_9*b); sum_10+=(a_10*b); sum_11+=(a_11*b); sum_12+=(a_12*b); sum_13+=(a_13*b); sum_14+=(a_14*b); sum_15+=(a_15*b); { C((((bidy*16)+tidy)+0), idx)+=sum_0; } { C((((bidy*16)+tidy)+1), idx)+=sum_1; } { C((((bidy*16)+tidy)+2), idx)+=sum_2; } { C((((bidy*16)+tidy)+3), idx)+=sum_3; } { C((((bidy*16)+tidy)+4), idx)+=sum_4; } { C((((bidy*16)+tidy)+5), idx)+=sum_5; } { C((((bidy*16)+tidy)+6), idx)+=sum_6; } { C((((bidy*16)+tidy)+7), idx)+=sum_7; } { C((((bidy*16)+tidy)+8), idx)+=sum_8; } { C((((bidy*16)+tidy)+9), idx)+=sum_9; } { C((((bidy*16)+tidy)+10), idx)+=sum_10; } { C((((bidy*16)+tidy)+11), idx)+=sum_11; } { C((((bidy*16)+tidy)+12), idx)+=sum_12; } { C((((bidy*16)+tidy)+13), idx)+=sum_13; } { C((((bidy*16)+tidy)+14), idx)+=sum_14; } { C((((bidy*16)+tidy)+15), idx)+=sum_15; } }
aacc2e8726aca6e1960caa5db88f566e9603cd5b.cu
#define COALESCED_NUM 16 #define blockDimX 256 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 16 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define C(y,x) C[(y)*WIDTH_C+(x)] __global__ void vectormul(float * A, float * B, float * C, int width) { __shared__ float shared_0[16]; float sum_0; float sum_1; float sum_2; float sum_3; float sum_4; float sum_5; float sum_6; float sum_7; float sum_8; float sum_9; float sum_10; float sum_11; float sum_12; float sum_13; float sum_14; float sum_15; float a_0; float a_1; float a_2; float a_3; float a_4; float a_5; float a_6; float a_7; float a_8; float a_9; float a_10; float a_11; float a_12; float a_13; float a_14; float a_15; float b; sum_0=0; sum_1=0; sum_2=0; sum_3=0; sum_4=0; sum_5=0; sum_6=0; sum_7=0; sum_8=0; sum_9=0; sum_10=0; sum_11=0; sum_12=0; sum_13=0; sum_14=0; sum_15=0; if ((tidx<16)) { { shared_0[(tidx+0)]=A[(coalesced_idy+tidx)]; } } __syncthreads(); a_0=shared_0[((((bidy*16)+tidy)+0)+(( - 1)*coalesced_idy))]; a_1=shared_0[((((bidy*16)+tidy)+1)+(( - 1)*coalesced_idy))]; a_2=shared_0[((((bidy*16)+tidy)+2)+(( - 1)*coalesced_idy))]; a_3=shared_0[((((bidy*16)+tidy)+3)+(( - 1)*coalesced_idy))]; a_4=shared_0[((((bidy*16)+tidy)+4)+(( - 1)*coalesced_idy))]; a_5=shared_0[((((bidy*16)+tidy)+5)+(( - 1)*coalesced_idy))]; a_6=shared_0[((((bidy*16)+tidy)+6)+(( - 1)*coalesced_idy))]; a_7=shared_0[((((bidy*16)+tidy)+7)+(( - 1)*coalesced_idy))]; a_8=shared_0[((((bidy*16)+tidy)+8)+(( - 1)*coalesced_idy))]; a_9=shared_0[((((bidy*16)+tidy)+9)+(( - 1)*coalesced_idy))]; a_10=shared_0[((((bidy*16)+tidy)+10)+(( - 1)*coalesced_idy))]; a_11=shared_0[((((bidy*16)+tidy)+11)+(( - 1)*coalesced_idy))]; a_12=shared_0[((((bidy*16)+tidy)+12)+(( - 1)*coalesced_idy))]; a_13=shared_0[((((bidy*16)+tidy)+13)+(( - 1)*coalesced_idy))]; a_14=shared_0[((((bidy*16)+tidy)+14)+(( - 1)*coalesced_idy))]; a_15=shared_0[((((bidy*16)+tidy)+15)+(( - 1)*coalesced_idy))]; __syncthreads(); __syncthreads(); { b=B[idx]; } sum_0+=(a_0*b); sum_1+=(a_1*b); sum_2+=(a_2*b); sum_3+=(a_3*b); sum_4+=(a_4*b); sum_5+=(a_5*b); sum_6+=(a_6*b); sum_7+=(a_7*b); sum_8+=(a_8*b); sum_9+=(a_9*b); sum_10+=(a_10*b); sum_11+=(a_11*b); sum_12+=(a_12*b); sum_13+=(a_13*b); sum_14+=(a_14*b); sum_15+=(a_15*b); { C((((bidy*16)+tidy)+0), idx)+=sum_0; } { C((((bidy*16)+tidy)+1), idx)+=sum_1; } { C((((bidy*16)+tidy)+2), idx)+=sum_2; } { C((((bidy*16)+tidy)+3), idx)+=sum_3; } { C((((bidy*16)+tidy)+4), idx)+=sum_4; } { C((((bidy*16)+tidy)+5), idx)+=sum_5; } { C((((bidy*16)+tidy)+6), idx)+=sum_6; } { C((((bidy*16)+tidy)+7), idx)+=sum_7; } { C((((bidy*16)+tidy)+8), idx)+=sum_8; } { C((((bidy*16)+tidy)+9), idx)+=sum_9; } { C((((bidy*16)+tidy)+10), idx)+=sum_10; } { C((((bidy*16)+tidy)+11), idx)+=sum_11; } { C((((bidy*16)+tidy)+12), idx)+=sum_12; } { C((((bidy*16)+tidy)+13), idx)+=sum_13; } { C((((bidy*16)+tidy)+14), idx)+=sum_14; } { C((((bidy*16)+tidy)+15), idx)+=sum_15; } }
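The kernel above is machine-generated and leans on compile-time macros: WIDTH_C (the row stride used by the C(y,x) macro) is expected to come from the build line, each 256x1 block covers 256 columns, and each grid row produces merger_y = 16 output rows, so the kernel effectively accumulates C(y,x) += A[y] * B[x]. A hypothetical host launch consistent with those macros follows; the original harness is not shown, so the function name and the divisibility assumptions are guesses.

// Hypothetical launch for vectormul (compile with e.g. -DWIDTH_C=4096 so the C(y,x)
// macro resolves; width must equal WIDTH_C).
void launch_vectormul(float* d_A, float* d_B, float* d_C, int width, int height)
{
    dim3 block(256, 1);                      // must match blockDimX x blockDimY
    dim3 grid(width / 256, height / 16);     // assumes width % 256 == 0 and height % 16 == 0
    vectormul<<<grid, block>>>(d_A, d_B, d_C, width);
}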
ab776fcae4392987c088df033d505acb72705143.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; float color = 0.0f; for (int i_y = 0; i_y < filterWidth; i_y++) { for (int i_x = 0; i_x < filterWidth; i_x++) { int c_x = thread_2D_pos.x + i_x - filterWidth / 2; int c_y = thread_2D_pos.y + i_y - filterWidth / 2; c_x = min(max(c_x, 0), numCols - 1); c_y = min(max(c_y, 0), numRows - 1); float filterValue = filter[i_y*filterWidth + i_x]; color += filterValue*static_cast<float>(inputChannel[c_y * numCols + c_x]); } } outputChannel[thread_1D_pos] = color; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x; greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y; blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(32, 32); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(numCols / blockSize.x +1, numRows / blockSize.y + 1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels << <gridSize, blockSize >> > (d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. //Red channel gaussian_blur << <gridSize, blockSize >> >(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //Green channel gaussian_blur << <gridSize, blockSize >> >(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //Blue channel gaussian_blur << <gridSize, blockSize >> >(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels << <gridSize, blockSize >> >(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); }
ab776fcae4392987c088df033d505acb72705143.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; float color = 0.0f; for (int i_y = 0; i_y < filterWidth; i_y++) { for (int i_x = 0; i_x < filterWidth; i_x++) { int c_x = thread_2D_pos.x + i_x - filterWidth / 2; int c_y = thread_2D_pos.y + i_y - filterWidth / 2; c_x = min(max(c_x, 0), numCols - 1); c_y = min(max(c_y, 0), numRows - 1); float filterValue = filter[i_y*filterWidth + i_x]; color += filterValue*static_cast<float>(inputChannel[c_y * numCols + c_x]); } } outputChannel[thread_1D_pos] = color; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x; greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y; blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(32, 32); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(numCols / blockSize.x +1, numRows / blockSize.y + 1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels << <gridSize, blockSize >> > (d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. //Red channel gaussian_blur << <gridSize, blockSize >> >(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //Green channel gaussian_blur << <gridSize, blockSize >> >(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //Blue channel gaussian_blur << <gridSize, blockSize >> >(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels << <gridSize, blockSize >> >(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); }
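Since the assignment above is graded against a sequential reference with clamp-to-edge semantics, a small CPU reference for one channel is handy for spot-checking the GPU output. The function below is a checking aid written for that purpose, not part of the assignment files; it mirrors the indexing and clamping used in gaussian_blur.

#include <algorithm>

// CPU reference blur for a single channel, clamp-to-edge, row-major input/output.
void blur_reference(const unsigned char* in, unsigned char* out,
                    int numRows, int numCols,
                    const float* filter, int filterWidth)
{
    for (int r = 0; r < numRows; ++r) {
        for (int c = 0; c < numCols; ++c) {
            float acc = 0.0f;
            for (int fy = 0; fy < filterWidth; ++fy) {
                for (int fx = 0; fx < filterWidth; ++fx) {
                    int y = std::min(std::max(r + fy - filterWidth / 2, 0), numRows - 1);
                    int x = std::min(std::max(c + fx - filterWidth / 2, 0), numCols - 1);
                    acc += filter[fy * filterWidth + fx] * in[y * numCols + x];
                }
            }
            out[r * numCols + c] = static_cast<unsigned char>(acc);
        }
    }
}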
89984f25c122692a54dda308773e5ca11cd40207.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "lab3.h" #include <cstdio> __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } __global__ void SimpleClone(const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt * yt + xt; if (yt < ht && xt < wt && mask[curt] > 127.0f) { const int yb = oy + yt, xb = ox + xt; const int curb = wb * yb + xb; if (0 <= yb && yb < hb && 0 <= xb && xb < wb) { output[curb * 3 + 0] = target[curt * 3 + 0]; output[curb * 3 + 1] = target[curt * 3 + 1]; output[curb * 3 + 2] = target[curt * 3 + 2]; } } } __global__ void CalcAB(const float *background, const float *target, const float *mask, unsigned *A, float *B, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int yb = oy + yt, xb = ox + xt; const int curt = wt * yt + xt, curb = wb * yb + xb; if (!(yt < ht && xt < wt && mask[curt] > 127.0 && 0 <= yb && yb < hb && 0 <= xb && xb < wb)) return; // A: 6543210 // DURLNNN int a = 0; float b0 = 0.0, b1 = 0.0, b2 = 0.0; #define DoNeighbor1(condb, condt, diffb, difft, b_off) \ if (condb) { \ a++; \ if (condt) { \ b0 += target[curt * 3 + 0] - target[(curt + difft) * 3 + 0]; \ b1 += target[curt * 3 + 1] - target[(curt + difft) * 3 + 1]; \ b2 += target[curt * 3 + 2] - target[(curt + difft) * 3 + 2]; \ if (mask[curt + difft] < 128.0) { \ b0 += background[(curb + diffb) * 3 + 0]; \ b1 += background[(curb + diffb) * 3 + 1]; \ b2 += background[(curb + diffb) * 3 + 2]; \ } else { \ a |= (1 << b_off); \ } \ } else { \ b0 += background[(curb + diffb) * 3 + 0]; \ b1 += background[(curb + diffb) * 3 + 1]; \ b2 += background[(curb + diffb) * 3 + 2]; \ } \ } \ DoNeighbor1(xb > 0 , xt > 0 , -1, -1, 3); DoNeighbor1(xb < wb - 1, xt < wt - 1, 1, 1, 4); DoNeighbor1(yb > 0 , yt > 0 , -wb, -wt, 5); DoNeighbor1(yb < hb - 1, yt < ht - 1, wb, wt, 6); A[curt] = a; B[curt] = b0; B[curt + wt * ht] = b1; B[curt + wt * ht * 2] = b2; } __global__ void JacobiIteration(const unsigned *A, const float *B, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int yb = oy + yt, xb = ox + xt; const int curt = wt * yt + xt, curb = wb * yb + xb; if (!(yt < ht && xt < wt && mask[curt] > 127.0 && 0 <= yb && yb < hb && 0 <= xb && xb < wb)) return; unsigned a = A[curt]; float ax = 0.0; if (a & (1 << 3)) ax -= output[curb - 1]; if (a & (1 << 4)) ax -= output[curb + 1]; if (a & (1 << 5)) ax -= output[curb - wb]; if (a & (1 << 6)) ax -= output[curb + wb]; output[curb] = (B[curt] - ax) / (a & 0x7); } template <typename T> __global__ void Reshape1(const T *input, T *output, const int w, const int h) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (!(y < h && x < w)) return; const int cur = w * y + x; output[cur] = input[cur * 3]; output[cur + w * h] = input[cur * 3 + 1]; output[cur + w * h * 2] = input[cur * 3 + 2]; } template <typename T> __global__ void 
Reshape2(const T *input, T *output, const int w, const int h) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (!(y < h && x < w)) return; const int cur = w * y + x; output[cur * 3] = input[cur]; output[cur * 3 + 1] = input[cur + w * h]; output[cur * 3 + 2] = input[cur + w * h * 2]; } const dim3 BS(32, 16); const int ITER = 20000; void PoissonImageCloning(const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox) { unsigned *A = nullptr; float *B = nullptr, *buf = nullptr; hipMalloc(&A, wt * ht * sizeof(unsigned)); hipMalloc(&B, wt * ht * sizeof(float) * 3); hipMalloc(&buf, wb * hb * sizeof(float) * 3); hipLaunchKernelGGL(( CalcAB), dim3(dim3(CeilDiv(wt, BS.x), CeilDiv(ht, BS.y))), dim3(BS), 0, 0, background, target, mask, A, B, wb, hb, wt, ht, oy, ox ); hipLaunchKernelGGL(( Reshape1), dim3(dim3(CeilDiv(wb, BS.x), CeilDiv(hb, BS.y))), dim3(BS), 0, 0, background, buf, wb, hb ); for (int i = 0; i < ITER; i++) { hipDeviceSynchronize(); hipLaunchKernelGGL(( JacobiIteration), dim3(dim3(CeilDiv(wt, BS.x), CeilDiv(ht, BS.y))), dim3(BS), 0, 0, A, B, mask, buf, wb, hb, wt, ht, oy, ox ); hipLaunchKernelGGL(( JacobiIteration), dim3(dim3(CeilDiv(wt, BS.x), CeilDiv(ht, BS.y))), dim3(BS), 0, 0, A, B + wt * ht, mask, buf + wb * hb, wb, hb, wt, ht, oy, ox ); hipLaunchKernelGGL(( JacobiIteration), dim3(dim3(CeilDiv(wt, BS.x), CeilDiv(ht, BS.y))), dim3(BS), 0, 0, A, B + wt * ht * 2, mask, buf + wb * hb * 2, wb, hb, wt, ht, oy, ox ); } hipDeviceSynchronize(); hipLaunchKernelGGL(( Reshape2), dim3(dim3(CeilDiv(wb, BS.x), CeilDiv(hb, BS.y))), dim3(BS), 0, 0, buf, output, wb, hb ); hipFree(A); hipFree(B); hipFree(buf); }
89984f25c122692a54dda308773e5ca11cd40207.cu
#include "lab3.h" #include <cstdio> __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } __global__ void SimpleClone(const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt * yt + xt; if (yt < ht && xt < wt && mask[curt] > 127.0f) { const int yb = oy + yt, xb = ox + xt; const int curb = wb * yb + xb; if (0 <= yb && yb < hb && 0 <= xb && xb < wb) { output[curb * 3 + 0] = target[curt * 3 + 0]; output[curb * 3 + 1] = target[curt * 3 + 1]; output[curb * 3 + 2] = target[curt * 3 + 2]; } } } __global__ void CalcAB(const float *background, const float *target, const float *mask, unsigned *A, float *B, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int yb = oy + yt, xb = ox + xt; const int curt = wt * yt + xt, curb = wb * yb + xb; if (!(yt < ht && xt < wt && mask[curt] > 127.0 && 0 <= yb && yb < hb && 0 <= xb && xb < wb)) return; // A: 6543210 // DURLNNN int a = 0; float b0 = 0.0, b1 = 0.0, b2 = 0.0; #define DoNeighbor1(condb, condt, diffb, difft, b_off) \ if (condb) { \ a++; \ if (condt) { \ b0 += target[curt * 3 + 0] - target[(curt + difft) * 3 + 0]; \ b1 += target[curt * 3 + 1] - target[(curt + difft) * 3 + 1]; \ b2 += target[curt * 3 + 2] - target[(curt + difft) * 3 + 2]; \ if (mask[curt + difft] < 128.0) { \ b0 += background[(curb + diffb) * 3 + 0]; \ b1 += background[(curb + diffb) * 3 + 1]; \ b2 += background[(curb + diffb) * 3 + 2]; \ } else { \ a |= (1 << b_off); \ } \ } else { \ b0 += background[(curb + diffb) * 3 + 0]; \ b1 += background[(curb + diffb) * 3 + 1]; \ b2 += background[(curb + diffb) * 3 + 2]; \ } \ } \ DoNeighbor1(xb > 0 , xt > 0 , -1, -1, 3); DoNeighbor1(xb < wb - 1, xt < wt - 1, 1, 1, 4); DoNeighbor1(yb > 0 , yt > 0 , -wb, -wt, 5); DoNeighbor1(yb < hb - 1, yt < ht - 1, wb, wt, 6); A[curt] = a; B[curt] = b0; B[curt + wt * ht] = b1; B[curt + wt * ht * 2] = b2; } __global__ void JacobiIteration(const unsigned *A, const float *B, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int yb = oy + yt, xb = ox + xt; const int curt = wt * yt + xt, curb = wb * yb + xb; if (!(yt < ht && xt < wt && mask[curt] > 127.0 && 0 <= yb && yb < hb && 0 <= xb && xb < wb)) return; unsigned a = A[curt]; float ax = 0.0; if (a & (1 << 3)) ax -= output[curb - 1]; if (a & (1 << 4)) ax -= output[curb + 1]; if (a & (1 << 5)) ax -= output[curb - wb]; if (a & (1 << 6)) ax -= output[curb + wb]; output[curb] = (B[curt] - ax) / (a & 0x7); } template <typename T> __global__ void Reshape1(const T *input, T *output, const int w, const int h) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (!(y < h && x < w)) return; const int cur = w * y + x; output[cur] = input[cur * 3]; output[cur + w * h] = input[cur * 3 + 1]; output[cur + w * h * 2] = input[cur * 3 + 2]; } template <typename T> __global__ void Reshape2(const T *input, T *output, const int w, const int h) { const int y = blockIdx.y * 
blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (!(y < h && x < w)) return; const int cur = w * y + x; output[cur * 3] = input[cur]; output[cur * 3 + 1] = input[cur + w * h]; output[cur * 3 + 2] = input[cur + w * h * 2]; } const dim3 BS(32, 16); const int ITER = 20000; void PoissonImageCloning(const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox) { unsigned *A = nullptr; float *B = nullptr, *buf = nullptr; cudaMalloc(&A, wt * ht * sizeof(unsigned)); cudaMalloc(&B, wt * ht * sizeof(float) * 3); cudaMalloc(&buf, wb * hb * sizeof(float) * 3); CalcAB<<<dim3(CeilDiv(wt, BS.x), CeilDiv(ht, BS.y)), BS>>>( background, target, mask, A, B, wb, hb, wt, ht, oy, ox ); Reshape1<<<dim3(CeilDiv(wb, BS.x), CeilDiv(hb, BS.y)), BS>>>( background, buf, wb, hb ); for (int i = 0; i < ITER; i++) { cudaDeviceSynchronize(); JacobiIteration<<<dim3(CeilDiv(wt, BS.x), CeilDiv(ht, BS.y)), BS>>>( A, B, mask, buf, wb, hb, wt, ht, oy, ox ); JacobiIteration<<<dim3(CeilDiv(wt, BS.x), CeilDiv(ht, BS.y)), BS>>>( A, B + wt * ht, mask, buf + wb * hb, wb, hb, wt, ht, oy, ox ); JacobiIteration<<<dim3(CeilDiv(wt, BS.x), CeilDiv(ht, BS.y)), BS>>>( A, B + wt * ht * 2, mask, buf + wb * hb * 2, wb, hb, wt, ht, oy, ox ); } cudaDeviceSynchronize(); Reshape2<<<dim3(CeilDiv(wb, BS.x), CeilDiv(hb, BS.y)), BS>>>( buf, output, wb, hb ); cudaFree(A); cudaFree(B); cudaFree(buf); }
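For reference, the update the kernels above implement is x_p = (B_p + sum of masked neighbors) / (number of valid neighbors); note that JacobiIteration reads and writes the same buffer, so the GPU sweeps are not a textbook two-buffer Jacobi. The CPU sketch below is a checking aid, not part of lab3; it performs one sweep with explicit cur/next buffers (initialise next as a copy of cur before calling it).

// One CPU Jacobi sweep for a single channel, matching CalcAB's A/B encoding:
// bits 3..6 of A flag left/right/up/down neighbors that lie inside the mask,
// bits 0..2 hold the neighbor count, and B is the precomputed right-hand side.
void jacobi_sweep_cpu(const unsigned* A, const float* B, const float* mask,
                      const float* cur, float* next,
                      int wb, int hb, int wt, int ht, int oy, int ox)
{
    for (int yt = 0; yt < ht; ++yt) {
        for (int xt = 0; xt < wt; ++xt) {
            int yb = oy + yt, xb = ox + xt;
            int curt = yt * wt + xt, curb = yb * wb + xb;
            if (mask[curt] <= 127.0f) continue;
            if (yb < 0 || yb >= hb || xb < 0 || xb >= wb) continue;
            unsigned a = A[curt];
            float sum = B[curt];
            if (a & (1u << 3)) sum += cur[curb - 1];      // left neighbor inside mask
            if (a & (1u << 4)) sum += cur[curb + 1];      // right neighbor inside mask
            if (a & (1u << 5)) sum += cur[curb - wb];     // up neighbor inside mask
            if (a & (1u << 6)) sum += cur[curb + wb];     // down neighbor inside mask
            next[curb] = sum / float(a & 0x7u);           // divide by neighbor count
        }
    }
}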
0152bc8353bcf5830bc2ba29e2403f9b39698976.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "imagesHandler.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <algorithm> #include <cfloat> #include <chrono> #include <random> #include <vector> #include <stdexcept> #include <string> struct Data { explicit Data(int size) : size(size), bytes(size * sizeof(float)){ hipMalloc(&x, bytes); hipMalloc(&y, bytes); hipMalloc(&z, bytes); hipMalloc(&assignments, bytes); } Data(int size, std::vector<float>& h_x, std::vector<float>& h_y,std::vector<float>& h_z,std::vector<float>& h_assignments): size(size),bytes(size*sizeof(float)){ hipMalloc(&x, bytes); hipMalloc(&y, bytes); hipMalloc(&z, bytes); hipMalloc(&assignments, bytes); hipMemcpy(x, h_x.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(y, h_y.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(z, h_z.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(assignments, h_assignments.data(), bytes, hipMemcpyHostToDevice); } ~Data() { hipFree(x); hipFree(y); hipFree(z); hipFree(assignments); } void clear() { hipMemset(x, 0, bytes); hipMemset(y, 0, bytes); hipMemset(z, 0, bytes); hipMemset(assignments, 0, bytes); } float* x{nullptr}; float* y{nullptr}; float* z{nullptr}; float* assignments{nullptr}; int size{0}; int bytes{0}; }; //function to easily compute l2 distance, can be quickly updated with more dimensions adding parameters __device__ float squared_l2_distance(float x_1, float y_1, float z_1, float x_2, float y_2, float z_2) { return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2) + (z_1 - z_2) * (z_1 - z_2); } //1)Compute the best distances between the 3 dimensional points in data_x,y,z //2)if is the last iteration store the best cluster of each point in the data_assignments vector as a float id (0,1...) //3)compute the new clusters means __global__ void assign_clusters(const float* __restrict__ data_x, const float* __restrict__ data_y, const float* __restrict__ data_z, float* data_assignments, int data_size, const float* __restrict__ means_x, const float* __restrict__ means_y, const float* __restrict__ means_z, float* __restrict__ new_sums_x, float* __restrict__ new_sums_y, float* __restrict__ new_sums_z, int numberOfCluster, int* __restrict__ counts, bool save) { //With M threads per block a unique index for each thread is given by:int index = threadIdx.x + blockIdx.x * M; //Where M is the size of the block of threads; i.e.,blockDim.x extern __shared__ float shared_means[]; const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; //first k threads copy over the cluster means. if (threadIdx.x < numberOfCluster) { shared_means[threadIdx.x] = means_x[threadIdx.x]; shared_means[numberOfCluster + threadIdx.x] = means_y[threadIdx.x]; shared_means[numberOfCluster*2 + threadIdx.x] = means_z[threadIdx.x]; } // Wait for those k threads. 
__syncthreads(); const float x = data_x[index]; const float y = data_y[index]; const float z = data_z[index]; float best_distance = FLT_MAX; int best_cluster=0; for (int cluster = 0; cluster < numberOfCluster; ++cluster) { const float distance =squared_l2_distance(x, y, z, shared_means[cluster],shared_means[numberOfCluster + cluster],shared_means[numberOfCluster*2 + cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; if (save){ data_assignments[index]=best_cluster; } } } atomicAdd(&new_sums_x[best_cluster], x); atomicAdd(&new_sums_y[best_cluster], y); atomicAdd(&new_sums_z[best_cluster], z); atomicAdd(&counts[best_cluster], 1); } // Each thread is one cluster, which just recomputes its coordinates as the mean of all points // assigned to it simply assigning the new passed means and dividing for the clusters number element __global__ void compute_new_means(float* __restrict__ means_x, float* __restrict__ means_y, float* __restrict__ means_z, const float* __restrict__ new_sum_x, const float* __restrict__ new_sum_y, const float* __restrict__ new_sum_z, const int* __restrict__ counts ) { const int cluster = threadIdx.x; // Threshold count to turn 0/0 into 0/1. const int count = max(1, counts[cluster]); means_x[cluster] = new_sum_x[cluster] / count; means_y[cluster] = new_sum_y[cluster] / count; means_z[cluster] = new_sum_z[cluster] / count; } int main(int argc, char **argi) { //Image Handler creation imagesHandler handler; //Input params acqisition && Image opening by CImg and dimension acquisition std::vector<int> params = handler.inputParamAcquisition(argi); int iterations = params[0]; int numberOfClusters = params[1]; int columns = params[2]; int rows = params[3]; //Data array initialization std::vector<float> h_x(rows * columns); std::vector<float> h_y(rows * columns); std::vector<float> h_z(rows * columns); std::vector<float> assignments(rows * columns); //Data array population handler.dataAcquisition(h_x, h_y, h_z); Data d_data(h_x.size(), h_x, h_y, h_z,assignments); //Random first cluster means selections std::random_device seed; std::mt19937 rng(seed()); std::shuffle(h_x.begin(), h_x.end(), rng); std::shuffle(h_y.begin(), h_y.end(), rng); std::shuffle(h_z.begin(), h_z.end(), rng); Data d_means(numberOfClusters, h_x, h_y, h_z, assignments); Data d_sums(numberOfClusters); //GPU initialization int* d_counts; hipMalloc(&d_counts, numberOfClusters * sizeof(int)); hipMemset(d_counts, 0, numberOfClusters * sizeof(int)); int number_of_elements = h_x.size(); const int threads = 1024; const int blocks = (number_of_elements + threads - 1) / threads; const int shared_memory = d_means.bytes * 3; //boolean variable to saving assignments during the last iteration bool save = false; std::cout<< "\n\n image processing...\n\n"; //clock initialization std::clock_t start; double duration; start = std::clock(); //KMEANS for (size_t iteration = 0; iteration < iterations; ++iteration) { hipMemset(d_counts, 0, numberOfClusters * sizeof(int)); d_sums.clear(); //last iteration saving if(iteration == iterations -1){ save = true; } hipLaunchKernelGGL(( assign_clusters), dim3(blocks), dim3(threads), shared_memory, 0, d_data.x, d_data.y, d_data.z, d_data.assignments, d_data.size, d_means.x, d_means.y, d_means.z, d_sums.x, d_sums.y, d_sums.z, numberOfClusters, d_counts, save); hipDeviceSynchronize(); hipLaunchKernelGGL(( compute_new_means), dim3(1), dim3(numberOfClusters), 0, 0, d_means.x, d_means.y, d_means.z, d_sums.x, d_sums.y, d_sums.z, d_counts ); hipDeviceSynchronize(); 
} duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC; std::cout<< "PROCESSING TIME: "<< duration << " s" <<'\n'; //Processed data acquisition to coloring output image float* h_best; h_best = (float*)malloc(number_of_elements*sizeof(float)); hipMemcpy(h_best,d_data.assignments, number_of_elements*sizeof(float), hipMemcpyDeviceToHost); float* finalmeanx; float* finalmeany; float* finalmeanz; finalmeanx = (float*)malloc(numberOfClusters*sizeof(float)); finalmeany = (float*)malloc(numberOfClusters*sizeof(float)); finalmeanz = (float*)malloc(numberOfClusters*sizeof(float)); hipMemcpy(finalmeanx, d_means.x, numberOfClusters*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(finalmeany, d_means.y, numberOfClusters*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(finalmeanz, d_means.z, numberOfClusters*sizeof(float),hipMemcpyDeviceToHost); std::vector<int> clustColorR(numberOfClusters); std::vector<int> clustColorG(numberOfClusters); std::vector<int> clustColorB(numberOfClusters); for (int cluster = 0; cluster < numberOfClusters; cluster++){ clustColorR[cluster]=(int)finalmeanx[cluster]; clustColorG[cluster]=(int)finalmeany[cluster]; clustColorB[cluster]=(int)finalmeanz[cluster]; } int* assignedPixels; assignedPixels = (int*)malloc(number_of_elements*sizeof(int)); for(int i=0; i<number_of_elements; i++){ assignedPixels[i]=(int)h_best[i]; } handler.disp(assignedPixels, clustColorR, clustColorG, clustColorB); }
0152bc8353bcf5830bc2ba29e2403f9b39698976.cu
#include "imagesHandler.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <algorithm> #include <cfloat> #include <chrono> #include <random> #include <vector> #include <stdexcept> #include <string> struct Data { explicit Data(int size) : size(size), bytes(size * sizeof(float)){ cudaMalloc(&x, bytes); cudaMalloc(&y, bytes); cudaMalloc(&z, bytes); cudaMalloc(&assignments, bytes); } Data(int size, std::vector<float>& h_x, std::vector<float>& h_y,std::vector<float>& h_z,std::vector<float>& h_assignments): size(size),bytes(size*sizeof(float)){ cudaMalloc(&x, bytes); cudaMalloc(&y, bytes); cudaMalloc(&z, bytes); cudaMalloc(&assignments, bytes); cudaMemcpy(x, h_x.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(y, h_y.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(z, h_z.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(assignments, h_assignments.data(), bytes, cudaMemcpyHostToDevice); } ~Data() { cudaFree(x); cudaFree(y); cudaFree(z); cudaFree(assignments); } void clear() { cudaMemset(x, 0, bytes); cudaMemset(y, 0, bytes); cudaMemset(z, 0, bytes); cudaMemset(assignments, 0, bytes); } float* x{nullptr}; float* y{nullptr}; float* z{nullptr}; float* assignments{nullptr}; int size{0}; int bytes{0}; }; //function to easily compute l2 distance, can be quickly updated with more dimensions adding parameters __device__ float squared_l2_distance(float x_1, float y_1, float z_1, float x_2, float y_2, float z_2) { return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2) + (z_1 - z_2) * (z_1 - z_2); } //1)Compute the best distances between the 3 dimensional points in data_x,y,z //2)if is the last iteration store the best cluster of each point in the data_assignments vector as a float id (0,1...) //3)compute the new clusters means __global__ void assign_clusters(const float* __restrict__ data_x, const float* __restrict__ data_y, const float* __restrict__ data_z, float* data_assignments, int data_size, const float* __restrict__ means_x, const float* __restrict__ means_y, const float* __restrict__ means_z, float* __restrict__ new_sums_x, float* __restrict__ new_sums_y, float* __restrict__ new_sums_z, int numberOfCluster, int* __restrict__ counts, bool save) { //With M threads per block a unique index for each thread is given by:int index = threadIdx.x + blockIdx.x * M; //Where M is the size of the block of threads; i.e.,blockDim.x extern __shared__ float shared_means[]; const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; //first k threads copy over the cluster means. if (threadIdx.x < numberOfCluster) { shared_means[threadIdx.x] = means_x[threadIdx.x]; shared_means[numberOfCluster + threadIdx.x] = means_y[threadIdx.x]; shared_means[numberOfCluster*2 + threadIdx.x] = means_z[threadIdx.x]; } // Wait for those k threads. 
__syncthreads(); const float x = data_x[index]; const float y = data_y[index]; const float z = data_z[index]; float best_distance = FLT_MAX; int best_cluster=0; for (int cluster = 0; cluster < numberOfCluster; ++cluster) { const float distance =squared_l2_distance(x, y, z, shared_means[cluster],shared_means[numberOfCluster + cluster],shared_means[numberOfCluster*2 + cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; if (save){ data_assignments[index]=best_cluster; } } } atomicAdd(&new_sums_x[best_cluster], x); atomicAdd(&new_sums_y[best_cluster], y); atomicAdd(&new_sums_z[best_cluster], z); atomicAdd(&counts[best_cluster], 1); } // Each thread is one cluster, which just recomputes its coordinates as the mean of all points // assigned to it simply assigning the new passed means and dividing for the clusters number element __global__ void compute_new_means(float* __restrict__ means_x, float* __restrict__ means_y, float* __restrict__ means_z, const float* __restrict__ new_sum_x, const float* __restrict__ new_sum_y, const float* __restrict__ new_sum_z, const int* __restrict__ counts ) { const int cluster = threadIdx.x; // Threshold count to turn 0/0 into 0/1. const int count = max(1, counts[cluster]); means_x[cluster] = new_sum_x[cluster] / count; means_y[cluster] = new_sum_y[cluster] / count; means_z[cluster] = new_sum_z[cluster] / count; } int main(int argc, char **argi) { //Image Handler creation imagesHandler handler; //Input params acqisition && Image opening by CImg and dimension acquisition std::vector<int> params = handler.inputParamAcquisition(argi); int iterations = params[0]; int numberOfClusters = params[1]; int columns = params[2]; int rows = params[3]; //Data array initialization std::vector<float> h_x(rows * columns); std::vector<float> h_y(rows * columns); std::vector<float> h_z(rows * columns); std::vector<float> assignments(rows * columns); //Data array population handler.dataAcquisition(h_x, h_y, h_z); Data d_data(h_x.size(), h_x, h_y, h_z,assignments); //Random first cluster means selections std::random_device seed; std::mt19937 rng(seed()); std::shuffle(h_x.begin(), h_x.end(), rng); std::shuffle(h_y.begin(), h_y.end(), rng); std::shuffle(h_z.begin(), h_z.end(), rng); Data d_means(numberOfClusters, h_x, h_y, h_z, assignments); Data d_sums(numberOfClusters); //GPU initialization int* d_counts; cudaMalloc(&d_counts, numberOfClusters * sizeof(int)); cudaMemset(d_counts, 0, numberOfClusters * sizeof(int)); int number_of_elements = h_x.size(); const int threads = 1024; const int blocks = (number_of_elements + threads - 1) / threads; const int shared_memory = d_means.bytes * 3; //boolean variable to saving assignments during the last iteration bool save = false; std::cout<< "\n\n image processing...\n\n"; //clock initialization std::clock_t start; double duration; start = std::clock(); //KMEANS for (size_t iteration = 0; iteration < iterations; ++iteration) { cudaMemset(d_counts, 0, numberOfClusters * sizeof(int)); d_sums.clear(); //last iteration saving if(iteration == iterations -1){ save = true; } assign_clusters<<<blocks, threads, shared_memory>>>(d_data.x, d_data.y, d_data.z, d_data.assignments, d_data.size, d_means.x, d_means.y, d_means.z, d_sums.x, d_sums.y, d_sums.z, numberOfClusters, d_counts, save); cudaDeviceSynchronize(); compute_new_means<<<1, numberOfClusters>>>(d_means.x, d_means.y, d_means.z, d_sums.x, d_sums.y, d_sums.z, d_counts ); cudaDeviceSynchronize(); } duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC; 
std::cout<< "PROCESSING TIME: "<< duration << " s" <<'\n'; //Processed data acquisition to coloring output image float* h_best; h_best = (float*)malloc(number_of_elements*sizeof(float)); cudaMemcpy(h_best,d_data.assignments, number_of_elements*sizeof(float), cudaMemcpyDeviceToHost); float* finalmeanx; float* finalmeany; float* finalmeanz; finalmeanx = (float*)malloc(numberOfClusters*sizeof(float)); finalmeany = (float*)malloc(numberOfClusters*sizeof(float)); finalmeanz = (float*)malloc(numberOfClusters*sizeof(float)); cudaMemcpy(finalmeanx, d_means.x, numberOfClusters*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(finalmeany, d_means.y, numberOfClusters*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(finalmeanz, d_means.z, numberOfClusters*sizeof(float),cudaMemcpyDeviceToHost); std::vector<int> clustColorR(numberOfClusters); std::vector<int> clustColorG(numberOfClusters); std::vector<int> clustColorB(numberOfClusters); for (int cluster = 0; cluster < numberOfClusters; cluster++){ clustColorR[cluster]=(int)finalmeanx[cluster]; clustColorG[cluster]=(int)finalmeany[cluster]; clustColorB[cluster]=(int)finalmeanz[cluster]; } int* assignedPixels; assignedPixels = (int*)malloc(number_of_elements*sizeof(int)); for(int i=0; i<number_of_elements; i++){ assignedPixels[i]=(int)h_best[i]; } handler.disp(assignedPixels, clustColorR, clustColorG, clustColorB); }
9034a7fe70dfd8279324540d897926d2dead29ef.hip
// !!! This is a file automatically generated by hipify!!!
//
// Created by klaus on 09.07.21.
//
#include "ImmediateFunctionsThrust.cuh"
#include "StreamFunctionHelper.cuh"
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>

enum ImmOp { ADD, MUL, DIVI, DIVII, SUBI, SUBII, MODI, MODII };

// Applies the selected operation between each stream value and the immediate operand.
struct immFunctor {
    size_t imm;
    ImmOp op;

    immFunctor(size_t _imm, ImmOp _op) {
        imm = _imm;
        op = _op;
    }

    __host__ __device__ int operator()(int input) const {
        switch (op) {
            case ADD: return input + imm;
            case MUL: return input * imm;
            case SUBI: return input - imm;
            case SUBII: return imm - input;
            case DIVI: return input / imm;
            case DIVII: return imm / input;
            case MODI: return input % imm;
            case MODII: return imm % input;
        }
        // Unreachable for valid ImmOp values; avoids a missing-return warning on the device.
        return input;
    }
};

shared_ptr<GPUIntStream> exec_imm_op(shared_ptr<GPUIntStream> input, size_t imm, ImmOp op) {
    // prepare result
    shared_ptr<GPUIntStream> result = make_shared<GPUIntStream>();
    hipMalloc((void**) &result->device_timestamp, input->size*sizeof(int));
    hipMalloc((void**) &result->device_values, input->size*sizeof(int));
    result->device_offset = input->device_offset;

    // copy timestamps
    auto input_ts = thrust::device_pointer_cast(input->device_timestamp);
    auto result_ts = thrust::device_pointer_cast(result->device_timestamp);
    thrust::copy_n(input_ts, input->size, result_ts);

    // get pointers and transform the stream; results are written into the newly
    // allocated result buffer rather than back into the input stream
    auto offset = thrust::device_pointer_cast(input->device_offset);
    auto input_vals_start = thrust::device_pointer_cast(input->device_values + *offset);
    auto input_vals_end = thrust::device_pointer_cast(input->device_values + input->size);
    auto result_vals = thrust::device_pointer_cast(result->device_values + *offset);
    immFunctor f(imm, op);
    thrust::transform(input_vals_start, input_vals_end, result_vals, f);

    return result;
}

std::shared_ptr<GPUIntStream> add_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, ADD);
}

shared_ptr<GPUIntStream> mul_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, MUL);
}

shared_ptr<GPUIntStream> sub_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, SUBI);
}

shared_ptr<GPUIntStream> sub_inv_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, SUBII);
}

shared_ptr<GPUIntStream> div_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, DIVI);
}

shared_ptr<GPUIntStream> div_inv_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, DIVII);
}

shared_ptr<GPUIntStream> mod_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, MODI);
}

shared_ptr<GPUIntStream> mod_inv_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, MODII);
}
9034a7fe70dfd8279324540d897926d2dead29ef.cu
//
// Created by klaus on 09.07.21.
//
#include "ImmediateFunctionsThrust.cuh"
#include "StreamFunctionHelper.cuh"
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>

enum ImmOp { ADD, MUL, DIVI, DIVII, SUBI, SUBII, MODI, MODII };

// Applies the selected operation between each stream value and the immediate operand.
struct immFunctor {
    size_t imm;
    ImmOp op;

    immFunctor(size_t _imm, ImmOp _op) {
        imm = _imm;
        op = _op;
    }

    __host__ __device__ int operator()(int input) const {
        switch (op) {
            case ADD: return input + imm;
            case MUL: return input * imm;
            case SUBI: return input - imm;
            case SUBII: return imm - input;
            case DIVI: return input / imm;
            case DIVII: return imm / input;
            case MODI: return input % imm;
            case MODII: return imm % input;
        }
        // Unreachable for valid ImmOp values; avoids a missing-return warning on the device.
        return input;
    }
};

shared_ptr<GPUIntStream> exec_imm_op(shared_ptr<GPUIntStream> input, size_t imm, ImmOp op) {
    // prepare result
    shared_ptr<GPUIntStream> result = make_shared<GPUIntStream>();
    cudaMalloc((void**) &result->device_timestamp, input->size*sizeof(int));
    cudaMalloc((void**) &result->device_values, input->size*sizeof(int));
    result->device_offset = input->device_offset;

    // copy timestamps
    auto input_ts = thrust::device_pointer_cast(input->device_timestamp);
    auto result_ts = thrust::device_pointer_cast(result->device_timestamp);
    thrust::copy_n(input_ts, input->size, result_ts);

    // get pointers and transform the stream; results are written into the newly
    // allocated result buffer rather than back into the input stream
    auto offset = thrust::device_pointer_cast(input->device_offset);
    auto input_vals_start = thrust::device_pointer_cast(input->device_values + *offset);
    auto input_vals_end = thrust::device_pointer_cast(input->device_values + input->size);
    auto result_vals = thrust::device_pointer_cast(result->device_values + *offset);
    immFunctor f(imm, op);
    thrust::transform(input_vals_start, input_vals_end, result_vals, f);

    return result;
}

std::shared_ptr<GPUIntStream> add_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, ADD);
}

shared_ptr<GPUIntStream> mul_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, MUL);
}

shared_ptr<GPUIntStream> sub_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, SUBI);
}

shared_ptr<GPUIntStream> sub_inv_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, SUBII);
}

shared_ptr<GPUIntStream> div_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, DIVI);
}

shared_ptr<GPUIntStream> div_inv_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, DIVII);
}

shared_ptr<GPUIntStream> mod_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, MODI);
}

shared_ptr<GPUIntStream> mod_inv_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
    return exec_imm_op(input, imm, MODII);
}
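// ---------------------------------------------------------------------------
// Editor's sketch (illustrative only, not part of the files above): exec_imm_op
// wraps raw device allocations with thrust::device_pointer_cast and applies a
// stateful functor via thrust::transform. The standalone example below shows
// just that pattern with an "add immediate" operation; GPUIntStream and the
// project headers are not needed here, and all names are made up for the demo.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>

struct add_imm {
    int imm;
    explicit add_imm(int _imm) : imm(_imm) {}
    __host__ __device__ int operator()(int x) const { return x + imm; }
};

int main() {
    const int n = 8;
    int h_in[n] = {0, 1, 2, 3, 4, 5, 6, 7};
    int h_out[n];

    int *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(int));
    cudaMalloc(&d_out, n * sizeof(int));
    cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);

    // Wrap the raw allocations so Thrust algorithms can iterate over them,
    // exactly as exec_imm_op does with input->device_values.
    thrust::device_ptr<int> in_begin  = thrust::device_pointer_cast(d_in);
    thrust::device_ptr<int> in_end    = in_begin + n;
    thrust::device_ptr<int> out_begin = thrust::device_pointer_cast(d_out);

    thrust::transform(in_begin, in_end, out_begin, add_imm(10));

    cudaMemcpy(h_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%d ", h_out[i]);  // prints 10 .. 17
    printf("\n");

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}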
938bafd8002e94b8b7f2c1d63953138cffe05d3f.hip
// !!! This is a file automatically generated by hipify!!! #include "Mesh.cuh" #include "book.cuh" #include <fstream> #include <iostream> #include <string> #include <sstream> #include <vector> #include <unordered_map> #include <map> using namespace std; Mesh::Mesh() { edges = NULL; verts = NULL; faces = NULL; edgeNum = 0; vertNum = 0; } Mesh::~Mesh() { } bool Mesh::LoadFromFile(const char *fileName) { ifstream input(fileName); if (!input) { cout << "Cannot open file " << fileName << endl; return false; } string curLine; char t; Vertex point; Face face; stringstream sin; vector<Vertex> vertsVec; vector<Face> facesVec; vector<Edge> edgesVec; while (getline(input, curLine)) { sin.clear(); if (curLine[0] == 'v') { if (curLine[1] == ' ' || curLine[1] == '\t') { sin << curLine; sin >> t >> point.pos.x >> point.pos.y >> point.pos.z; vertsVec.push_back(point); } } else if (curLine[0] == 'f') { sin << curLine; sin >> t >> face.verts[0] >> face.verts[1] >> face.verts[2]; --face.verts[0]; --face.verts[1]; --face.verts[2]; facesVec.push_back(face); } } edgesVec.resize(facesVec.size() * 3); int edgeIdx = 0; map< pair<int, int>, int > verts2edge; for (int i = 0; i < facesVec.size(); ++i) { for (int j = 0; j < 3; ++j) { int v0 = facesVec[i].verts[j], v1 = facesVec[i].verts[(j + 1) % 3]; edgesVec[edgeIdx].verts[0] = v0; edgesVec[edgeIdx].verts[1] = v1; edgesVec[edgeIdx].edgeLen = (vertsVec[v0].pos - vertsVec[v1].pos).length(); edgesVec[edgeIdx].nextEdge = i * 3 + (j + 1) % 3; edgesVec[edgeIdx].prevEdge = i * 3 + (j + 2) % 3; edgesVec[edgeIdx].faceId = i; facesVec[i].edges[j] = edgeIdx; if (v0 > v1) std::swap(v0, v1); auto iter = verts2edge.find(make_pair(v0, v1)); if (iter != verts2edge.end()) { edgesVec[edgeIdx].twinEdge = iter->second; edgesVec[iter->second].twinEdge = edgeIdx; } else { verts2edge[make_pair(v0, v1)] = edgeIdx; } ++edgeIdx; } } edgeNum = edgesVec.size(); edges = new Edge[edgeNum]; copy(edgesVec.begin(), edgesVec.end(), edges); vertNum = vertsVec.size(); verts = new Vertex[vertNum]; copy(vertsVec.begin(), vertsVec.end(), verts); faceNum = facesVec.size(); faces = new Face[faceNum]; copy(facesVec.begin(), facesVec.end(), faces); for (int i = 0; i < edgeNum; ++i) { double l0 = edges[i].edgeLen; double l1 = edges[edges[i].nextEdge].edgeLen; double l2 = edges[edges[i].prevEdge].edgeLen; double curAngle = (l0*l0 + l2*l2 - l1*l1) / (2.0 * l0*l2); if (curAngle > 1.0) curAngle = 1.0; else if (curAngle < -1.0) curAngle = -1.0; curAngle = acos(curAngle); verts[edges[i].verts[0]].angle += curAngle; /* curAngle = (l0*l0 + l1*l1 - l2*l2) / (2.0 * l0*l1); if (curAngle > 1.0) curAngle = 1.0; else if (curAngle < -1.0) curAngle = -1.0; curAngle = acos(curAngle); angles[edges[i].verts[1]] += curAngle; */ verts[edges[i].verts[0]].firstEdge = i; } return true; } bool Mesh::copyToGPU(Mesh *d_mesh) { hipError_t cudaStatus; d_mesh->edgeNum = edgeNum; d_mesh->vertNum = vertNum; d_mesh->faceNum = faceNum; HANDLE_ERROR(hipMalloc((void**)&(d_mesh->edges), edgeNum * sizeof(Edge))); HANDLE_ERROR(hipMalloc((void**)&(d_mesh->verts), vertNum * sizeof(Vertex))); HANDLE_ERROR(hipMalloc((void**)&(d_mesh->faces), faceNum * sizeof(Face))); HANDLE_ERROR(hipMemcpy(d_mesh->edges, edges, edgeNum * sizeof(Edge), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_mesh->verts, verts, vertNum * sizeof(Vertex), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_mesh->faces, faces, faceNum * sizeof(Face), hipMemcpyHostToDevice)); return true; } void Mesh::clear() { if (edges) delete[] edges; if (verts) delete[] verts; if (faces) 
delete[] faces; edgeNum = 0; vertNum = 0; faceNum = 0; } void Mesh::clearGPU() { HANDLE_ERROR(hipFree(edges)); HANDLE_ERROR(hipFree(verts)); HANDLE_ERROR(hipFree(faces)); edgeNum = 0; vertNum = 0; faceNum = 0; }
938bafd8002e94b8b7f2c1d63953138cffe05d3f.cu
#include "Mesh.cuh" #include "book.cuh" #include <fstream> #include <iostream> #include <string> #include <sstream> #include <vector> #include <unordered_map> #include <map> using namespace std; Mesh::Mesh() { edges = NULL; verts = NULL; faces = NULL; edgeNum = 0; vertNum = 0; } Mesh::~Mesh() { } bool Mesh::LoadFromFile(const char *fileName) { ifstream input(fileName); if (!input) { cout << "Cannot open file " << fileName << endl; return false; } string curLine; char t; Vertex point; Face face; stringstream sin; vector<Vertex> vertsVec; vector<Face> facesVec; vector<Edge> edgesVec; while (getline(input, curLine)) { sin.clear(); if (curLine[0] == 'v') { if (curLine[1] == ' ' || curLine[1] == '\t') { sin << curLine; sin >> t >> point.pos.x >> point.pos.y >> point.pos.z; vertsVec.push_back(point); } } else if (curLine[0] == 'f') { sin << curLine; sin >> t >> face.verts[0] >> face.verts[1] >> face.verts[2]; --face.verts[0]; --face.verts[1]; --face.verts[2]; facesVec.push_back(face); } } edgesVec.resize(facesVec.size() * 3); int edgeIdx = 0; map< pair<int, int>, int > verts2edge; for (int i = 0; i < facesVec.size(); ++i) { for (int j = 0; j < 3; ++j) { int v0 = facesVec[i].verts[j], v1 = facesVec[i].verts[(j + 1) % 3]; edgesVec[edgeIdx].verts[0] = v0; edgesVec[edgeIdx].verts[1] = v1; edgesVec[edgeIdx].edgeLen = (vertsVec[v0].pos - vertsVec[v1].pos).length(); edgesVec[edgeIdx].nextEdge = i * 3 + (j + 1) % 3; edgesVec[edgeIdx].prevEdge = i * 3 + (j + 2) % 3; edgesVec[edgeIdx].faceId = i; facesVec[i].edges[j] = edgeIdx; if (v0 > v1) std::swap(v0, v1); auto iter = verts2edge.find(make_pair(v0, v1)); if (iter != verts2edge.end()) { edgesVec[edgeIdx].twinEdge = iter->second; edgesVec[iter->second].twinEdge = edgeIdx; } else { verts2edge[make_pair(v0, v1)] = edgeIdx; } ++edgeIdx; } } edgeNum = edgesVec.size(); edges = new Edge[edgeNum]; copy(edgesVec.begin(), edgesVec.end(), edges); vertNum = vertsVec.size(); verts = new Vertex[vertNum]; copy(vertsVec.begin(), vertsVec.end(), verts); faceNum = facesVec.size(); faces = new Face[faceNum]; copy(facesVec.begin(), facesVec.end(), faces); for (int i = 0; i < edgeNum; ++i) { double l0 = edges[i].edgeLen; double l1 = edges[edges[i].nextEdge].edgeLen; double l2 = edges[edges[i].prevEdge].edgeLen; double curAngle = (l0*l0 + l2*l2 - l1*l1) / (2.0 * l0*l2); if (curAngle > 1.0) curAngle = 1.0; else if (curAngle < -1.0) curAngle = -1.0; curAngle = acos(curAngle); verts[edges[i].verts[0]].angle += curAngle; /* curAngle = (l0*l0 + l1*l1 - l2*l2) / (2.0 * l0*l1); if (curAngle > 1.0) curAngle = 1.0; else if (curAngle < -1.0) curAngle = -1.0; curAngle = acos(curAngle); angles[edges[i].verts[1]] += curAngle; */ verts[edges[i].verts[0]].firstEdge = i; } return true; } bool Mesh::copyToGPU(Mesh *d_mesh) { cudaError_t cudaStatus; d_mesh->edgeNum = edgeNum; d_mesh->vertNum = vertNum; d_mesh->faceNum = faceNum; HANDLE_ERROR(cudaMalloc((void**)&(d_mesh->edges), edgeNum * sizeof(Edge))); HANDLE_ERROR(cudaMalloc((void**)&(d_mesh->verts), vertNum * sizeof(Vertex))); HANDLE_ERROR(cudaMalloc((void**)&(d_mesh->faces), faceNum * sizeof(Face))); HANDLE_ERROR(cudaMemcpy(d_mesh->edges, edges, edgeNum * sizeof(Edge), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_mesh->verts, verts, vertNum * sizeof(Vertex), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_mesh->faces, faces, faceNum * sizeof(Face), cudaMemcpyHostToDevice)); return true; } void Mesh::clear() { if (edges) delete[] edges; if (verts) delete[] verts; if (faces) delete[] faces; edgeNum = 0; vertNum = 0; faceNum = 0; } 
void Mesh::clearGPU() { HANDLE_ERROR(cudaFree(edges)); HANDLE_ERROR(cudaFree(verts)); HANDLE_ERROR(cudaFree(faces)); edgeNum = 0; vertNum = 0; faceNum = 0; }
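// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, hypothetical names): Mesh::copyToGPU above
// leaves d_mesh itself in host memory while its edges/verts/faces members point
// into device memory. A common way to consume such a structure is to pass it to
// a kernel *by value*, so the device receives a copy of the struct containing
// valid device pointers. The toy example below demonstrates that pattern with a
// reduced struct; it is not the project's actual kernel code.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

struct TinyMesh {
    float* verts;   // device pointer after upload
    int vertNum;
};

__global__ void scale_verts(TinyMesh m, float s) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // The struct was copied by value, so m.verts is a valid device pointer here.
    if (i < m.vertNum) m.verts[i] *= s;
}

int main() {
    const int n = 4;
    float h_verts[n] = {1.f, 2.f, 3.f, 4.f};

    TinyMesh mesh;              // lives on the host, like d_mesh in copyToGPU
    mesh.vertNum = n;
    cudaMalloc(&mesh.verts, n * sizeof(float));
    cudaMemcpy(mesh.verts, h_verts, n * sizeof(float), cudaMemcpyHostToDevice);

    scale_verts<<<1, 32>>>(mesh, 2.0f);   // pass the struct by value
    cudaDeviceSynchronize();

    cudaMemcpy(h_verts, mesh.verts, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%g ", h_verts[i]);  // 2 4 6 8
    printf("\n");

    cudaFree(mesh.verts);
    return 0;
}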
b6ad9eec7e73b6a825f856a783d4a1b33d589506.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* % Function: demapper % Description: Maps complex-valued modulation symbols to binary digits using hard decision % Inputs: *symbols_R_h: Real part of the symbols % N: Number of output bits % Qm: Demodulation type (1=bpsk, 2=qpsk, 4=16qam, or 6=64qam) % Outputs: *bits_h: Demodulated bits By: Ahmad Nour & Mohammed Mostafa */ #include "demapper.cuh" __global__ void Demapper(float *symbols_R_d, float *symbols_I_d, Byte *bits_d, int Qm, int numThreads) { int idx = blockIdx.x * blockDim.x + threadIdx.x; //Not to run more threads than available data if (idx >= numThreads) return; float symb_real = symbols_R_d[idx]; float symb_imag = symbols_I_d[idx]; switch (Qm) { case 2: //QPSK if(symb_real >= 0) bits_d[idx * Qm] = 0; else bits_d[idx * Qm] = 1; if (symb_imag >= 0) bits_d[idx * Qm + 1] = 0; else bits_d[idx * Qm + 1] = 1; break; case 4: //QAM16 if (symb_real < 0) bits_d[idx * Qm] = 1; else bits_d[idx * Qm] = 0; if (symb_imag < 0) bits_d[idx * Qm + 1] = 1; else bits_d[idx * Qm + 1] = 0; if (fabsf(symb_real) < (2 * rsqrtf(10))) bits_d[idx * Qm + 2] = 0; else bits_d[idx * Qm + 2] = 1; if (fabsf(symb_imag) < (2 * rsqrtf(10))) bits_d[idx * Qm + 3] = 0; else bits_d[idx * Qm + 3] = 1; break; case 6: //QAM64 if (symb_real < 0) bits_d[idx * Qm] = 1; else bits_d[idx * Qm] = 0; if (symb_imag < 0) bits_d[idx * Qm + 1] = 1; else bits_d[idx * Qm + 1] = 0; if (fabsf(symb_real) < (4 * rsqrtf(42))) bits_d[idx * Qm + 2] = 0; else bits_d[idx * Qm + 2] = 1; if (fabsf(symb_imag) < (4 * rsqrtf(42))) bits_d[idx * Qm + 3] = 0; else bits_d[idx * Qm + 3] = 1; if (fabsf(symb_real) > (2 * rsqrtf(42)) && (fabsf(symb_real) < (6 * rsqrtf(42)))) bits_d[idx * Qm + 4] = 0; else bits_d[idx * Qm + 4] = 1; if (fabsf(symb_imag) > (2 * rsqrtf(42)) && (fabsf(symb_imag) < (6 * rsqrtf(42)))) bits_d[idx * Qm + 5] = 0; else bits_d[idx * Qm + 5] = 1; break; default: break; } } void demapper(float* symbols_R_h, float* symbols_I_h, Byte** bits_h, const int N, int Qm) { //For timing purpose float elapsed = 0; //For time calc. hipEvent_t start, stop; //Device data Byte *bits_d; float *symbols_R_d, *symbols_I_d; //Host data allocation *bits_h = (Byte *)malloc(sizeof(Byte)*N); //Device data allocation startTimer(); hipMalloc((void **)&symbols_R_d, sizeof(float)*(N / Qm)); hipMalloc((void **)&symbols_I_d, sizeof(float)*(N / Qm)); hipMalloc((void **)&bits_d, sizeof(Byte)*N); stopTimer("hipMalloc Time= %.6f ms\n", elapsed); //Copying data to device startTimer(); hipMemcpy(symbols_R_d, symbols_R_h, sizeof(float)*(N / Qm), hipMemcpyHostToDevice); hipMemcpy(symbols_I_d, symbols_I_h, sizeof(float)*(N / Qm), hipMemcpyHostToDevice); stopTimer("hipMemcpy Host->Device Time= %.6f ms\n", elapsed); //Calc. number of needed threads for calling kernel(s) int numThreads = (N / Qm); int blockDim = (numThreads < 1024) ? numThreads : 1024; // block size in threads (max 1024 thread) int gridDim = numThreads / (blockDim)+(numThreads % blockDim == 0 ? 0 : 1); // grid size in bloack (min 1) //Calling the kernel(s) startTimer(); Demapper << < gridDim, blockDim >> > (symbols_R_d, symbols_I_d, bits_d, Qm, numThreads); stopTimer("Demapper Time= %.6f ms\n", elapsed); //Retrieve data from device startTimer(); hipMemcpy(*bits_h, bits_d, sizeof(Byte)*N, hipMemcpyDeviceToHost); stopTimer("hipMemcpy Device->Host Time= %.6f ms\n", elapsed); // Cleanup hipFree(bits_d); hipFree(symbols_R_d); hipFree(symbols_I_d); //Destroy timers destroyTimers(); }
b6ad9eec7e73b6a825f856a783d4a1b33d589506.cu
/* % Function: demapper % Description: Maps complex-valued modulation symbols to binary digits using hard decision % Inputs: *symbols_R_h: Real part of the symbols % N: Number of output bits % Qm: Demodulation type (1=bpsk, 2=qpsk, 4=16qam, or 6=64qam) % Outputs: *bits_h: Demodulated bits By: Ahmad Nour & Mohammed Mostafa */ #include "demapper.cuh" __global__ void Demapper(float *symbols_R_d, float *symbols_I_d, Byte *bits_d, int Qm, int numThreads) { int idx = blockIdx.x * blockDim.x + threadIdx.x; //Not to run more threads than available data if (idx >= numThreads) return; float symb_real = symbols_R_d[idx]; float symb_imag = symbols_I_d[idx]; switch (Qm) { case 2: //QPSK if(symb_real >= 0) bits_d[idx * Qm] = 0; else bits_d[idx * Qm] = 1; if (symb_imag >= 0) bits_d[idx * Qm + 1] = 0; else bits_d[idx * Qm + 1] = 1; break; case 4: //QAM16 if (symb_real < 0) bits_d[idx * Qm] = 1; else bits_d[idx * Qm] = 0; if (symb_imag < 0) bits_d[idx * Qm + 1] = 1; else bits_d[idx * Qm + 1] = 0; if (fabsf(symb_real) < (2 * rsqrtf(10))) bits_d[idx * Qm + 2] = 0; else bits_d[idx * Qm + 2] = 1; if (fabsf(symb_imag) < (2 * rsqrtf(10))) bits_d[idx * Qm + 3] = 0; else bits_d[idx * Qm + 3] = 1; break; case 6: //QAM64 if (symb_real < 0) bits_d[idx * Qm] = 1; else bits_d[idx * Qm] = 0; if (symb_imag < 0) bits_d[idx * Qm + 1] = 1; else bits_d[idx * Qm + 1] = 0; if (fabsf(symb_real) < (4 * rsqrtf(42))) bits_d[idx * Qm + 2] = 0; else bits_d[idx * Qm + 2] = 1; if (fabsf(symb_imag) < (4 * rsqrtf(42))) bits_d[idx * Qm + 3] = 0; else bits_d[idx * Qm + 3] = 1; if (fabsf(symb_real) > (2 * rsqrtf(42)) && (fabsf(symb_real) < (6 * rsqrtf(42)))) bits_d[idx * Qm + 4] = 0; else bits_d[idx * Qm + 4] = 1; if (fabsf(symb_imag) > (2 * rsqrtf(42)) && (fabsf(symb_imag) < (6 * rsqrtf(42)))) bits_d[idx * Qm + 5] = 0; else bits_d[idx * Qm + 5] = 1; break; default: break; } } void demapper(float* symbols_R_h, float* symbols_I_h, Byte** bits_h, const int N, int Qm) { //For timing purpose float elapsed = 0; //For time calc. cudaEvent_t start, stop; //Device data Byte *bits_d; float *symbols_R_d, *symbols_I_d; //Host data allocation *bits_h = (Byte *)malloc(sizeof(Byte)*N); //Device data allocation startTimer(); cudaMalloc((void **)&symbols_R_d, sizeof(float)*(N / Qm)); cudaMalloc((void **)&symbols_I_d, sizeof(float)*(N / Qm)); cudaMalloc((void **)&bits_d, sizeof(Byte)*N); stopTimer("cudaMalloc Time= %.6f ms\n", elapsed); //Copying data to device startTimer(); cudaMemcpy(symbols_R_d, symbols_R_h, sizeof(float)*(N / Qm), cudaMemcpyHostToDevice); cudaMemcpy(symbols_I_d, symbols_I_h, sizeof(float)*(N / Qm), cudaMemcpyHostToDevice); stopTimer("cudaMemcpy Host->Device Time= %.6f ms\n", elapsed); //Calc. number of needed threads for calling kernel(s) int numThreads = (N / Qm); int blockDim = (numThreads < 1024) ? numThreads : 1024; // block size in threads (max 1024 thread) int gridDim = numThreads / (blockDim)+(numThreads % blockDim == 0 ? 0 : 1); // grid size in bloack (min 1) //Calling the kernel(s) startTimer(); Demapper << < gridDim, blockDim >> > (symbols_R_d, symbols_I_d, bits_d, Qm, numThreads); stopTimer("Demapper Time= %.6f ms\n", elapsed); //Retrieve data from device startTimer(); cudaMemcpy(*bits_h, bits_d, sizeof(Byte)*N, cudaMemcpyDeviceToHost); stopTimer("cudaMemcpy Device->Host Time= %.6f ms\n", elapsed); // Cleanup cudaFree(bits_d); cudaFree(symbols_R_d); cudaFree(symbols_I_d); //Destroy timers destroyTimers(); }
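// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, hypothetical helper): a host-side reference
// for the hard-decision rules used by the Demapper kernel above, limited to
// QPSK (Qm = 2) and 16-QAM (Qm = 4). It mirrors the kernel's sign tests and the
// 2/sqrt(10) amplitude threshold, so it can serve as a CPU check of the GPU
// output; it is not part of the original project.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

// Writes Qm hard-decided bits for one complex symbol (re, im) into `bits`.
static void demap_reference(float re, float im, int Qm, unsigned char* bits) {
    if (Qm == 2) {                       // QPSK
        bits[0] = (re >= 0.0f) ? 0 : 1;
        bits[1] = (im >= 0.0f) ? 0 : 1;
    } else if (Qm == 4) {                // 16-QAM
        const float thr = 2.0f / std::sqrt(10.0f);   // same threshold as 2 * rsqrtf(10)
        bits[0] = (re < 0.0f) ? 1 : 0;
        bits[1] = (im < 0.0f) ? 1 : 0;
        bits[2] = (std::fabs(re) < thr) ? 0 : 1;
        bits[3] = (std::fabs(im) < thr) ? 0 : 1;
    }
}

int main() {
    unsigned char b[4];

    // QPSK symbol in the third quadrant -> bits 1 1
    demap_reference(-0.7f, -0.7f, 2, b);
    printf("QPSK:  %d %d\n", b[0], b[1]);

    // 16-QAM outer-corner symbol (3/sqrt(10), 3/sqrt(10)) -> bits 0 0 1 1
    const float a = 3.0f / std::sqrt(10.0f);
    demap_reference(a, a, 4, b);
    printf("16QAM: %d %d %d %d\n", b[0], b[1], b[2], b[3]);
    return 0;
}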
b22dbe9ac871694eb7d142362a6779561c6c1d11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** @file gpuinflate.cu Derived from zlib's contrib/puff.c, original copyright notice below */ /* Copyright (C) 2002-2013 Mark Adler, all rights reserved version 2.3, 21 Jan 2013 This software is provided 'as-is', without any express or implied warranty. In no event will the author be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. Mark Adler [email protected] */ #include "gpuinflate.hpp" #include "io_uncomp.hpp" #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { constexpr int max_bits = 15; // maximum bits in a code constexpr int max_l_codes = 286; // maximum number of literal/length codes constexpr int max_d_codes = 30; // maximum number of distance codes constexpr int fix_l_codes = 288; // number of fixed literal/length codes constexpr int log2_len_lut = 10; constexpr int log2_dist_lut = 8; /** * @brief Intermediate arrays for building huffman tables */ struct scratch_arr { int16_t lengths[max_l_codes + max_d_codes]; ///< descriptor code lengths int16_t offs[max_bits + 1]; ///< offset in symbol table for each length (scratch) }; /** * @brief Huffman LUTs for length and distance codes */ struct lut_arr { int32_t lenlut[1 << log2_len_lut]; ///< LUT for length decoding int32_t distlut[1 << log2_dist_lut]; ///< LUT for fast distance decoding }; /// 4 batches of 32 symbols constexpr int log2_batch_count = 2; // 1..5 constexpr int log2_batch_size = 5; constexpr int batch_count = (1 << log2_batch_count); constexpr int batch_size = (1 << log2_batch_size); /** * @brief Inter-warp communication queue */ struct xwarp_s { int32_t batch_len[batch_count]; //< Length of each batch - <0:end, 0:not ready, >0:symbol count union { uint32_t symqueue[batch_count * batch_size]; uint8_t symqueue8[batch_count * batch_size * 4]; } u; }; #define ENABLE_PREFETCH 1 #if ENABLE_PREFETCH constexpr int log2_prefetch_size = 9; // Must be at least LOG2_BATCH_SIZE+3 constexpr int prefetch_size = (1 << log2_prefetch_size); /// @brief Prefetcher state struct prefetch_queue_s { const uint8_t* cur_p; ///< Prefetch location int run; ///< prefetcher will exit when run=0 uint8_t pref_data[prefetch_size]; }; template <typename T> inline __device__ volatile uint32_t* 
prefetch_addr32(volatile prefetch_queue_s& q, T* ptr) { return reinterpret_cast<volatile uint32_t*>(&q.pref_data[(prefetch_size - 4) & (size_t)(ptr)]); } #endif // ENABLE_PREFETCH /** * @brief Inflate decompressor state */ struct inflate_state_s { // output state uint8_t* out; ///< output buffer uint8_t* outbase; ///< start of output buffer uint8_t* outend; ///< end of output buffer // Input state uint8_t const* cur; ///< input buffer uint8_t const* end; ///< end of input buffer uint2 bitbuf; ///< bit buffer (64-bit) uint32_t bitpos; ///< position in bit buffer int32_t err; ///< Error status int btype; ///< current block type int blast; ///< last block uint32_t stored_blk_len; ///< length of stored (uncompressed) block uint16_t first_slow_len; ///< first code not in fast LUT uint16_t index_slow_len; uint16_t first_slow_dist; uint16_t index_slow_dist; volatile xwarp_s x; #if ENABLE_PREFETCH volatile prefetch_queue_s pref; #endif int16_t lencnt[max_bits + 1]; int16_t lensym[fix_l_codes]; // Assumes fix_l_codes >= max_l_codes int16_t distcnt[max_bits + 1]; int16_t distsym[max_d_codes]; union { scratch_arr scratch; lut_arr lut; } u; }; inline __device__ unsigned int bfe(unsigned int source, unsigned int bit_start, unsigned int num_bits) { unsigned int bits; asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(source), "r"(bit_start), "r"(num_bits)); return bits; }; inline __device__ uint32_t showbits(inflate_state_s* s, uint32_t n) { uint32_t next32 = __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos); return (next32 & ((1 << n) - 1)); } inline __device__ uint32_t nextbits32(inflate_state_s* s) { return __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos); } inline __device__ void skipbits(inflate_state_s* s, uint32_t n) { uint32_t bitpos = s->bitpos + n; if (bitpos >= 32) { auto cur = s->cur + 8; s->bitbuf.x = s->bitbuf.y; s->bitbuf.y = (cur < s->end) ? *reinterpret_cast<uint32_t const*>(cur) : 0; s->cur = cur - 4; bitpos &= 0x1f; } s->bitpos = bitpos; } // TODO: If we require 4-byte alignment of input bitstream & length (padded), reading bits would // become quite a bit faster __device__ uint32_t getbits(inflate_state_s* s, uint32_t n) { uint32_t v = showbits(s, n); skipbits(s, n); return v; } /** * @brief Decode a code from the stream s using huffman table {symbols,counts}. * Return the symbol or a negative value if there is an error. * If all of the lengths are zero, i.e. an empty code, or if the code is * incomplete and an invalid code is received, then -10 is returned after * reading max_bits bits. * * Format notes: * * - The codes as stored in the compressed data are bit-reversed relative to * a simple integer ordering of codes of the same lengths. Hence below the * bits are pulled from the compressed data one at a time and used to * build the code value reversed from what is in the stream in order to * permit simple integer comparisons for decoding. A table-based decoding * scheme (as used in zlib) does not need to do this reversal. * * - The first code for the shortest length is all zeros. Subsequent codes of * the same length are simply integer increments of the previous code. When * moving up a length, a zero bit is appended to the code. For a complete * code, the last code of the longest length will be all ones. * * - Incomplete codes are handled by this decoder, since they are permitted * in the deflate format. See the format notes for fixed() and dynamic(). 
*/ __device__ int decode(inflate_state_s* s, const int16_t* counts, const int16_t* symbols) { unsigned int len; // current number of bits in code unsigned int code; // len bits being decoded unsigned int first; // first code of length len unsigned int count; // number of codes of length len uint32_t next32r = __brev(nextbits32(s)); first = 0; for (len = 1; len <= max_bits; len++) { code = (next32r >> (32 - len)) - first; count = counts[len]; if (code < count) // if length len, return symbol { skipbits(s, len); return symbols[code]; } symbols += count; // else update for next length first += count; first <<= 1; } return -10; // ran out of codes } /** * @brief Given the list of code lengths length[0..n-1] representing a canonical * Huffman code for n symbols, construct the tables required to decode those * codes. Those tables are the number of codes of each length, and the symbols * sorted by length, retaining their original order within each length. The * return value is zero for a complete code set, negative for an over- * subscribed code set, and positive for an incomplete code set. The tables * can be used if the return value is zero or positive, but they cannot be used * if the return value is negative. If the return value is zero, it is not * possible for decode() using that table to return an error--any stream of * enough bits will resolve to a symbol. If the return value is positive, then * it is possible for decode() using that table to return an error for received * codes past the end of the incomplete lengths. * * Not used by decode(), but used for error checking, count[0] is the number * of the n symbols not in the code. So n - count[0] is the number of * codes. This is useful for checking for incomplete codes that have more than * one symbol, which is an error in a dynamic block. * * Assumption: for all i in 0..n-1, 0 <= length[i] <= max_bits * This is assured by the construction of the length arrays in dynamic() and * fixed() and is not verified by construct(). * * Format notes: * * - Permitted and expected examples of incomplete codes are one of the fixed * codes and any code with a single symbol which in deflate is coded as one * bit instead of zero bits. See the format notes for fixed() and dynamic(). * * - Within a given code length, the symbols are kept in ascending order for * the code bits definition. */ __device__ int construct( inflate_state_s* s, int16_t* counts, int16_t* symbols, const int16_t* length, int n) { int symbol; // current symbol when stepping through length[] int len; // current length when stepping through counts[] int left; // number of possible codes left of current length int16_t* offs = s->u.scratch.offs; // count number of codes of each length for (len = 0; len <= max_bits; len++) counts[len] = 0; for (symbol = 0; symbol < n; symbol++) (counts[length[symbol]])++; // assumes lengths are within bounds if (counts[0] == n) // no codes! 
return 0; // complete, but decode() will fail // check for an over-subscribed or incomplete set of lengths left = 1; // one possible code of zero length for (len = 1; len <= max_bits; len++) { left <<= 1; // one more bit, double codes left left -= counts[len]; // deduct count from possible codes if (left < 0) return left; // over-subscribed--return negative } // left > 0 means incomplete // generate offsets into symbol table for each length for sorting offs[1] = 0; for (len = 1; len < max_bits; len++) offs[len + 1] = offs[len] + counts[len]; // put symbols in table sorted by length, by symbol order within each length for (symbol = 0; symbol < n; symbol++) if (length[symbol] != 0) symbols[offs[length[symbol]]++] = symbol; // return zero for complete set, positive for incomplete set return left; } /// permutation of code length codes static const __device__ __constant__ uint8_t g_code_order[19 + 1] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0xff}; /// Dynamic block (custom huffman tables) __device__ int init_dynamic(inflate_state_s* s) { int nlen, ndist, ncode; /* number of lengths in descriptor */ int index; /* index of lengths[] */ int err; /* construct() return value */ int16_t* lengths = s->u.scratch.lengths; // get number of lengths in each table, check lengths nlen = getbits(s, 5) + 257; ndist = getbits(s, 5) + 1; ncode = getbits(s, 4) + 4; if (nlen > max_l_codes || ndist > max_d_codes) { return -3; // bad counts } // read code length code lengths (really), missing lengths are zero for (index = 0; index < ncode; index++) lengths[g_code_order[index]] = getbits(s, 3); for (; index < 19; index++) lengths[g_code_order[index]] = 0; // build huffman table for code lengths codes (use lencode temporarily) err = construct(s, s->lencnt, s->lensym, lengths, 19); if (err != 0) // require complete code set here return -4; // read length/literal and distance code length tables index = 0; while (index < nlen + ndist) { int symbol = decode(s, s->lencnt, s->lensym); if (symbol < 0) return symbol; // invalid symbol if (symbol < 16) // length in 0..15 lengths[index++] = symbol; else { // repeat instruction int len = 0; // last length to repeat, assume repeating zeros if (symbol == 16) { // repeat last length 3..6 times if (index == 0) return -5; // no last length! len = lengths[index - 1]; // last length symbol = 3 + getbits(s, 2); } else if (symbol == 17) // repeat zero 3..10 times symbol = 3 + getbits(s, 3); else // == 18, repeat zero 11..138 times symbol = 11 + getbits(s, 7); if (index + symbol > nlen + ndist) return -6; // too many lengths! while (symbol--) // repeat last or zero symbol times lengths[index++] = len; } } // check for end-of-block code -- there better be one! if (lengths[256] == 0) return -9; // build huffman table for literal/length codes err = construct(s, s->lencnt, s->lensym, lengths, nlen); if (err && (err < 0 || nlen != s->lencnt[0] + s->lencnt[1])) return -7; // incomplete code ok only for single length 1 code // build huffman table for distance codes err = construct(s, s->distcnt, s->distsym, &lengths[nlen], ndist); if (err && (err < 0 || ndist != s->distcnt[0] + s->distcnt[1])) return -8; // incomplete code ok only for single length 1 code return 0; } /** * @brief Initializes a fixed codes block. * * Format notes: * * - This block type can be useful for compressing small amounts of data for * which the size of the code descriptions in a dynamic block exceeds the * benefit of custom codes for that block. 
For fixed codes, no bits are * spent on code descriptions. Instead the code lengths for literal/length * codes and distance codes are fixed. The specific lengths for each symbol * can be seen in the "for" loops below. * * - The literal/length code is complete, but has two symbols that are invalid * and should result in an error if received. This cannot be implemented * simply as an incomplete code since those two symbols are in the "middle" * of the code. They are eight bits long and the longest literal/length\ * code is nine bits. Therefore the code must be constructed with those * symbols, and the invalid symbols must be detected after decoding. * * - The fixed distance codes also have two invalid symbols that should result * in an error if received. Since all of the distance codes are the same * length, this can be implemented as an incomplete code. Then the invalid * codes are detected while decoding. */ __device__ int init_fixed(inflate_state_s* s) { int16_t* lengths = s->u.scratch.lengths; int symbol; // literal/length table for (symbol = 0; symbol < 144; symbol++) lengths[symbol] = 8; for (; symbol < 256; symbol++) lengths[symbol] = 9; for (; symbol < 280; symbol++) lengths[symbol] = 7; for (; symbol < fix_l_codes; symbol++) lengths[symbol] = 8; construct(s, s->lencnt, s->lensym, lengths, fix_l_codes); // distance table for (symbol = 0; symbol < max_d_codes; symbol++) lengths[symbol] = 5; // build huffman table for distance codes construct(s, s->distcnt, s->distsym, lengths, max_d_codes); return 0; } /** * @brief Decode literal/length and distance codes until an end-of-block code. * * Format notes: * * - Compressed data that is after the block type if fixed or after the code * description if dynamic is a combination of literals and length/distance * pairs terminated by and end-of-block code. Literals are simply Huffman * coded bytes. A length/distance pair is a coded length followed by a * coded distance to represent a string that occurs earlier in the * uncompressed data that occurs again at the current location. * * - Literals, lengths, and the end-of-block code are combined into a single * code of up to 286 symbols. They are 256 literals (0..255), 29 length * symbols (257..285), and the end-of-block symbol (256). * * - There are 256 possible lengths (3..258), and so 29 symbols are not enough * to represent all of those. Lengths 3..10 and 258 are in fact represented * by just a length symbol. Lengths 11..257 are represented as a symbol and * some number of extra bits that are added as an integer to the base length * of the length symbol. The number of extra bits is determined by the base * length symbol. These are in the static arrays below, lens[] for the base * lengths and lext[] for the corresponding number of extra bits. * * - The reason that 258 gets its own symbol is that the longest length is used * often in highly redundant files. Note that 258 can also be coded as the * base value 227 plus the maximum extra value of 31. While a good deflate * should never do this, it is not an error, and should be decoded properly. * * - If a length is decoded, including its extra bits if any, then it is * followed a distance code. There are up to 30 distance symbols. Again * there are many more possible distances (1..32768), so extra bits are added * to a base value represented by the symbol. The distances 1..4 get their * own symbol, but the rest require extra bits. The base distances and * corresponding number of extra bits are below in the static arrays dist[] * and dext[]. 
* * - Literal bytes are simply written to the output. A length/distance pair is * an instruction to copy previously uncompressed bytes to the output. The * copy is from distance bytes back in the output stream, copying for length * bytes. * * - Distances pointing before the beginning of the output data are not * permitted. * * - Overlapped copies, where the length is greater than the distance, are * allowed and common. For example, a distance of one and a length of 258 * simply copies the last byte 258 times. A distance of four and a length of * twelve copies the last four bytes three times. A simple forward copy * ignoring whether the length is greater than the distance or not implements * this correctly. You should not use memcpy() since its behavior is not * defined for overlapped arrays. You should not use memmove() or bcopy() * since though their behavior -is- defined for overlapping arrays, it is * defined to do the wrong thing in this case. */ /// permutation of code length codes static const __device__ __constant__ uint16_t g_lens[29] = { // Size base for length codes 257..285 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258}; static const __device__ __constant__ uint16_t g_lext[29] = { // Extra bits for length codes 257..285 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0}; static const __device__ __constant__ uint16_t g_dists[30] = { // Offset base for distance codes 0..29 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577}; static const __device__ __constant__ uint16_t g_dext[30] = { // Extra bits for distance codes 0..29 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; /// @brief Thread 0 only: decode bitstreams and output symbols into the symbol queue __device__ void decode_symbols(inflate_state_s* s) { uint32_t bitpos = s->bitpos; uint2 bitbuf = s->bitbuf; auto cur = s->cur; auto end = s->end; int32_t batch = 0; int32_t sym, batch_len; do { volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size]; // Wait for the next batch entry to be empty #if ENABLE_PREFETCH // Wait for prefetcher to fetch a worst-case of 48 bits per symbol while ((*(volatile int32_t*)&s->pref.cur_p - (int32_t)(size_t)cur < batch_size * 6) || (s->x.batch_len[batch] != 0)) {} #else while (s->x.batch_len[batch] != 0) {} #endif batch_len = 0; #if ENABLE_PREFETCH if (cur + (bitpos >> 3) >= end) { s->err = 1; break; } #endif // Inner loop decoding symbols do { uint32_t next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s); uint32_t len; sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)]; if ((uint32_t)sym < (uint32_t)(0x100 << 5)) { // We can lookup a second symbol if this was a short literal len = sym & 0x1f; sym >>= 5; b[batch_len++] = sym; next32 >>= len; bitpos += len; sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)]; } if (sym > 0) // short symbol { len = sym & 0x1f; sym = ((sym >> 5) & 0x3ff) + ((next32 >> (sym >> 24)) & ((sym >> 16) & 0x1f)); } else { // Slow length path uint32_t next32r = __brev(next32); const int16_t* symbols = &s->lensym[s->index_slow_len]; unsigned int first = s->first_slow_len; int lext; #pragma unroll 1 for (len = log2_len_lut + 1; len <= max_bits; len++) { unsigned int code = (next32r >> (32 - len)) - first; unsigned int count = s->lencnt[len]; if (code < count) // if length len, return symbol { sym = 
symbols[code]; break; } symbols += count; // else update for next length first += count; first <<= 1; } if (len > max_bits) { s->err = -10; sym = 256; len = 0; } if (sym > 256) { sym -= 257; lext = g_lext[sym]; sym = 256 + g_lens[sym] + bfe(next32, len, lext); len += lext; } } if (sym > 256) { int dist, dext; // skipbits(s, len) inlined - no limit check bitpos += len; if (bitpos >= 32) { bitbuf.x = bitbuf.y; #if ENABLE_PREFETCH bitbuf.y = *prefetch_addr32(s->pref, cur + 8); cur += 4; #else cur += 8; bitbuf.y = (cur < end) ? *(const uint32_t*)cur : 0; cur -= 4; #endif bitpos &= 0x1f; } // get distance next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s); dist = s->u.lut.distlut[next32 & ((1 << log2_dist_lut) - 1)]; if (dist > 0) { len = dist & 0x1f; dext = bfe(dist, 20, 5); dist = bfe(dist, 5, 15); sym |= (dist + bfe(next32, len, dext)) << 16; len += dext; } else { uint32_t next32r = __brev(next32); const int16_t* symbols = &s->distsym[s->index_slow_dist]; unsigned int first = s->first_slow_dist; #pragma unroll 1 for (len = log2_dist_lut + 1; len <= max_bits; len++) { unsigned int code = (next32r >> (32 - len)) - first; unsigned int count = s->distcnt[len]; if (code < count) // if length len, return symbol { dist = symbols[code]; break; } symbols += count; // else update for next length first += count; first <<= 1; } if (len > max_bits) { s->err = -10; sym = 256; len = 0; } else { dext = g_dext[dist]; sym |= (g_dists[dist] + bfe(next32, len, dext)) << 16; len += dext; } } } // skipbits(s, len) inlined with added error check for reading past the end of the input // buffer bitpos += len; if (bitpos >= 32) { bitbuf.x = bitbuf.y; #if ENABLE_PREFETCH bitbuf.y = *prefetch_addr32(s->pref, cur + 8); cur += 4; #else cur += 8; if (cur < end) { bitbuf.y = *(const uint32_t*)cur; cur -= 4; } else { bitbuf.y = 0; cur -= 4; if (cur > end) { s->err = 1; sym = 256; } } #endif bitpos &= 0x1f; } if (sym == 256) break; b[batch_len++] = sym; } while (batch_len < batch_size - 1); s->x.batch_len[batch] = batch_len; #if ENABLE_PREFETCH ((volatile inflate_state_s*)s)->cur = cur; #endif if (batch_len != 0) batch = (batch + 1) & (batch_count - 1); } while (sym != 256); while (s->x.batch_len[batch] != 0) {} s->x.batch_len[batch] = -1; s->bitbuf = bitbuf; s->bitpos = bitpos; #if !ENABLE_PREFETCH s->cur = cur; #endif } /** * @brief Build lookup tables for faster decode * LUT format is symbols*16+length */ __device__ void init_length_lut(inflate_state_s* s, int t) { int32_t* lut = s->u.lut.lenlut; for (uint32_t bits = t; bits < (1 << log2_len_lut); bits += blockDim.x) { const int16_t* cnt = s->lencnt; const int16_t* symbols = s->lensym; int sym = -10 << 5; unsigned int first = 0; unsigned int rbits = __brev(bits) >> (32 - log2_len_lut); for (unsigned int len = 1; len <= log2_len_lut; len++) { unsigned int code = (rbits >> (log2_len_lut - len)) - first; unsigned int count = cnt[len]; if (code < count) { sym = symbols[code]; if (sym > 256) { int lext = g_lext[sym - 257]; sym = (256 + g_lens[sym - 257]) | (((1 << lext) - 1) << (16 - 5)) | (len << (24 - 5)); len += lext; } sym = (sym << 5) | len; break; } symbols += count; // else update for next length first += count; first <<= 1; } lut[bits] = sym; } if (!t) { unsigned int first = 0; unsigned int index = 0; const int16_t* cnt = s->lencnt; for (unsigned int len = 1; len <= log2_len_lut; len++) { unsigned int count = cnt[len]; index += count; first += count; first <<= 1; } s->first_slow_len = first; s->index_slow_len = index; } } /** * @brief Build lookup 
tables for faster decode of distance symbol * LUT format is symbols*16+length */ __device__ void init_distance_lut(inflate_state_s* s, int t) { int32_t* lut = s->u.lut.distlut; for (uint32_t bits = t; bits < (1 << log2_dist_lut); bits += blockDim.x) { const int16_t* cnt = s->distcnt; const int16_t* symbols = s->distsym; int sym = 0; unsigned int first = 0; unsigned int rbits = __brev(bits) >> (32 - log2_dist_lut); for (unsigned int len = 1; len <= log2_dist_lut; len++) { unsigned int code = (rbits >> (log2_dist_lut - len)) - first; unsigned int count = cnt[len]; if (code < count) { int dist = symbols[code]; int dext = g_dext[dist]; sym = g_dists[dist] | (dext << 15); sym = (sym << 5) | len; break; } symbols += count; // else update for next length first += count; first <<= 1; } lut[bits] = sym; } if (!t) { unsigned int first = 0; unsigned int index = 0; const int16_t* cnt = s->distcnt; for (unsigned int len = 1; len <= log2_dist_lut; len++) { unsigned int count = cnt[len]; index += count; first += count; first <<= 1; } s->first_slow_dist = first; s->index_slow_dist = index; } } /// @brief WARP1: process symbols and output uncompressed stream __device__ void process_symbols(inflate_state_s* s, int t) { uint8_t* out = s->out; const uint8_t* outend = s->outend; const uint8_t* outbase = s->outbase; int batch = 0; do { volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size]; int batch_len = 0; if (t == 0) { while ((batch_len = s->x.batch_len[batch]) == 0) {} } batch_len = shuffle(batch_len); if (batch_len < 0) { break; } auto const symt = (t < batch_len) ? b[t] : 256; auto const lit_mask = ballot(symt >= 256); auto pos = min((__ffs(lit_mask) - 1) & 0xff, 32); if (t == 0) { s->x.batch_len[batch] = 0; } if (t < pos && out + t < outend) { out[t] = symt; } out += pos; batch_len -= pos; while (batch_len > 0) { int dist, len, symbol; // Process a non-literal symbol symbol = shuffle(symt, pos); len = max((symbol & 0xffff) - 256, 0); // max should be unnecessary, but just in case dist = symbol >> 16; for (int i = t; i < len; i += 32) { const uint8_t* src = out + ((i >= dist) ? (i % dist) : i) - dist; uint8_t b = (src < outbase) ? 0 : *src; if (out + i < outend) { out[i] = b; } } out += len; pos++; batch_len--; // Process subsequent literals, if any if (!((lit_mask >> pos) & 1)) { len = min((__ffs(lit_mask >> pos) - 1) & 0xff, batch_len); symbol = shuffle(symt, (pos + t) & 0x1f); if (t < len && out + t < outend) { out[t] = symbol; } out += len; pos += len; batch_len -= len; } } batch = (batch + 1) & (batch_count - 1); } while (true); if (t == 0) { s->out = out; } } /** * @brief Initializes a stored block. * * Format notes: * * - After the two-bit stored block type (00), the stored block length and * stored bytes are byte-aligned for fast copying. Therefore any leftover * bits in the byte that has the last bit of the type, as many as seven, are * discarded. The value of the discarded bits are not defined and should not * be checked against any expectation. * * - The second inverted copy of the stored block length does not have to be * checked, but it's probably a good idea to do so anyway. * * - A stored block can have zero length. This is sometimes used to byte-align * subsets of the compressed data for random access or partial recovery. 
*/ __device__ int init_stored(inflate_state_s* s) { uint32_t len, nlen; // length of stored block // Byte align if (s->bitpos & 7) { skipbits(s, 8 - (s->bitpos & 7)); } if (s->cur + (s->bitpos >> 3) >= s->end) { return 2; // Not enough input } // get length and check against its one's complement len = getbits(s, 16); nlen = getbits(s, 16); if (len != (nlen ^ 0xffff)) { return -2; // didn't match complement! } if (s->cur + (s->bitpos >> 3) + len > s->end) { return 2; // Not enough input } s->stored_blk_len = len; // done with a valid stored block return 0; } /// Copy bytes from stored block to destination __device__ void copy_stored(inflate_state_s* s, int t) { auto len = s->stored_blk_len; auto cur = s->cur + s->bitpos / 8; auto out = s->out; auto outend = s->outend; auto const slow_bytes = min(len, (int)((16 - reinterpret_cast<size_t>(out)) % 16)); // Slow copy until output is 16B aligned if (slow_bytes) { for (int i = t; i < slow_bytes; i += blockDim.x) { if (out + i < outend) { out[i] = cur[i]; // Input range has already been validated in init_stored() } } cur += slow_bytes; out += slow_bytes; len -= slow_bytes; } auto fast_bytes = len; if (out < outend) { fast_bytes = (int)min((size_t)fast_bytes, (outend - out)); } fast_bytes &= ~0xf; auto bitpos = ((int)((size_t)cur % 4)) * 8; auto cur4 = cur - (bitpos / 8); if (out < outend) { // Fast copy 16 bytes at a time for (int i = t * 16; i < fast_bytes; i += blockDim.x * 16) { uint4 u; u.x = *reinterpret_cast<const uint32_t*>(cur4 + i + 0 * 4); u.y = *reinterpret_cast<const uint32_t*>(cur4 + i + 1 * 4); u.z = *reinterpret_cast<const uint32_t*>(cur4 + i + 2 * 4); u.w = *reinterpret_cast<const uint32_t*>(cur4 + i + 3 * 4); if (bitpos != 0) { uint32_t v = (bitpos != 0) ? *reinterpret_cast<const uint32_t*>(cur4 + i + 4 * 4) : 0; u.x = __funnelshift_rc(u.x, u.y, bitpos); u.y = __funnelshift_rc(u.y, u.z, bitpos); u.z = __funnelshift_rc(u.z, u.w, bitpos); u.w = __funnelshift_rc(u.w, v, bitpos); } *reinterpret_cast<uint4*>(out + i) = u; } } cur += fast_bytes; out += fast_bytes; len -= fast_bytes; // Slow copy for remaining bytes for (int i = t; i < len; i += blockDim.x) { if (out + i < outend) { out[i] = cur[i]; // Input range has already been validated in init_stored() } } out += len; __syncthreads(); if (t == 0) { // Reset bitstream to end of block auto p = cur + len; auto prefix_bytes = (uint32_t)(((size_t)p) & 3); p -= prefix_bytes; s->cur = p; s->bitbuf.x = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0; p += 4; s->bitbuf.y = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0; s->bitpos = prefix_bytes * 8; s->out = out; } } #if ENABLE_PREFETCH __device__ void init_prefetcher(inflate_state_s* s, int t) { if (t == 0) { s->pref.cur_p = s->cur; s->pref.run = 1; } } __device__ void prefetch_warp(volatile inflate_state_s* s, int t) { const uint8_t* cur_p = s->pref.cur_p; const uint8_t* end = s->end; while (shuffle((t == 0) ? s->pref.run : 0)) { auto cur_lo = (int32_t)(size_t)cur_p; int do_pref = shuffle((t == 0) ? (cur_lo - *(volatile int32_t*)&s->cur < prefetch_size - 32 * 4 - 4) : 0); if (do_pref) { const uint8_t* p = cur_p + 4 * t; *prefetch_addr32(s->pref, p) = (p < end) ? 
*reinterpret_cast<const uint32_t*>(p) : 0; cur_p += 4 * 32; __threadfence_block(); __syncwarp(); if (!t) { s->pref.cur_p = cur_p; __threadfence_block(); } } } } #endif // ENABLE_PREFETCH /** * @brief Parse GZIP header * See https://tools.ietf.org/html/rfc1952 */ __device__ int parse_gzip_header(const uint8_t* src, size_t src_size) { int hdr_len = -1; if (src_size >= 18) { uint32_t sig = (src[0] << 16) | (src[1] << 8) | src[2]; if (sig == 0x1f'8b08) // 24-bit GZIP inflate signature {0x1f, 0x8b, 0x08} { uint8_t flags = src[3]; hdr_len = 10; if (flags & GZIPHeaderFlag::fextra) // Extra fields present { int xlen = src[hdr_len] | (src[hdr_len + 1] << 8); hdr_len += xlen; if (hdr_len >= src_size) return -1; } if (flags & GZIPHeaderFlag::fname) // Original file name present { // Skip zero-terminated string do { if (hdr_len >= src_size) return -1; } while (src[hdr_len++] != 0); } if (flags & GZIPHeaderFlag::fcomment) // Comment present { // Skip zero-terminated string do { if (hdr_len >= src_size) return -1; } while (src[hdr_len++] != 0); } if (flags & GZIPHeaderFlag::fhcrc) // Header CRC present { hdr_len += 2; } if (hdr_len + 8 >= src_size) hdr_len = -1; } } return hdr_len; } /** * @brief INFLATE decompression kernel * * blockDim {block_size,1,1} * * @tparam block_size Thread block dimension for this call * @param inputs Source and destination buffer information per block * @param outputs Destination buffer information per block * @param results Decompression status buffer per block * @param parse_hdr If nonzero, indicates that the compressed bitstream includes a GZIP header */ template <int block_size> __global__ void __launch_bounds__(block_size) inflate_kernel(device_span<device_span<uint8_t const> const> inputs, device_span<device_span<uint8_t> const> outputs, device_span<compression_result> results, gzip_header_included parse_hdr) { __shared__ __align__(16) inflate_state_s state_g; int t = threadIdx.x; int z = blockIdx.x; inflate_state_s* state = &state_g; if (!t) { auto p = inputs[z].data(); auto src_size = inputs[z].size(); // Parse header if needed state->err = 0; if (parse_hdr == gzip_header_included::YES) { int hdr_len = parse_gzip_header(p, src_size); src_size = (src_size >= 8) ? src_size - 8 : 0; // ignore footer if (hdr_len >= 0) { p += hdr_len; src_size -= hdr_len; } else { state->err = hdr_len; } } // Initialize shared state state->out = outputs[z].data(); state->outbase = state->out; state->outend = state->out + outputs[z].size(); state->end = p + src_size; auto const prefix_bytes = (uint32_t)(((size_t)p) & 3); p -= prefix_bytes; state->cur = p; state->bitbuf.x = (p < state->end) ? *reinterpret_cast<uint32_t const*>(p) : 0; p += 4; state->bitbuf.y = (p < state->end) ? 
*reinterpret_cast<uint32_t const*>(p) : 0; state->bitpos = prefix_bytes * 8; } __syncthreads(); // Main loop decoding blocks while (!state->err) { if (!t) { // Thread0: read last flag, block type and custom huffman tables if any if (state->cur + (state->bitpos >> 3) >= state->end) state->err = 2; else { state->blast = getbits(state, 1); state->btype = getbits(state, 2); if (state->btype == 0) state->err = init_stored(state); else if (state->btype == 1) state->err = init_fixed(state); else if (state->btype == 2) state->err = init_dynamic(state); else state->err = -1; // Invalid block } } __syncthreads(); if (!state->err && (state->btype == 1 || state->btype == 2)) { // Initializes lookup tables (block wide) init_length_lut(state, t); init_distance_lut(state, t); #if ENABLE_PREFETCH // Initialize prefetcher init_prefetcher(state, t); #endif if (t < batch_count) { state->x.batch_len[t] = 0; } __syncthreads(); // decode data until end-of-block code if (t < 1 * 32) { // WARP0: decode variable-length symbols if (!t) { // Thread0: decode symbols (single threaded) decode_symbols(state); #if ENABLE_PREFETCH state->pref.run = 0; #endif } } else if (t < 2 * 32) { // WARP1: perform LZ77 using length and distance codes from WARP0 process_symbols(state, t & 0x1f); } #if ENABLE_PREFETCH else if (t < 3 * 32) { // WARP2: Prefetcher: prefetch data for WARP0 prefetch_warp(state, t & 0x1f); } #endif // else WARP3: idle } else if (!state->err && state->btype == 0) { // Uncompressed block (block-wide memcpy) copy_stored(state, t); } if (state->blast) break; __syncthreads(); } __syncthreads(); // Output decompression status and length if (!t) { if (state->err == 0 && state->cur + ((state->bitpos + 7) >> 3) > state->end) { // Read past the end of the input buffer state->err = 2; } else if (state->err == 0 && state->out > state->outend) { // Output buffer too small state->err = 1; } results[z].bytes_written = state->out - state->outbase; results[z].status = [&]() { switch (state->err) { case 0: return compression_status::SUCCESS; case 1: return compression_status::OUTPUT_OVERFLOW; default: return compression_status::FAILURE; } }(); results[z].reserved = (int)(state->end - state->cur); // Here mainly for debug purposes } } /** * @brief Copy a group of buffers * * blockDim {1024,1,1} * * @param inputs Source and destination information per block */ __global__ void __launch_bounds__(1024) copy_uncompressed_kernel(device_span<device_span<uint8_t const> const> inputs, device_span<device_span<uint8_t> const> outputs) { __shared__ const uint8_t* volatile src_g; __shared__ uint8_t* volatile dst_g; __shared__ uint32_t volatile copy_len_g; uint32_t t = threadIdx.x; uint32_t z = blockIdx.x; const uint8_t* src; uint8_t* dst; uint32_t len, src_align_bytes, src_align_bits, dst_align_bytes; if (!t) { src = inputs[z].data(); dst = outputs[z].data(); len = static_cast<uint32_t>(min(inputs[z].size(), outputs[z].size())); src_g = src; dst_g = dst; copy_len_g = len; } __syncthreads(); src = src_g; dst = dst_g; len = copy_len_g; // Align output to 32-bit dst_align_bytes = 3 & -reinterpret_cast<intptr_t>(dst); if (dst_align_bytes != 0) { uint32_t align_len = min(dst_align_bytes, len); if (t < align_len) { dst[t] = src[t]; } src += align_len; dst += align_len; len -= align_len; } src_align_bytes = (uint32_t)(3 & reinterpret_cast<uintptr_t>(src)); src_align_bits = src_align_bytes << 3; while (len >= 32) { const auto* src32 = reinterpret_cast<const uint32_t*>(src - src_align_bytes); uint32_t copy_cnt = min(len >> 2, 1024); if (t < 
copy_cnt) { uint32_t v = src32[t]; if (src_align_bits != 0) { v = __funnelshift_r(v, src32[t + 1], src_align_bits); } reinterpret_cast<uint32_t*>(dst)[t] = v; } src += copy_cnt * 4; dst += copy_cnt * 4; len -= copy_cnt * 4; } if (t < len) { dst[t] = src[t]; } } void gpuinflate(device_span<device_span<uint8_t const> const> inputs, device_span<device_span<uint8_t> const> outputs, device_span<compression_result> results, gzip_header_included parse_hdr, rmm::cuda_stream_view stream) { constexpr int block_size = 128; // Threads per block if (inputs.size() > 0) { hipLaunchKernelGGL(( inflate_kernel<block_size>) , dim3(inputs.size()), dim3(block_size), 0, stream.value(), inputs, outputs, results, parse_hdr); } } void gpu_copy_uncompressed_blocks(device_span<device_span<uint8_t const> const> inputs, device_span<device_span<uint8_t> const> outputs, rmm::cuda_stream_view stream) { if (inputs.size() > 0) { hipLaunchKernelGGL(( copy_uncompressed_kernel), dim3(inputs.size()), dim3(1024), 0, stream.value(), inputs, outputs); } } } // namespace io } // namespace cudf
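/*
 * Illustrative sketch (not part of the cuDF sources above): the slow paths in
 * decode() and decode_symbols() walk a canonical Huffman code one bit length
 * at a time, keeping a running `first` code and a running offset into the
 * length-sorted symbol table. The standalone host-side toy below performs the
 * same walk on a hypothetical three-symbol code; toy_decode and its inputs are
 * invented for illustration only.
 */
#include <cstdint>
#include <cstdio>

// counts[len] = number of codes of bit length len; symbols[] holds the symbols
// sorted by code length (the same layout construct() builds on the device).
static int toy_decode(const int16_t* counts, const int16_t* symbols,
                      uint32_t bits_msb_first, int max_len)
{
  unsigned int first = 0;  // first canonical code of the current length
  unsigned int index = 0;  // offset of this length's symbols in symbols[]
  for (int len = 1; len <= max_len; len++) {
    unsigned int code  = (bits_msb_first >> (max_len - len)) - first;
    unsigned int count = counts[len];
    if (code < count) return symbols[index + code];  // code fits this length
    index += count;        // skip this length's symbols
    first += count;        // next length: codes continue...
    first <<= 1;           // ...and gain one more bit
  }
  return -10;              // ran out of codes (same sentinel as decode())
}

int main()
{
  // Toy canonical code: 'A' -> 0, 'B' -> 10, 'C' -> 11 (lengths 1, 2, 2).
  const int16_t counts[]  = {0, 1, 2};
  const int16_t symbols[] = {'A', 'B', 'C'};
  std::printf("%c %c %c\n",
              toy_decode(counts, symbols, 0b00u, 2),   // bits "0_" -> A
              toy_decode(counts, symbols, 0b10u, 2),   // bits "10" -> B
              toy_decode(counts, symbols, 0b11u, 2));  // bits "11" -> C
  return 0;
}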
b22dbe9ac871694eb7d142362a6779561c6c1d11.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** @file gpuinflate.cu Derived from zlib's contrib/puff.c, original copyright notice below */ /* Copyright (C) 2002-2013 Mark Adler, all rights reserved version 2.3, 21 Jan 2013 This software is provided 'as-is', without any express or implied warranty. In no event will the author be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. Mark Adler [email protected] */ #include "gpuinflate.hpp" #include "io_uncomp.hpp" #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { constexpr int max_bits = 15; // maximum bits in a code constexpr int max_l_codes = 286; // maximum number of literal/length codes constexpr int max_d_codes = 30; // maximum number of distance codes constexpr int fix_l_codes = 288; // number of fixed literal/length codes constexpr int log2_len_lut = 10; constexpr int log2_dist_lut = 8; /** * @brief Intermediate arrays for building huffman tables */ struct scratch_arr { int16_t lengths[max_l_codes + max_d_codes]; ///< descriptor code lengths int16_t offs[max_bits + 1]; ///< offset in symbol table for each length (scratch) }; /** * @brief Huffman LUTs for length and distance codes */ struct lut_arr { int32_t lenlut[1 << log2_len_lut]; ///< LUT for length decoding int32_t distlut[1 << log2_dist_lut]; ///< LUT for fast distance decoding }; /// 4 batches of 32 symbols constexpr int log2_batch_count = 2; // 1..5 constexpr int log2_batch_size = 5; constexpr int batch_count = (1 << log2_batch_count); constexpr int batch_size = (1 << log2_batch_size); /** * @brief Inter-warp communication queue */ struct xwarp_s { int32_t batch_len[batch_count]; //< Length of each batch - <0:end, 0:not ready, >0:symbol count union { uint32_t symqueue[batch_count * batch_size]; uint8_t symqueue8[batch_count * batch_size * 4]; } u; }; #define ENABLE_PREFETCH 1 #if ENABLE_PREFETCH constexpr int log2_prefetch_size = 9; // Must be at least LOG2_BATCH_SIZE+3 constexpr int prefetch_size = (1 << log2_prefetch_size); /// @brief Prefetcher state struct prefetch_queue_s { const uint8_t* cur_p; ///< Prefetch location int run; ///< prefetcher will exit when run=0 uint8_t pref_data[prefetch_size]; }; template <typename T> inline __device__ volatile uint32_t* prefetch_addr32(volatile prefetch_queue_s& q, T* ptr) { return reinterpret_cast<volatile 
uint32_t*>(&q.pref_data[(prefetch_size - 4) & (size_t)(ptr)]); } #endif // ENABLE_PREFETCH /** * @brief Inflate decompressor state */ struct inflate_state_s { // output state uint8_t* out; ///< output buffer uint8_t* outbase; ///< start of output buffer uint8_t* outend; ///< end of output buffer // Input state uint8_t const* cur; ///< input buffer uint8_t const* end; ///< end of input buffer uint2 bitbuf; ///< bit buffer (64-bit) uint32_t bitpos; ///< position in bit buffer int32_t err; ///< Error status int btype; ///< current block type int blast; ///< last block uint32_t stored_blk_len; ///< length of stored (uncompressed) block uint16_t first_slow_len; ///< first code not in fast LUT uint16_t index_slow_len; uint16_t first_slow_dist; uint16_t index_slow_dist; volatile xwarp_s x; #if ENABLE_PREFETCH volatile prefetch_queue_s pref; #endif int16_t lencnt[max_bits + 1]; int16_t lensym[fix_l_codes]; // Assumes fix_l_codes >= max_l_codes int16_t distcnt[max_bits + 1]; int16_t distsym[max_d_codes]; union { scratch_arr scratch; lut_arr lut; } u; }; inline __device__ unsigned int bfe(unsigned int source, unsigned int bit_start, unsigned int num_bits) { unsigned int bits; asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(source), "r"(bit_start), "r"(num_bits)); return bits; }; inline __device__ uint32_t showbits(inflate_state_s* s, uint32_t n) { uint32_t next32 = __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos); return (next32 & ((1 << n) - 1)); } inline __device__ uint32_t nextbits32(inflate_state_s* s) { return __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos); } inline __device__ void skipbits(inflate_state_s* s, uint32_t n) { uint32_t bitpos = s->bitpos + n; if (bitpos >= 32) { auto cur = s->cur + 8; s->bitbuf.x = s->bitbuf.y; s->bitbuf.y = (cur < s->end) ? *reinterpret_cast<uint32_t const*>(cur) : 0; s->cur = cur - 4; bitpos &= 0x1f; } s->bitpos = bitpos; } // TODO: If we require 4-byte alignment of input bitstream & length (padded), reading bits would // become quite a bit faster __device__ uint32_t getbits(inflate_state_s* s, uint32_t n) { uint32_t v = showbits(s, n); skipbits(s, n); return v; } /** * @brief Decode a code from the stream s using huffman table {symbols,counts}. * Return the symbol or a negative value if there is an error. * If all of the lengths are zero, i.e. an empty code, or if the code is * incomplete and an invalid code is received, then -10 is returned after * reading max_bits bits. * * Format notes: * * - The codes as stored in the compressed data are bit-reversed relative to * a simple integer ordering of codes of the same lengths. Hence below the * bits are pulled from the compressed data one at a time and used to * build the code value reversed from what is in the stream in order to * permit simple integer comparisons for decoding. A table-based decoding * scheme (as used in zlib) does not need to do this reversal. * * - The first code for the shortest length is all zeros. Subsequent codes of * the same length are simply integer increments of the previous code. When * moving up a length, a zero bit is appended to the code. For a complete * code, the last code of the longest length will be all ones. * * - Incomplete codes are handled by this decoder, since they are permitted * in the deflate format. See the format notes for fixed() and dynamic(). 
*/ __device__ int decode(inflate_state_s* s, const int16_t* counts, const int16_t* symbols) { unsigned int len; // current number of bits in code unsigned int code; // len bits being decoded unsigned int first; // first code of length len unsigned int count; // number of codes of length len uint32_t next32r = __brev(nextbits32(s)); first = 0; for (len = 1; len <= max_bits; len++) { code = (next32r >> (32 - len)) - first; count = counts[len]; if (code < count) // if length len, return symbol { skipbits(s, len); return symbols[code]; } symbols += count; // else update for next length first += count; first <<= 1; } return -10; // ran out of codes } /** * @brief Given the list of code lengths length[0..n-1] representing a canonical * Huffman code for n symbols, construct the tables required to decode those * codes. Those tables are the number of codes of each length, and the symbols * sorted by length, retaining their original order within each length. The * return value is zero for a complete code set, negative for an over- * subscribed code set, and positive for an incomplete code set. The tables * can be used if the return value is zero or positive, but they cannot be used * if the return value is negative. If the return value is zero, it is not * possible for decode() using that table to return an error--any stream of * enough bits will resolve to a symbol. If the return value is positive, then * it is possible for decode() using that table to return an error for received * codes past the end of the incomplete lengths. * * Not used by decode(), but used for error checking, count[0] is the number * of the n symbols not in the code. So n - count[0] is the number of * codes. This is useful for checking for incomplete codes that have more than * one symbol, which is an error in a dynamic block. * * Assumption: for all i in 0..n-1, 0 <= length[i] <= max_bits * This is assured by the construction of the length arrays in dynamic() and * fixed() and is not verified by construct(). * * Format notes: * * - Permitted and expected examples of incomplete codes are one of the fixed * codes and any code with a single symbol which in deflate is coded as one * bit instead of zero bits. See the format notes for fixed() and dynamic(). * * - Within a given code length, the symbols are kept in ascending order for * the code bits definition. */ __device__ int construct( inflate_state_s* s, int16_t* counts, int16_t* symbols, const int16_t* length, int n) { int symbol; // current symbol when stepping through length[] int len; // current length when stepping through counts[] int left; // number of possible codes left of current length int16_t* offs = s->u.scratch.offs; // count number of codes of each length for (len = 0; len <= max_bits; len++) counts[len] = 0; for (symbol = 0; symbol < n; symbol++) (counts[length[symbol]])++; // assumes lengths are within bounds if (counts[0] == n) // no codes! 
return 0; // complete, but decode() will fail // check for an over-subscribed or incomplete set of lengths left = 1; // one possible code of zero length for (len = 1; len <= max_bits; len++) { left <<= 1; // one more bit, double codes left left -= counts[len]; // deduct count from possible codes if (left < 0) return left; // over-subscribed--return negative } // left > 0 means incomplete // generate offsets into symbol table for each length for sorting offs[1] = 0; for (len = 1; len < max_bits; len++) offs[len + 1] = offs[len] + counts[len]; // put symbols in table sorted by length, by symbol order within each length for (symbol = 0; symbol < n; symbol++) if (length[symbol] != 0) symbols[offs[length[symbol]]++] = symbol; // return zero for complete set, positive for incomplete set return left; } /// permutation of code length codes static const __device__ __constant__ uint8_t g_code_order[19 + 1] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0xff}; /// Dynamic block (custom huffman tables) __device__ int init_dynamic(inflate_state_s* s) { int nlen, ndist, ncode; /* number of lengths in descriptor */ int index; /* index of lengths[] */ int err; /* construct() return value */ int16_t* lengths = s->u.scratch.lengths; // get number of lengths in each table, check lengths nlen = getbits(s, 5) + 257; ndist = getbits(s, 5) + 1; ncode = getbits(s, 4) + 4; if (nlen > max_l_codes || ndist > max_d_codes) { return -3; // bad counts } // read code length code lengths (really), missing lengths are zero for (index = 0; index < ncode; index++) lengths[g_code_order[index]] = getbits(s, 3); for (; index < 19; index++) lengths[g_code_order[index]] = 0; // build huffman table for code lengths codes (use lencode temporarily) err = construct(s, s->lencnt, s->lensym, lengths, 19); if (err != 0) // require complete code set here return -4; // read length/literal and distance code length tables index = 0; while (index < nlen + ndist) { int symbol = decode(s, s->lencnt, s->lensym); if (symbol < 0) return symbol; // invalid symbol if (symbol < 16) // length in 0..15 lengths[index++] = symbol; else { // repeat instruction int len = 0; // last length to repeat, assume repeating zeros if (symbol == 16) { // repeat last length 3..6 times if (index == 0) return -5; // no last length! len = lengths[index - 1]; // last length symbol = 3 + getbits(s, 2); } else if (symbol == 17) // repeat zero 3..10 times symbol = 3 + getbits(s, 3); else // == 18, repeat zero 11..138 times symbol = 11 + getbits(s, 7); if (index + symbol > nlen + ndist) return -6; // too many lengths! while (symbol--) // repeat last or zero symbol times lengths[index++] = len; } } // check for end-of-block code -- there better be one! if (lengths[256] == 0) return -9; // build huffman table for literal/length codes err = construct(s, s->lencnt, s->lensym, lengths, nlen); if (err && (err < 0 || nlen != s->lencnt[0] + s->lencnt[1])) return -7; // incomplete code ok only for single length 1 code // build huffman table for distance codes err = construct(s, s->distcnt, s->distsym, &lengths[nlen], ndist); if (err && (err < 0 || ndist != s->distcnt[0] + s->distcnt[1])) return -8; // incomplete code ok only for single length 1 code return 0; } /** * @brief Initializes a fixed codes block. * * Format notes: * * - This block type can be useful for compressing small amounts of data for * which the size of the code descriptions in a dynamic block exceeds the * benefit of custom codes for that block. 
For fixed codes, no bits are * spent on code descriptions. Instead the code lengths for literal/length * codes and distance codes are fixed. The specific lengths for each symbol * can be seen in the "for" loops below. * * - The literal/length code is complete, but has two symbols that are invalid * and should result in an error if received. This cannot be implemented * simply as an incomplete code since those two symbols are in the "middle" * of the code. They are eight bits long and the longest literal/length\ * code is nine bits. Therefore the code must be constructed with those * symbols, and the invalid symbols must be detected after decoding. * * - The fixed distance codes also have two invalid symbols that should result * in an error if received. Since all of the distance codes are the same * length, this can be implemented as an incomplete code. Then the invalid * codes are detected while decoding. */ __device__ int init_fixed(inflate_state_s* s) { int16_t* lengths = s->u.scratch.lengths; int symbol; // literal/length table for (symbol = 0; symbol < 144; symbol++) lengths[symbol] = 8; for (; symbol < 256; symbol++) lengths[symbol] = 9; for (; symbol < 280; symbol++) lengths[symbol] = 7; for (; symbol < fix_l_codes; symbol++) lengths[symbol] = 8; construct(s, s->lencnt, s->lensym, lengths, fix_l_codes); // distance table for (symbol = 0; symbol < max_d_codes; symbol++) lengths[symbol] = 5; // build huffman table for distance codes construct(s, s->distcnt, s->distsym, lengths, max_d_codes); return 0; } /** * @brief Decode literal/length and distance codes until an end-of-block code. * * Format notes: * * - Compressed data that is after the block type if fixed or after the code * description if dynamic is a combination of literals and length/distance * pairs terminated by and end-of-block code. Literals are simply Huffman * coded bytes. A length/distance pair is a coded length followed by a * coded distance to represent a string that occurs earlier in the * uncompressed data that occurs again at the current location. * * - Literals, lengths, and the end-of-block code are combined into a single * code of up to 286 symbols. They are 256 literals (0..255), 29 length * symbols (257..285), and the end-of-block symbol (256). * * - There are 256 possible lengths (3..258), and so 29 symbols are not enough * to represent all of those. Lengths 3..10 and 258 are in fact represented * by just a length symbol. Lengths 11..257 are represented as a symbol and * some number of extra bits that are added as an integer to the base length * of the length symbol. The number of extra bits is determined by the base * length symbol. These are in the static arrays below, lens[] for the base * lengths and lext[] for the corresponding number of extra bits. * * - The reason that 258 gets its own symbol is that the longest length is used * often in highly redundant files. Note that 258 can also be coded as the * base value 227 plus the maximum extra value of 31. While a good deflate * should never do this, it is not an error, and should be decoded properly. * * - If a length is decoded, including its extra bits if any, then it is * followed a distance code. There are up to 30 distance symbols. Again * there are many more possible distances (1..32768), so extra bits are added * to a base value represented by the symbol. The distances 1..4 get their * own symbol, but the rest require extra bits. The base distances and * corresponding number of extra bits are below in the static arrays dist[] * and dext[]. 
* * - Literal bytes are simply written to the output. A length/distance pair is * an instruction to copy previously uncompressed bytes to the output. The * copy is from distance bytes back in the output stream, copying for length * bytes. * * - Distances pointing before the beginning of the output data are not * permitted. * * - Overlapped copies, where the length is greater than the distance, are * allowed and common. For example, a distance of one and a length of 258 * simply copies the last byte 258 times. A distance of four and a length of * twelve copies the last four bytes three times. A simple forward copy * ignoring whether the length is greater than the distance or not implements * this correctly. You should not use memcpy() since its behavior is not * defined for overlapped arrays. You should not use memmove() or bcopy() * since though their behavior -is- defined for overlapping arrays, it is * defined to do the wrong thing in this case. */ /// permutation of code length codes static const __device__ __constant__ uint16_t g_lens[29] = { // Size base for length codes 257..285 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258}; static const __device__ __constant__ uint16_t g_lext[29] = { // Extra bits for length codes 257..285 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0}; static const __device__ __constant__ uint16_t g_dists[30] = { // Offset base for distance codes 0..29 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577}; static const __device__ __constant__ uint16_t g_dext[30] = { // Extra bits for distance codes 0..29 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; /// @brief Thread 0 only: decode bitstreams and output symbols into the symbol queue __device__ void decode_symbols(inflate_state_s* s) { uint32_t bitpos = s->bitpos; uint2 bitbuf = s->bitbuf; auto cur = s->cur; auto end = s->end; int32_t batch = 0; int32_t sym, batch_len; do { volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size]; // Wait for the next batch entry to be empty #if ENABLE_PREFETCH // Wait for prefetcher to fetch a worst-case of 48 bits per symbol while ((*(volatile int32_t*)&s->pref.cur_p - (int32_t)(size_t)cur < batch_size * 6) || (s->x.batch_len[batch] != 0)) {} #else while (s->x.batch_len[batch] != 0) {} #endif batch_len = 0; #if ENABLE_PREFETCH if (cur + (bitpos >> 3) >= end) { s->err = 1; break; } #endif // Inner loop decoding symbols do { uint32_t next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s); uint32_t len; sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)]; if ((uint32_t)sym < (uint32_t)(0x100 << 5)) { // We can lookup a second symbol if this was a short literal len = sym & 0x1f; sym >>= 5; b[batch_len++] = sym; next32 >>= len; bitpos += len; sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)]; } if (sym > 0) // short symbol { len = sym & 0x1f; sym = ((sym >> 5) & 0x3ff) + ((next32 >> (sym >> 24)) & ((sym >> 16) & 0x1f)); } else { // Slow length path uint32_t next32r = __brev(next32); const int16_t* symbols = &s->lensym[s->index_slow_len]; unsigned int first = s->first_slow_len; int lext; #pragma unroll 1 for (len = log2_len_lut + 1; len <= max_bits; len++) { unsigned int code = (next32r >> (32 - len)) - first; unsigned int count = s->lencnt[len]; if (code < count) // if length len, return symbol { sym = 
symbols[code]; break; } symbols += count; // else update for next length first += count; first <<= 1; } if (len > max_bits) { s->err = -10; sym = 256; len = 0; } if (sym > 256) { sym -= 257; lext = g_lext[sym]; sym = 256 + g_lens[sym] + bfe(next32, len, lext); len += lext; } } if (sym > 256) { int dist, dext; // skipbits(s, len) inlined - no limit check bitpos += len; if (bitpos >= 32) { bitbuf.x = bitbuf.y; #if ENABLE_PREFETCH bitbuf.y = *prefetch_addr32(s->pref, cur + 8); cur += 4; #else cur += 8; bitbuf.y = (cur < end) ? *(const uint32_t*)cur : 0; cur -= 4; #endif bitpos &= 0x1f; } // get distance next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s); dist = s->u.lut.distlut[next32 & ((1 << log2_dist_lut) - 1)]; if (dist > 0) { len = dist & 0x1f; dext = bfe(dist, 20, 5); dist = bfe(dist, 5, 15); sym |= (dist + bfe(next32, len, dext)) << 16; len += dext; } else { uint32_t next32r = __brev(next32); const int16_t* symbols = &s->distsym[s->index_slow_dist]; unsigned int first = s->first_slow_dist; #pragma unroll 1 for (len = log2_dist_lut + 1; len <= max_bits; len++) { unsigned int code = (next32r >> (32 - len)) - first; unsigned int count = s->distcnt[len]; if (code < count) // if length len, return symbol { dist = symbols[code]; break; } symbols += count; // else update for next length first += count; first <<= 1; } if (len > max_bits) { s->err = -10; sym = 256; len = 0; } else { dext = g_dext[dist]; sym |= (g_dists[dist] + bfe(next32, len, dext)) << 16; len += dext; } } } // skipbits(s, len) inlined with added error check for reading past the end of the input // buffer bitpos += len; if (bitpos >= 32) { bitbuf.x = bitbuf.y; #if ENABLE_PREFETCH bitbuf.y = *prefetch_addr32(s->pref, cur + 8); cur += 4; #else cur += 8; if (cur < end) { bitbuf.y = *(const uint32_t*)cur; cur -= 4; } else { bitbuf.y = 0; cur -= 4; if (cur > end) { s->err = 1; sym = 256; } } #endif bitpos &= 0x1f; } if (sym == 256) break; b[batch_len++] = sym; } while (batch_len < batch_size - 1); s->x.batch_len[batch] = batch_len; #if ENABLE_PREFETCH ((volatile inflate_state_s*)s)->cur = cur; #endif if (batch_len != 0) batch = (batch + 1) & (batch_count - 1); } while (sym != 256); while (s->x.batch_len[batch] != 0) {} s->x.batch_len[batch] = -1; s->bitbuf = bitbuf; s->bitpos = bitpos; #if !ENABLE_PREFETCH s->cur = cur; #endif } /** * @brief Build lookup tables for faster decode * LUT format is symbols*16+length */ __device__ void init_length_lut(inflate_state_s* s, int t) { int32_t* lut = s->u.lut.lenlut; for (uint32_t bits = t; bits < (1 << log2_len_lut); bits += blockDim.x) { const int16_t* cnt = s->lencnt; const int16_t* symbols = s->lensym; int sym = -10 << 5; unsigned int first = 0; unsigned int rbits = __brev(bits) >> (32 - log2_len_lut); for (unsigned int len = 1; len <= log2_len_lut; len++) { unsigned int code = (rbits >> (log2_len_lut - len)) - first; unsigned int count = cnt[len]; if (code < count) { sym = symbols[code]; if (sym > 256) { int lext = g_lext[sym - 257]; sym = (256 + g_lens[sym - 257]) | (((1 << lext) - 1) << (16 - 5)) | (len << (24 - 5)); len += lext; } sym = (sym << 5) | len; break; } symbols += count; // else update for next length first += count; first <<= 1; } lut[bits] = sym; } if (!t) { unsigned int first = 0; unsigned int index = 0; const int16_t* cnt = s->lencnt; for (unsigned int len = 1; len <= log2_len_lut; len++) { unsigned int count = cnt[len]; index += count; first += count; first <<= 1; } s->first_slow_len = first; s->index_slow_len = index; } } /** * @brief Build lookup 
tables for faster decode of distance symbol * LUT format is symbols*16+length */ __device__ void init_distance_lut(inflate_state_s* s, int t) { int32_t* lut = s->u.lut.distlut; for (uint32_t bits = t; bits < (1 << log2_dist_lut); bits += blockDim.x) { const int16_t* cnt = s->distcnt; const int16_t* symbols = s->distsym; int sym = 0; unsigned int first = 0; unsigned int rbits = __brev(bits) >> (32 - log2_dist_lut); for (unsigned int len = 1; len <= log2_dist_lut; len++) { unsigned int code = (rbits >> (log2_dist_lut - len)) - first; unsigned int count = cnt[len]; if (code < count) { int dist = symbols[code]; int dext = g_dext[dist]; sym = g_dists[dist] | (dext << 15); sym = (sym << 5) | len; break; } symbols += count; // else update for next length first += count; first <<= 1; } lut[bits] = sym; } if (!t) { unsigned int first = 0; unsigned int index = 0; const int16_t* cnt = s->distcnt; for (unsigned int len = 1; len <= log2_dist_lut; len++) { unsigned int count = cnt[len]; index += count; first += count; first <<= 1; } s->first_slow_dist = first; s->index_slow_dist = index; } } /// @brief WARP1: process symbols and output uncompressed stream __device__ void process_symbols(inflate_state_s* s, int t) { uint8_t* out = s->out; const uint8_t* outend = s->outend; const uint8_t* outbase = s->outbase; int batch = 0; do { volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size]; int batch_len = 0; if (t == 0) { while ((batch_len = s->x.batch_len[batch]) == 0) {} } batch_len = shuffle(batch_len); if (batch_len < 0) { break; } auto const symt = (t < batch_len) ? b[t] : 256; auto const lit_mask = ballot(symt >= 256); auto pos = min((__ffs(lit_mask) - 1) & 0xff, 32); if (t == 0) { s->x.batch_len[batch] = 0; } if (t < pos && out + t < outend) { out[t] = symt; } out += pos; batch_len -= pos; while (batch_len > 0) { int dist, len, symbol; // Process a non-literal symbol symbol = shuffle(symt, pos); len = max((symbol & 0xffff) - 256, 0); // max should be unnecessary, but just in case dist = symbol >> 16; for (int i = t; i < len; i += 32) { const uint8_t* src = out + ((i >= dist) ? (i % dist) : i) - dist; uint8_t b = (src < outbase) ? 0 : *src; if (out + i < outend) { out[i] = b; } } out += len; pos++; batch_len--; // Process subsequent literals, if any if (!((lit_mask >> pos) & 1)) { len = min((__ffs(lit_mask >> pos) - 1) & 0xff, batch_len); symbol = shuffle(symt, (pos + t) & 0x1f); if (t < len && out + t < outend) { out[t] = symbol; } out += len; pos += len; batch_len -= len; } } batch = (batch + 1) & (batch_count - 1); } while (true); if (t == 0) { s->out = out; } } /** * @brief Initializes a stored block. * * Format notes: * * - After the two-bit stored block type (00), the stored block length and * stored bytes are byte-aligned for fast copying. Therefore any leftover * bits in the byte that has the last bit of the type, as many as seven, are * discarded. The value of the discarded bits are not defined and should not * be checked against any expectation. * * - The second inverted copy of the stored block length does not have to be * checked, but it's probably a good idea to do so anyway. * * - A stored block can have zero length. This is sometimes used to byte-align * subsets of the compressed data for random access or partial recovery. 
*/ __device__ int init_stored(inflate_state_s* s) { uint32_t len, nlen; // length of stored block // Byte align if (s->bitpos & 7) { skipbits(s, 8 - (s->bitpos & 7)); } if (s->cur + (s->bitpos >> 3) >= s->end) { return 2; // Not enough input } // get length and check against its one's complement len = getbits(s, 16); nlen = getbits(s, 16); if (len != (nlen ^ 0xffff)) { return -2; // didn't match complement! } if (s->cur + (s->bitpos >> 3) + len > s->end) { return 2; // Not enough input } s->stored_blk_len = len; // done with a valid stored block return 0; } /// Copy bytes from stored block to destination __device__ void copy_stored(inflate_state_s* s, int t) { auto len = s->stored_blk_len; auto cur = s->cur + s->bitpos / 8; auto out = s->out; auto outend = s->outend; auto const slow_bytes = min(len, (int)((16 - reinterpret_cast<size_t>(out)) % 16)); // Slow copy until output is 16B aligned if (slow_bytes) { for (int i = t; i < slow_bytes; i += blockDim.x) { if (out + i < outend) { out[i] = cur[i]; // Input range has already been validated in init_stored() } } cur += slow_bytes; out += slow_bytes; len -= slow_bytes; } auto fast_bytes = len; if (out < outend) { fast_bytes = (int)min((size_t)fast_bytes, (outend - out)); } fast_bytes &= ~0xf; auto bitpos = ((int)((size_t)cur % 4)) * 8; auto cur4 = cur - (bitpos / 8); if (out < outend) { // Fast copy 16 bytes at a time for (int i = t * 16; i < fast_bytes; i += blockDim.x * 16) { uint4 u; u.x = *reinterpret_cast<const uint32_t*>(cur4 + i + 0 * 4); u.y = *reinterpret_cast<const uint32_t*>(cur4 + i + 1 * 4); u.z = *reinterpret_cast<const uint32_t*>(cur4 + i + 2 * 4); u.w = *reinterpret_cast<const uint32_t*>(cur4 + i + 3 * 4); if (bitpos != 0) { uint32_t v = (bitpos != 0) ? *reinterpret_cast<const uint32_t*>(cur4 + i + 4 * 4) : 0; u.x = __funnelshift_rc(u.x, u.y, bitpos); u.y = __funnelshift_rc(u.y, u.z, bitpos); u.z = __funnelshift_rc(u.z, u.w, bitpos); u.w = __funnelshift_rc(u.w, v, bitpos); } *reinterpret_cast<uint4*>(out + i) = u; } } cur += fast_bytes; out += fast_bytes; len -= fast_bytes; // Slow copy for remaining bytes for (int i = t; i < len; i += blockDim.x) { if (out + i < outend) { out[i] = cur[i]; // Input range has already been validated in init_stored() } } out += len; __syncthreads(); if (t == 0) { // Reset bitstream to end of block auto p = cur + len; auto prefix_bytes = (uint32_t)(((size_t)p) & 3); p -= prefix_bytes; s->cur = p; s->bitbuf.x = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0; p += 4; s->bitbuf.y = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0; s->bitpos = prefix_bytes * 8; s->out = out; } } #if ENABLE_PREFETCH __device__ void init_prefetcher(inflate_state_s* s, int t) { if (t == 0) { s->pref.cur_p = s->cur; s->pref.run = 1; } } __device__ void prefetch_warp(volatile inflate_state_s* s, int t) { const uint8_t* cur_p = s->pref.cur_p; const uint8_t* end = s->end; while (shuffle((t == 0) ? s->pref.run : 0)) { auto cur_lo = (int32_t)(size_t)cur_p; int do_pref = shuffle((t == 0) ? (cur_lo - *(volatile int32_t*)&s->cur < prefetch_size - 32 * 4 - 4) : 0); if (do_pref) { const uint8_t* p = cur_p + 4 * t; *prefetch_addr32(s->pref, p) = (p < end) ? 
*reinterpret_cast<const uint32_t*>(p) : 0; cur_p += 4 * 32; __threadfence_block(); __syncwarp(); if (!t) { s->pref.cur_p = cur_p; __threadfence_block(); } } } } #endif // ENABLE_PREFETCH /** * @brief Parse GZIP header * See https://tools.ietf.org/html/rfc1952 */ __device__ int parse_gzip_header(const uint8_t* src, size_t src_size) { int hdr_len = -1; if (src_size >= 18) { uint32_t sig = (src[0] << 16) | (src[1] << 8) | src[2]; if (sig == 0x1f'8b08) // 24-bit GZIP inflate signature {0x1f, 0x8b, 0x08} { uint8_t flags = src[3]; hdr_len = 10; if (flags & GZIPHeaderFlag::fextra) // Extra fields present { int xlen = src[hdr_len] | (src[hdr_len + 1] << 8); hdr_len += xlen; if (hdr_len >= src_size) return -1; } if (flags & GZIPHeaderFlag::fname) // Original file name present { // Skip zero-terminated string do { if (hdr_len >= src_size) return -1; } while (src[hdr_len++] != 0); } if (flags & GZIPHeaderFlag::fcomment) // Comment present { // Skip zero-terminated string do { if (hdr_len >= src_size) return -1; } while (src[hdr_len++] != 0); } if (flags & GZIPHeaderFlag::fhcrc) // Header CRC present { hdr_len += 2; } if (hdr_len + 8 >= src_size) hdr_len = -1; } } return hdr_len; } /** * @brief INFLATE decompression kernel * * blockDim {block_size,1,1} * * @tparam block_size Thread block dimension for this call * @param inputs Source and destination buffer information per block * @param outputs Destination buffer information per block * @param results Decompression status buffer per block * @param parse_hdr If nonzero, indicates that the compressed bitstream includes a GZIP header */ template <int block_size> __global__ void __launch_bounds__(block_size) inflate_kernel(device_span<device_span<uint8_t const> const> inputs, device_span<device_span<uint8_t> const> outputs, device_span<compression_result> results, gzip_header_included parse_hdr) { __shared__ __align__(16) inflate_state_s state_g; int t = threadIdx.x; int z = blockIdx.x; inflate_state_s* state = &state_g; if (!t) { auto p = inputs[z].data(); auto src_size = inputs[z].size(); // Parse header if needed state->err = 0; if (parse_hdr == gzip_header_included::YES) { int hdr_len = parse_gzip_header(p, src_size); src_size = (src_size >= 8) ? src_size - 8 : 0; // ignore footer if (hdr_len >= 0) { p += hdr_len; src_size -= hdr_len; } else { state->err = hdr_len; } } // Initialize shared state state->out = outputs[z].data(); state->outbase = state->out; state->outend = state->out + outputs[z].size(); state->end = p + src_size; auto const prefix_bytes = (uint32_t)(((size_t)p) & 3); p -= prefix_bytes; state->cur = p; state->bitbuf.x = (p < state->end) ? *reinterpret_cast<uint32_t const*>(p) : 0; p += 4; state->bitbuf.y = (p < state->end) ? 
*reinterpret_cast<uint32_t const*>(p) : 0; state->bitpos = prefix_bytes * 8; } __syncthreads(); // Main loop decoding blocks while (!state->err) { if (!t) { // Thread0: read last flag, block type and custom huffman tables if any if (state->cur + (state->bitpos >> 3) >= state->end) state->err = 2; else { state->blast = getbits(state, 1); state->btype = getbits(state, 2); if (state->btype == 0) state->err = init_stored(state); else if (state->btype == 1) state->err = init_fixed(state); else if (state->btype == 2) state->err = init_dynamic(state); else state->err = -1; // Invalid block } } __syncthreads(); if (!state->err && (state->btype == 1 || state->btype == 2)) { // Initializes lookup tables (block wide) init_length_lut(state, t); init_distance_lut(state, t); #if ENABLE_PREFETCH // Initialize prefetcher init_prefetcher(state, t); #endif if (t < batch_count) { state->x.batch_len[t] = 0; } __syncthreads(); // decode data until end-of-block code if (t < 1 * 32) { // WARP0: decode variable-length symbols if (!t) { // Thread0: decode symbols (single threaded) decode_symbols(state); #if ENABLE_PREFETCH state->pref.run = 0; #endif } } else if (t < 2 * 32) { // WARP1: perform LZ77 using length and distance codes from WARP0 process_symbols(state, t & 0x1f); } #if ENABLE_PREFETCH else if (t < 3 * 32) { // WARP2: Prefetcher: prefetch data for WARP0 prefetch_warp(state, t & 0x1f); } #endif // else WARP3: idle } else if (!state->err && state->btype == 0) { // Uncompressed block (block-wide memcpy) copy_stored(state, t); } if (state->blast) break; __syncthreads(); } __syncthreads(); // Output decompression status and length if (!t) { if (state->err == 0 && state->cur + ((state->bitpos + 7) >> 3) > state->end) { // Read past the end of the input buffer state->err = 2; } else if (state->err == 0 && state->out > state->outend) { // Output buffer too small state->err = 1; } results[z].bytes_written = state->out - state->outbase; results[z].status = [&]() { switch (state->err) { case 0: return compression_status::SUCCESS; case 1: return compression_status::OUTPUT_OVERFLOW; default: return compression_status::FAILURE; } }(); results[z].reserved = (int)(state->end - state->cur); // Here mainly for debug purposes } } /** * @brief Copy a group of buffers * * blockDim {1024,1,1} * * @param inputs Source and destination information per block */ __global__ void __launch_bounds__(1024) copy_uncompressed_kernel(device_span<device_span<uint8_t const> const> inputs, device_span<device_span<uint8_t> const> outputs) { __shared__ const uint8_t* volatile src_g; __shared__ uint8_t* volatile dst_g; __shared__ uint32_t volatile copy_len_g; uint32_t t = threadIdx.x; uint32_t z = blockIdx.x; const uint8_t* src; uint8_t* dst; uint32_t len, src_align_bytes, src_align_bits, dst_align_bytes; if (!t) { src = inputs[z].data(); dst = outputs[z].data(); len = static_cast<uint32_t>(min(inputs[z].size(), outputs[z].size())); src_g = src; dst_g = dst; copy_len_g = len; } __syncthreads(); src = src_g; dst = dst_g; len = copy_len_g; // Align output to 32-bit dst_align_bytes = 3 & -reinterpret_cast<intptr_t>(dst); if (dst_align_bytes != 0) { uint32_t align_len = min(dst_align_bytes, len); if (t < align_len) { dst[t] = src[t]; } src += align_len; dst += align_len; len -= align_len; } src_align_bytes = (uint32_t)(3 & reinterpret_cast<uintptr_t>(src)); src_align_bits = src_align_bytes << 3; while (len >= 32) { const auto* src32 = reinterpret_cast<const uint32_t*>(src - src_align_bytes); uint32_t copy_cnt = min(len >> 2, 1024); if (t < 
copy_cnt) { uint32_t v = src32[t]; if (src_align_bits != 0) { v = __funnelshift_r(v, src32[t + 1], src_align_bits); } reinterpret_cast<uint32_t*>(dst)[t] = v; } src += copy_cnt * 4; dst += copy_cnt * 4; len -= copy_cnt * 4; } if (t < len) { dst[t] = src[t]; } } void gpuinflate(device_span<device_span<uint8_t const> const> inputs, device_span<device_span<uint8_t> const> outputs, device_span<compression_result> results, gzip_header_included parse_hdr, rmm::cuda_stream_view stream) { constexpr int block_size = 128; // Threads per block if (inputs.size() > 0) { inflate_kernel<block_size> <<<inputs.size(), block_size, 0, stream.value()>>>(inputs, outputs, results, parse_hdr); } } void gpu_copy_uncompressed_blocks(device_span<device_span<uint8_t const> const> inputs, device_span<device_span<uint8_t> const> outputs, rmm::cuda_stream_view stream) { if (inputs.size() > 0) { copy_uncompressed_kernel<<<inputs.size(), 1024, 0, stream.value()>>>(inputs, outputs); } } } // namespace io } // namespace cudf
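/*
 * Illustrative sketch of the one systematic difference between the .cu file
 * above and its .hip counterpart earlier in this record: hipify rewrites
 * CUDA's triple-chevron kernel launches (as used for inflate_kernel and
 * copy_uncompressed_kernel) into hipLaunchKernelGGL calls. scale_kernel and
 * its arguments below are a hypothetical toy, not taken from either file.
 */
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale_kernel(float* data, float factor, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) { data[i] *= factor; }
}

int main()
{
  const int n = 1024;
  float* d_data = nullptr;
  cudaMalloc(&d_data, n * sizeof(float));
  cudaMemset(d_data, 0, n * sizeof(float));

  dim3 block(128);
  dim3 grid((n + block.x - 1) / block.x);

  // CUDA form, matching the launches at the end of the .cu file:
  scale_kernel<<<grid, block, 0 /*dynamic smem*/, 0 /*default stream*/>>>(d_data, 2.0f, n);

  // hipify emits the equivalent HIP form, as seen in the .hip file:
  //   hipLaunchKernelGGL(scale_kernel, dim3(grid), dim3(block), 0, 0, d_data, 2.0f, n);

  cudaDeviceSynchronize();
  cudaFree(d_data);
  std::printf("scaled %d elements\n", n);
  return 0;
}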
b358a4cc0b5132738ee9fdf15368b0d24c9b7eb2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <THH/THHNumerics.cuh> #include <c10/macros/Macros.h> #include <ATen/native/hip/LaunchUtils.h> #include <ATen/hip/HIPApplyUtils.cuh> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit #define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched static __device__ inline int p_start(int size, int pad, int kernel, int dilation, int stride) { return (size + pad < ((kernel - 1) * dilation + 1)) ? 0 : (size + pad - ((kernel - 1) * dilation + 1)) / stride + 1; } static __device__ inline int p_end(int size, int pad, int pooled_size, int stride) { return min((size + pad) / stride + 1, pooled_size); } // kernels borrowed from Caffe template <typename scalar_t, typename accscalar_t> __global__ void max_pool_forward_nchw(const int nthreads, const scalar_t* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* top_data, int64_t* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity int maxidx = hstart * width + wstart; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; h += dilation_h) { for (int w = wstart; w < wend; w += dilation_w) { scalar_t val = bottom_data[h * width + w]; if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) { maxidx = h * width + w; maxval = ScalarConvert<scalar_t, accscalar_t>::to(val); } } } top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval); top_mask[index] = maxidx; } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_forward_nhwc(const scalar_t* bottom_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int in_stride_c, const int in_stride_h, const int in_stride_w, scalar_t* top_data, int64_t* top_mask) { extern __shared__ int smem[]; int *out_mask_cached = smem; scalar_t *out_cached = reinterpret_cast<scalar_t*>(&out_mask_cached[channels*blockDim.y*blockDim.z]); // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y 
* threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (int i = thread_id; i < channels*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); out_mask_cached[i] = 0; } __syncthreads(); top_data = top_data + blockIdx.x * pooled_height * pooled_width * channels; top_mask = top_mask + blockIdx.x * pooled_height * pooled_width * channels; bottom_data = bottom_data + blockIdx.x * channels * height * width; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels]; out_mask_cached = &out_mask_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels]; int oH = (pooled_height + gridDim.z-1) / gridDim.z; int oW = (pooled_width + gridDim.y-1) / gridDim.y; int ostartH = threadIdx.z + blockIdx.z*oH; int oendH = ::min(ostartH+oH, pooled_height); int ostartW = threadIdx.y + blockIdx.y*oW; int oendW = ::min(ostartW+oW, pooled_width); for (int oh = ostartH; oh < oendH; oh+=blockDim.z) { for (int ow = ostartW; ow < oendW; ow+=blockDim.y) { int hstart = oh * stride_h - pad_h; int wstart = ow * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; for (int ih = hstart; ih < hend; ih++) { for (int iw = wstart; iw < wend; iw++) { const scalar_t *ptr_input = bottom_data + ih * in_stride_h + iw * in_stride_w; for(int c = threadIdx.x; c < channels; c+= blockDim.x) { scalar_t val = ptr_input[c]; if ((scalar_cast<accscalar_t>(val) > out_cached[c]) || THCNumerics<scalar_t>::isnan(val)) { out_cached[c] = scalar_cast<accscalar_t>(val); out_mask_cached[c] = ih * width + iw; } } } } scalar_t *ptr_output_data = top_data + (oh * pooled_width + ow) * channels; int64_t *ptr_output_mask = top_mask + (oh * pooled_width + ow) * channels; for(int c = threadIdx.x; c < channels; c+= blockDim.x) { ptr_output_data[c] = out_cached[c]; ptr_output_mask[c] = out_mask_cached[c]; out_cached[c] = scalar_t(0.0); out_mask_cached[c] = 0; } } } } static const int BLOCK_THREADS = 256; template <typename scalar_t, typename accscalar_t> #if defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 4) #else C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 8) #endif __global__ void max_pool_backward_nchw(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* bottom_diff) { CUDA_KERNEL_LOOP(index, height*width) { int h = index/width; int w = index - h * width; int phstart = p_start(h, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(h, pad_h, pooled_height, stride_h); int pwstart = p_start(w, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(w, pad_w, pooled_width, stride_w); for (int n = blockIdx.y; n < num; n += gridDim.y) for (int c = blockIdx.z; c < channels; c+= gridDim.z) { accscalar_t gradient = accscalar_t(0); int offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; top_mask += offset; if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw] == h * width + w) 
{ gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[ph * pooled_width + pw]); } } } } else { if (top_mask[phstart * pooled_width + pwstart] == h * width + w) { gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[phstart * pooled_width + pwstart]); } } bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_backward_nhwc(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int out_stride_c, const int out_stride_h, const int out_stride_w, const int in_stride_c, const int in_stride_h, const int in_stride_w, scalar_t* bottom_diff) { extern __shared__ int smem[]; scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem); int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; for (int i = thread_id; i < channels*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); } __syncthreads(); out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels]; bottom_diff = bottom_diff + blockIdx.x * height * width * channels; top_mask = top_mask + blockIdx.x * pooled_height * pooled_width * channels; top_diff = top_diff + blockIdx.x * pooled_height * pooled_width * channels; int iH = (height + gridDim.z-1) / gridDim.z; int iW = (width + gridDim.y-1) / gridDim.y; int istartH = threadIdx.z + blockIdx.z*iH; int iendH = ::min(istartH+iH, height); int istartW = threadIdx.y + blockIdx.y*iW; int iendW = ::min(istartW+iW, width); for (int ih = istartH; ih < iendH; ih+=blockDim.z) { for (int iw = istartW; iw < iendW; iw+=blockDim.y) { int phstart = p_start(ih, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(ih, pad_h, pooled_height, stride_h); int pwstart = p_start(iw, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(iw, pad_w, pooled_width, stride_w); if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for(int oh = phstart; oh < phend; ++oh) { for(int ow = pwstart; ow < pwend; ++ow) { const int64_t* ptr_top_mask = top_mask + oh * out_stride_h + ow * out_stride_w; for (int c = threadIdx.x; c < channels; c += blockDim.x) { if (ptr_top_mask[c] == ih * width + iw) { out_cached[c] += scalar_cast<scalar_t>(top_diff[oh * out_stride_h + ow * out_stride_w + c]); } } } } scalar_t *ptr_bottom_diff = bottom_diff + (ih * width + iw) * channels; for (int c = threadIdx.x; c < channels; c += blockDim.x) { ptr_bottom_diff[c] = out_cached[c]; out_cached[c] = scalar_t(0.0); } } else { const int64_t* ptr_top_mask = top_mask + phstart * out_stride_h + pwstart * out_stride_w; scalar_t *ptr_bottom_diff = bottom_diff + (ih * width + iw) * channels; for (int c = threadIdx.x; c < channels; c += blockDim.x) { if (ptr_top_mask[c] == ih * width + iw) { ptr_bottom_diff[c] = scalar_cast<scalar_t>(top_diff[phstart * out_stride_h + pwstart * out_stride_w + c]); } } } } } } void max_pool2d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg 
indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const int64_t nbatch = input_.ndimension() == 4 ? 
input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(memory_format); const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); output.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); indices.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); const int count = safe_downcast<int, int64_t>(output.numel()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(outputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(outputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int grid_x = nbatch; int grid_y = cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputWidth), block_y*BLOCK_STRIDE); int grid_z = cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputHeight), block_z*BLOCK_STRIDE); const dim3 grid(grid_x, grid_y, grid_z); hipLaunchKernelGGL(( max_pool_forward_nhwc<scalar_t, scalar_t>) , dim3(grid), dim3(block), nInputPlane * block_y * block_z * (sizeof(int) + sizeof(scalar_t)), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, in_stride_c, in_stride_h, in_stride_w, output_data, indices_data); break; } case MemoryFormat::Contiguous: { const int num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, BLOCK_THREADS); hipLaunchKernelGGL(( max_pool_forward_nchw<scalar_t, scalar_t>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); break; } default: TORCH_CHECK(false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } ); TORCH_CHECK(hipGetLastError() == hipSuccess, "max_pool2d_with_indices_out_cuda_frame failed with error code ", hipGetLastError()); if(input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); } } void max_pool2d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const Tensor input = input_.contiguous(memory_format); const int64_t nbatch = input.ndimension() == 4 ? 
input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); max_pool2d_backward_shape_check( input_, gradOutput_, indices, nbatch, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, /*cuda=*/ true); const Tensor gradOutput = gradOutput_.contiguous(memory_format); const int64_t out_stride_c = gradOutput.stride(-3); const int64_t out_stride_h = gradOutput.stride(-2); const int64_t out_stride_w = gradOutput.stride(-1); gradInput.resize_as_(input); gradInput.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); int64_t count = input.numel(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(inputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(inputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int grid_x = nbatch; int grid_y = cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputWidth), block_y*BLOCK_STRIDE); int grid_z = cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputHeight), block_z*BLOCK_STRIDE); const dim3 grid(grid_x, grid_y, grid_z); hipLaunchKernelGGL(( max_pool_backward_nhwc<scalar_t, accscalar_t>) , dim3(grid), dim3(block), nInputPlane * block_y * block_z * sizeof(scalar_t), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, out_stride_c, out_stride_h, out_stride_w, in_stride_c, in_stride_h, in_stride_w, gradInput_data); break; } case MemoryFormat::Contiguous: { int imgcount = inputWidth * inputHeight; dim3 grid; const int blocks = (imgcount + BLOCK_THREADS - 1) / BLOCK_THREADS; grid.x = blocks; grid.y = nbatch; uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; if (maxGridY < grid.y) grid.y = maxGridY; grid.z = nInputPlane; uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2]; if (maxGridZ < grid.z) grid.z = maxGridZ; hipLaunchKernelGGL(( max_pool_backward_nchw<scalar_t, accscalar_t>) , dim3(grid), dim3(BLOCK_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, 
outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, gradInput_data); break; } default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } } ); TORCH_CHECK(hipGetLastError() == hipSuccess, "fractional_max_pool2d_backward_out_cuda failed with error code ", hipGetLastError()); } } // namespace std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool2d_with_indices_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool2d_with_indices_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { auto gradInput = at::zeros_like(input); max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
b358a4cc0b5132738ee9fdf15368b0d24c9b7eb2.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <THC/THCNumerics.cuh> #include <c10/macros/Macros.h> #include <ATen/native/cuda/LaunchUtils.h> #include <ATen/cuda/CUDAApplyUtils.cuh> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit #define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched static __device__ inline int p_start(int size, int pad, int kernel, int dilation, int stride) { return (size + pad < ((kernel - 1) * dilation + 1)) ? 0 : (size + pad - ((kernel - 1) * dilation + 1)) / stride + 1; } static __device__ inline int p_end(int size, int pad, int pooled_size, int stride) { return min((size + pad) / stride + 1, pooled_size); } // kernels borrowed from Caffe template <typename scalar_t, typename accscalar_t> __global__ void max_pool_forward_nchw(const int nthreads, const scalar_t* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* top_data, int64_t* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity int maxidx = hstart * width + wstart; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; h += dilation_h) { for (int w = wstart; w < wend; w += dilation_w) { scalar_t val = bottom_data[h * width + w]; if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) { maxidx = h * width + w; maxval = ScalarConvert<scalar_t, accscalar_t>::to(val); } } } top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval); top_mask[index] = maxidx; } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_forward_nhwc(const scalar_t* bottom_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int in_stride_c, const int in_stride_h, const int in_stride_w, scalar_t* top_data, int64_t* top_mask) { extern __shared__ int smem[]; int *out_mask_cached = smem; scalar_t *out_cached = reinterpret_cast<scalar_t*>(&out_mask_cached[channels*blockDim.y*blockDim.z]); // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // use 
shared memory to store temporary output value. This is simply to // reduce register usage. for (int i = thread_id; i < channels*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); out_mask_cached[i] = 0; } __syncthreads(); top_data = top_data + blockIdx.x * pooled_height * pooled_width * channels; top_mask = top_mask + blockIdx.x * pooled_height * pooled_width * channels; bottom_data = bottom_data + blockIdx.x * channels * height * width; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels]; out_mask_cached = &out_mask_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels]; int oH = (pooled_height + gridDim.z-1) / gridDim.z; int oW = (pooled_width + gridDim.y-1) / gridDim.y; int ostartH = threadIdx.z + blockIdx.z*oH; int oendH = ::min(ostartH+oH, pooled_height); int ostartW = threadIdx.y + blockIdx.y*oW; int oendW = ::min(ostartW+oW, pooled_width); for (int oh = ostartH; oh < oendH; oh+=blockDim.z) { for (int ow = ostartW; ow < oendW; ow+=blockDim.y) { int hstart = oh * stride_h - pad_h; int wstart = ow * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; for (int ih = hstart; ih < hend; ih++) { for (int iw = wstart; iw < wend; iw++) { const scalar_t *ptr_input = bottom_data + ih * in_stride_h + iw * in_stride_w; for(int c = threadIdx.x; c < channels; c+= blockDim.x) { scalar_t val = ptr_input[c]; if ((scalar_cast<accscalar_t>(val) > out_cached[c]) || THCNumerics<scalar_t>::isnan(val)) { out_cached[c] = scalar_cast<accscalar_t>(val); out_mask_cached[c] = ih * width + iw; } } } } scalar_t *ptr_output_data = top_data + (oh * pooled_width + ow) * channels; int64_t *ptr_output_mask = top_mask + (oh * pooled_width + ow) * channels; for(int c = threadIdx.x; c < channels; c+= blockDim.x) { ptr_output_data[c] = out_cached[c]; ptr_output_mask[c] = out_mask_cached[c]; out_cached[c] = scalar_t(0.0); out_mask_cached[c] = 0; } } } } static const int BLOCK_THREADS = 256; template <typename scalar_t, typename accscalar_t> #if defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 4) #else C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 8) #endif __global__ void max_pool_backward_nchw(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* bottom_diff) { CUDA_KERNEL_LOOP(index, height*width) { int h = index/width; int w = index - h * width; int phstart = p_start(h, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(h, pad_h, pooled_height, stride_h); int pwstart = p_start(w, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(w, pad_w, pooled_width, stride_w); for (int n = blockIdx.y; n < num; n += gridDim.y) for (int c = blockIdx.z; c < channels; c+= gridDim.z) { accscalar_t gradient = accscalar_t(0); int offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; top_mask += offset; if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw] == h * width + w) { gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[ph * 
pooled_width + pw]); } } } } else { if (top_mask[phstart * pooled_width + pwstart] == h * width + w) { gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[phstart * pooled_width + pwstart]); } } bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_backward_nhwc(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int out_stride_c, const int out_stride_h, const int out_stride_w, const int in_stride_c, const int in_stride_h, const int in_stride_w, scalar_t* bottom_diff) { extern __shared__ int smem[]; scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem); int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; for (int i = thread_id; i < channels*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); } __syncthreads(); out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels]; bottom_diff = bottom_diff + blockIdx.x * height * width * channels; top_mask = top_mask + blockIdx.x * pooled_height * pooled_width * channels; top_diff = top_diff + blockIdx.x * pooled_height * pooled_width * channels; int iH = (height + gridDim.z-1) / gridDim.z; int iW = (width + gridDim.y-1) / gridDim.y; int istartH = threadIdx.z + blockIdx.z*iH; int iendH = ::min(istartH+iH, height); int istartW = threadIdx.y + blockIdx.y*iW; int iendW = ::min(istartW+iW, width); for (int ih = istartH; ih < iendH; ih+=blockDim.z) { for (int iw = istartW; iw < iendW; iw+=blockDim.y) { int phstart = p_start(ih, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(ih, pad_h, pooled_height, stride_h); int pwstart = p_start(iw, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(iw, pad_w, pooled_width, stride_w); if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for(int oh = phstart; oh < phend; ++oh) { for(int ow = pwstart; ow < pwend; ++ow) { const int64_t* ptr_top_mask = top_mask + oh * out_stride_h + ow * out_stride_w; for (int c = threadIdx.x; c < channels; c += blockDim.x) { if (ptr_top_mask[c] == ih * width + iw) { out_cached[c] += scalar_cast<scalar_t>(top_diff[oh * out_stride_h + ow * out_stride_w + c]); } } } } scalar_t *ptr_bottom_diff = bottom_diff + (ih * width + iw) * channels; for (int c = threadIdx.x; c < channels; c += blockDim.x) { ptr_bottom_diff[c] = out_cached[c]; out_cached[c] = scalar_t(0.0); } } else { const int64_t* ptr_top_mask = top_mask + phstart * out_stride_h + pwstart * out_stride_w; scalar_t *ptr_bottom_diff = bottom_diff + (ih * width + iw) * channels; for (int c = threadIdx.x; c < channels; c += blockDim.x) { if (ptr_top_mask[c] == ih * width + iw) { ptr_bottom_diff[c] = scalar_cast<scalar_t>(top_diff[phstart * out_stride_h + pwstart * out_stride_w + c]); } } } } } } void max_pool2d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input_, 
"input_", 3 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const int64_t nbatch = input_.ndimension() == 4 ? 
input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(memory_format); const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); output.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); indices.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); const int count = safe_downcast<int, int64_t>(output.numel()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(outputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(outputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int grid_x = nbatch; int grid_y = cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputWidth), block_y*BLOCK_STRIDE); int grid_z = cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputHeight), block_z*BLOCK_STRIDE); const dim3 grid(grid_x, grid_y, grid_z); max_pool_forward_nhwc<scalar_t, scalar_t> <<<grid, block, nInputPlane * block_y * block_z * (sizeof(int) + sizeof(scalar_t)), at::cuda::getCurrentCUDAStream()>>>( input_data, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, in_stride_c, in_stride_h, in_stride_w, output_data, indices_data); break; } case MemoryFormat::Contiguous: { const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, BLOCK_THREADS); max_pool_forward_nchw<scalar_t, scalar_t> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); break; } default: TORCH_CHECK(false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } ); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "max_pool2d_with_indices_out_cuda_frame failed with error code ", cudaGetLastError()); if(input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); } } void max_pool2d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const Tensor input = input_.contiguous(memory_format); const int64_t nbatch = input.ndimension() == 4 ? 
input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); max_pool2d_backward_shape_check( input_, gradOutput_, indices, nbatch, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, /*cuda=*/ true); const Tensor gradOutput = gradOutput_.contiguous(memory_format); const int64_t out_stride_c = gradOutput.stride(-3); const int64_t out_stride_h = gradOutput.stride(-2); const int64_t out_stride_w = gradOutput.stride(-1); gradInput.resize_as_(input); gradInput.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); int64_t count = input.numel(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(inputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(inputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int grid_x = nbatch; int grid_y = cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputWidth), block_y*BLOCK_STRIDE); int grid_z = cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputHeight), block_z*BLOCK_STRIDE); const dim3 grid(grid_x, grid_y, grid_z); max_pool_backward_nhwc<scalar_t, accscalar_t> <<<grid, block, nInputPlane * block_y * block_z * sizeof(scalar_t), at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, out_stride_c, out_stride_h, out_stride_w, in_stride_c, in_stride_h, in_stride_w, gradInput_data); break; } case MemoryFormat::Contiguous: { int imgcount = inputWidth * inputHeight; dim3 grid; const int blocks = (imgcount + BLOCK_THREADS - 1) / BLOCK_THREADS; grid.x = blocks; grid.y = nbatch; uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; if (maxGridY < grid.y) grid.y = maxGridY; grid.z = nInputPlane; uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2]; if (maxGridZ < grid.z) grid.z = maxGridZ; max_pool_backward_nchw<scalar_t, accscalar_t> <<<grid, BLOCK_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, gradInput_data); break; } default: 
TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } } ); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "fractional_max_pool2d_backward_out_cuda failed with error code ", cudaGetLastError()); } } // namespace std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool2d_with_indices_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool2d_with_indices_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { auto gradInput = at::zeros_like(input); max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
c3ab18c2ba82b4a5f33cae5d7067ce7b843ef441.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define imin(a,b) (a<b?a:b) const int threadsPerBlock = 256; __global__ void gpu_partial_dot_product( double *a, double *b, double *c, int N) { __shared__ double cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; double temp = 0; while (tid < N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x/2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } double gpu_full_dot_product(const double *a, const double *b, int N) { const int blocksPerGrid = imin( 256, (N+threadsPerBlock-1) / threadsPerBlock ); double *partial_sum; double *dev_a, *dev_b, *dev_partial_sum; // allocate memory on the cpu side partial_sum = (double*)malloc( blocksPerGrid*sizeof(double) ); // allocate the memory on the GPU hipMalloc((void**)&dev_a, N*sizeof(double)); hipMalloc((void**)&dev_b, N*sizeof(double)); hipMalloc((void**)&dev_partial_sum, blocksPerGrid*sizeof(double)); // copy the arrays 'a' and 'b' to the GPU hipMemcpy( dev_a, a, N*sizeof(double), hipMemcpyHostToDevice); hipMemcpy( dev_b, b, N*sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( gpu_partial_dot_product), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_sum, N); hipMemcpy(partial_sum, dev_partial_sum, blocksPerGrid*sizeof(double), hipMemcpyDeviceToHost); double sum = 0; for (int i=0; i<blocksPerGrid; i++) { sum += partial_sum[i]; } hipFree(dev_a); hipFree(dev_b); hipFree(dev_partial_sum); // free memory on the cpu side free(partial_sum); return sum; }
c3ab18c2ba82b4a5f33cae5d7067ce7b843ef441.cu
#include <stdio.h> #define imin(a,b) (a<b?a:b) const int threadsPerBlock = 256; __global__ void gpu_partial_dot_product( double *a, double *b, double *c, int N) { __shared__ double cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; double temp = 0; while (tid < N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x/2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } double gpu_full_dot_product(const double *a, const double *b, int N) { const int blocksPerGrid = imin( 256, (N+threadsPerBlock-1) / threadsPerBlock ); double *partial_sum; double *dev_a, *dev_b, *dev_partial_sum; // allocate memory on the cpu side partial_sum = (double*)malloc( blocksPerGrid*sizeof(double) ); // allocate the memory on the GPU cudaMalloc((void**)&dev_a, N*sizeof(double)); cudaMalloc((void**)&dev_b, N*sizeof(double)); cudaMalloc((void**)&dev_partial_sum, blocksPerGrid*sizeof(double)); // copy the arrays 'a' and 'b' to the GPU cudaMemcpy( dev_a, a, N*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy( dev_b, b, N*sizeof(double), cudaMemcpyHostToDevice); gpu_partial_dot_product<<<blocksPerGrid,threadsPerBlock>>>(dev_a, dev_b, dev_partial_sum, N); cudaMemcpy(partial_sum, dev_partial_sum, blocksPerGrid*sizeof(double), cudaMemcpyDeviceToHost); double sum = 0; for (int i=0; i<blocksPerGrid; i++) { sum += partial_sum[i]; } cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_partial_sum); // free memory on the cpu side free(partial_sum); return sum; }
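// --------------------------------------------------------------------------
// Illustrative host driver (editorial addition, not part of the paired files
// above): calling gpu_full_dot_product as defined in the file above. Assumes a
// separate translation unit linked against it; main() and the fill values are
// arbitrary for the example.
// --------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>

double gpu_full_dot_product(const double *a, const double *b, int N); // defined above

int main(void) {
    const int N = 1 << 20;
    double *a = (double*)malloc(N * sizeof(double));
    double *b = (double*)malloc(N * sizeof(double));
    for (int i = 0; i < N; i++) { a[i] = 1.0; b[i] = 2.0; }

    // The device-side reduction returns the full dot product; expected 2.0 * N here.
    double dot = gpu_full_dot_product(a, b, N);
    printf("dot = %f (expected %f)\n", dot, 2.0 * (double)N);

    free(a);
    free(b);
    return 0;
}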
b634832f94ded6b349d1910e423fd4ef51291bf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "disentangledAttentionPlugin.h" #include <hip/hip_fp16.h> #include <stdio.h> #define IND(i, j, k, dim) \ ((i) *dim.y * dim.z + (j) *dim.z + (k)) // caveat: must use brackets around var name! otherwise IND(i,j+3,k,dim) = // (i*dim.y*dim.z + j+3*dim.z + k)... namespace nvinfer1 { namespace plugin { using namespace nvinfer1; // template specialization for double/float template <typename TDataType, typename std::enable_if<std::is_same<std::decay_t<TDataType>, double>::value || std::is_same<std::decay_t<TDataType>, float>::value, TDataType>::type* dummy = nullptr> __forceinline__ __device__ void compute_attention( TDataType& res, const TDataType& res0, const TDataType& res1, const TDataType& res2, const TDataType& factor) { res = (res0 + res1 + res2) * factor; } // template specialization for half template <typename TDataType, typename std::enable_if<std::is_same<std::decay_t<TDataType>, __half>::value || std::is_same<std::decay_t<TDataType>, half>::value, TDataType>::type* dummy = nullptr> __forceinline__ __device__ void compute_attention( TDataType& res, const TDataType& res0, const TDataType& res1, const TDataType& res2, const TDataType& factor) { #if __CUDA_ARCH__ >= 530 // __hmul only supported >= sm_53 res = __hmul(__hadd(res0, __hadd(res1, res2)), factor); #else // for < sm_53, workaround/fallback is convert to float and downconvert res = __float2half((__half2float(res0) + __half2float(res1) + __half2float(res2)) * __half2float(factor)); #endif } // template specialization for int8 template <typename TDataType, typename std::enable_if<std::is_same<std::decay_t<TDataType>, int8_t>::value || std::is_same<std::decay_t<TDataType>, uint8_t>::value, TDataType>::type* dummy = nullptr> __forceinline__ __device__ void compute_attention( TDataType& res, const TDataType& res0, const TDataType& res1, const TDataType& res2, const TDataType& factor) { res = (res0 + res1 + res2) * factor; } /** * Fused kernel for Disentangled Attention design (first proposed in Microsoft DeBERTa), Version 2. * * @tparam TDataType type of the input data * @tparam tTileSize dimension of the shared memory tile (square) and also the BlockDimX * @tparam tBlockDimY 2D thread block is (tTileSize, tBlockDimY) * @param data0 content-to-content ("c2c") attention QcKc^T * @param data1 content-to-position ("c2p") attention QcKr^T * @param data2 position-to-content ("p2c") attention KcQr^T * @param result attention result * @param dimData0, dimData1, dimData2, dimResult dimension of the tensors * @param factor scaling factor applied on attention for stabilizing model training, 1/sqrt(3d), d is hidden size per * head = H/N. 
H is hidden size, N is number of heads * @param span relative distance hyper-parameter, k, in Disentangled attention * @note C++ 17 and above due to constexpr if */ template <typename TDataType = __half, int32_t tTileSize = 32, int32_t tBlockDimY = 8> __global__ void GatherAddGatherTransposeAddMul_fused(TDataType const* data0, TDataType const* data1, TDataType const* data2, TDataType* result, dim3 dimData0, dim3 dimData1, dim3 dimData2, dim3 dimResult, TDataType factor, int32_t span) { // Tile size should be a multiple of number of block rows assert(tBlockDimY * (tTileSize / tBlockDimY) == tTileSize); // map block to the output (result) int32_t i; int32_t j; int32_t k; int32_t c; int32_t ty; TDataType res0; TDataType res1; TDataType res2; TDataType res; #if kDISENTANGLED_VERSION == 2 int32_t bucket; int32_t mid = span / 2; int32_t index; // tmp values are precomputed for re-use; must be at least float to ensure accuracy float tmp1 = logf(mid); // Multiply by (1 - epsilon) to ensure that taking the ceil of approximately an integer // results in that integer when computing the bucket later on. // This corrects for the mathematical imprecision from using float. constexpr float kEPSILON = 1e-7; float tmp = (mid - 1) / (logf(dimResult.y - 1) - tmp1) * (1 - kEPSILON); #endif __shared__ TDataType T[tTileSize][tTileSize + 1]; // +1 to avoid bank conflict // (i,j,k) location of data2 (transposed) i = blockIdx.z; j = blockIdx.x * tTileSize + threadIdx.y; k = blockIdx.y * tTileSize + threadIdx.x; // gather data2 #pragma unroll for (c = 0, ty = 0; c < tTileSize / tBlockDimY; c++, ty += tBlockDimY) { #if kDISENTANGLED_VERSION == 1 // relative position -- version 1 if (k - (j + ty) >= span) { res2 = data2[IND(i, j + ty, 2 * span - 1, dimData2)]; } else if (k - (j + ty) <= -span) { res2 = data2[IND(i, j + ty, 0, dimData2)]; } else { res2 = data2[IND(i, j + ty, k - (j + ty) + span, dimData2)]; // compute index on the fly } T[ty + threadIdx.y][threadIdx.x] = res2; #elif kDISENTANGLED_VERSION == 2 // relative position w/ log bucket -- version 2 if (k - (j + ty) >= -mid && k - (j + ty) <= mid) { // preserved region, (i - j) + span bucket = k - (j + ty); } else { // log bucket region, bucket(i,j) + span bucket = ceilf((logf(fabsf(k - (j + ty))) - tmp1) * tmp) + mid; bucket = k - (j + ty) < 0 ? -bucket : bucket; } // clamp [0,2k]. Although this is guaranteed by equation, but numerically the floating precision can still break // boundary index = bucket + span; index = min(max(0, index), 2 * span - 1); res2 = data2[IND(i, j + ty, index, dimData2)]; T[ty + threadIdx.y][threadIdx.x] = res2; #endif } __syncthreads(); // (i,j,k) location of data1 (non-transposed) and output. 
i unchanged j = blockIdx.y * tTileSize + threadIdx.y; k = blockIdx.x * tTileSize + threadIdx.x; // read data0 + gather data1 + add all + write #pragma unroll for (c = 0, ty = 0; c < tTileSize / tBlockDimY; c++, ty += tBlockDimY) { #if kDISENTANGLED_VERSION == 1 // relative position -- version 1 // for non-transposed matrix 1, just fetch element at the transposed location & add to the result) if (j + ty - k <= -span) { res1 = data1[IND(i, j + ty, 0, dimData1)]; } else if (j + ty - k >= span) { res1 = data1[IND(i, j + ty, 2 * span - 1, dimData1)]; } else { res1 = data1[IND(i, j + ty, j + ty - k + span, dimData1)]; // compute index on the fly } #elif kDISENTANGLED_VERSION == 2 // relative position w/ log bucket -- version 2 if (j + ty - k >= -mid && j + ty - k <= mid) { // preserved region, (i - j) + span bucket = j + ty - k; } else { // log bucket region, bucket(i,j) + span bucket = ceilf((logf(fabsf((j + ty) - k)) - tmp1) * tmp) + mid; bucket = (j + ty) - k < 0 ? -bucket : bucket; } // clamp [0,2k]. Although this is guaranteed by equation, but numerically the floating precision can still break // boundary index = bucket + span; index = min(max(0, index), 2 * span - 1); res1 = data1[IND(i, j + ty, index, dimData1)]; #endif // for non-tranposed matrix 0, same as matrix 1 res0 = data0[IND(i, j + ty, k, dimData0)]; // (res0 + res1 + res2) / sqrt(3d), d is the hidden states size per head #if __cplusplus >= 201703L // C++ 17 has more convenient `if constexpr` for conditional implementation at compile time; before C++ 17, // switch to template specialization if constexpr (std::is_same<TDataType, double>::value || std::is_same<TDataType, float>::value) { // double, float32 res = (res0 + res1 + T[threadIdx.x][ty + threadIdx.y]) * factor; } else if constexpr (std::is_same<TDataType, __half>::value || std::is_same<TDataType, half>::value) { // fp16 #if __CUDA_ARCH__ >= 530 // __hmul only supported >= sm_53 res = __hmul(__hadd(res0, __hadd(res1, T[threadIdx.x][ty + threadIdx.y])), factor); #else // for < sm_53, workaround/fallback is convert to float and downconvert res = __float2half( (__half2float(res0) + __half2float(res1) + __half2float(T[threadIdx.x][ty + threadIdx.y])) * __half2float(factor)); #endif } else if constexpr (std::is_same<TDataType, int8_t>::value || std::is_same<TDataType, uint8_t>::value) { // int8_t res = (res0 + res1 + T[threadIdx.x][ty + threadIdx.y]) * factor; } #else // before C++ 17, use template specialization compute_attention<TDataType>(res, res0, res1, T[threadIdx.x][ty + threadIdx.y], factor); #endif // write result[IND(i, j + ty, k, dimResult)] = res; } } template <typename TDataType, int32_t tTileSize, int32_t tBlockDimY> void disentangled_kernel_wrapper(TDataType const* data0, TDataType const* data1, TDataType const* data2, TDataType* result, dim3 dimData0, dim3 dimData1, dim3 dimData2, dim3 dimResult, TDataType factor, int32_t span, dim3 block, dim3 grid, hipStream_t stream) { hipLaunchKernelGGL(( GatherAddGatherTransposeAddMul_fused<TDataType, tTileSize, tBlockDimY>), dim3(grid), dim3(block), 0, stream, data0, data1, data2, result, dimData0, dimData1, dimData2, dimResult, factor, span); } template void disentangled_kernel_wrapper<float, kDISENTANGLED_TILESIZE_V1, kDISENTANGLED_BLOCKDIMY_V1>( float const*, float const*, float const*, float*, dim3, dim3, dim3, dim3, float, int32_t, dim3, dim3, hipStream_t); template void disentangled_kernel_wrapper<__half, kDISENTANGLED_TILESIZE_V1, kDISENTANGLED_BLOCKDIMY_V1>(__half const*, __half const*, __half const*, __half*, dim3, 
dim3, dim3, dim3, __half, int32_t, dim3, dim3, hipStream_t); template void disentangled_kernel_wrapper<int8_t, kDISENTANGLED_TILESIZE_V1, kDISENTANGLED_BLOCKDIMY_V1>(int8_t const*, int8_t const*, int8_t const*, int8_t*, dim3, dim3, dim3, dim3, int8_t, int32_t, dim3, dim3, hipStream_t); template void disentangled_kernel_wrapper<float, kDISENTANGLED_TILESIZE_V2, kDISENTANGLED_BLOCKDIMY_V2>( float const*, float const*, float const*, float*, dim3, dim3, dim3, dim3, float, int32_t, dim3, dim3, hipStream_t); template void disentangled_kernel_wrapper<__half, kDISENTANGLED_TILESIZE_V2, kDISENTANGLED_BLOCKDIMY_V2>(__half const*, __half const*, __half const*, __half*, dim3, dim3, dim3, dim3, __half, int32_t, dim3, dim3, hipStream_t); template void disentangled_kernel_wrapper<int8_t, kDISENTANGLED_TILESIZE_V2, kDISENTANGLED_BLOCKDIMY_V2>(int8_t const*, int8_t const*, int8_t const*, int8_t*, dim3, dim3, dim3, dim3, int8_t, int32_t, dim3, dim3, hipStream_t); #undef IND } /* plugin */ } // namespace nvinfer1
b634832f94ded6b349d1910e423fd4ef51291bf9.cu
/* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "disentangledAttentionPlugin.h" #include <cuda_fp16.h> #include <stdio.h> #define IND(i, j, k, dim) \ ((i) *dim.y * dim.z + (j) *dim.z + (k)) // caveat: must use brackets around var name! otherwise IND(i,j+3,k,dim) = // (i*dim.y*dim.z + j+3*dim.z + k)... namespace nvinfer1 { namespace plugin { using namespace nvinfer1; // template specialization for double/float template <typename TDataType, typename std::enable_if<std::is_same<std::decay_t<TDataType>, double>::value || std::is_same<std::decay_t<TDataType>, float>::value, TDataType>::type* dummy = nullptr> __forceinline__ __device__ void compute_attention( TDataType& res, const TDataType& res0, const TDataType& res1, const TDataType& res2, const TDataType& factor) { res = (res0 + res1 + res2) * factor; } // template specialization for half template <typename TDataType, typename std::enable_if<std::is_same<std::decay_t<TDataType>, __half>::value || std::is_same<std::decay_t<TDataType>, half>::value, TDataType>::type* dummy = nullptr> __forceinline__ __device__ void compute_attention( TDataType& res, const TDataType& res0, const TDataType& res1, const TDataType& res2, const TDataType& factor) { #if __CUDA_ARCH__ >= 530 // __hmul only supported >= sm_53 res = __hmul(__hadd(res0, __hadd(res1, res2)), factor); #else // for < sm_53, workaround/fallback is convert to float and downconvert res = __float2half((__half2float(res0) + __half2float(res1) + __half2float(res2)) * __half2float(factor)); #endif } // template specialization for int8 template <typename TDataType, typename std::enable_if<std::is_same<std::decay_t<TDataType>, int8_t>::value || std::is_same<std::decay_t<TDataType>, uint8_t>::value, TDataType>::type* dummy = nullptr> __forceinline__ __device__ void compute_attention( TDataType& res, const TDataType& res0, const TDataType& res1, const TDataType& res2, const TDataType& factor) { res = (res0 + res1 + res2) * factor; } /** * Fused kernel for Disentangled Attention design (first proposed in Microsoft DeBERTa), Version 2. * * @tparam TDataType type of the input data * @tparam tTileSize dimension of the shared memory tile (square) and also the BlockDimX * @tparam tBlockDimY 2D thread block is (tTileSize, tBlockDimY) * @param data0 content-to-content ("c2c") attention QcKc^T * @param data1 content-to-position ("c2p") attention QcKr^T * @param data2 position-to-content ("p2c") attention KcQr^T * @param result attention result * @param dimData0, dimData1, dimData2, dimResult dimension of the tensors * @param factor scaling factor applied on attention for stabilizing model training, 1/sqrt(3d), d is hidden size per * head = H/N. 
H is hidden size, N is number of heads * @param span relative distance hyper-parameter, k, in Disentangled attention * @note C++ 17 and above due to constexpr if */ template <typename TDataType = __half, int32_t tTileSize = 32, int32_t tBlockDimY = 8> __global__ void GatherAddGatherTransposeAddMul_fused(TDataType const* data0, TDataType const* data1, TDataType const* data2, TDataType* result, dim3 dimData0, dim3 dimData1, dim3 dimData2, dim3 dimResult, TDataType factor, int32_t span) { // Tile size should be a multiple of number of block rows assert(tBlockDimY * (tTileSize / tBlockDimY) == tTileSize); // map block to the output (result) int32_t i; int32_t j; int32_t k; int32_t c; int32_t ty; TDataType res0; TDataType res1; TDataType res2; TDataType res; #if kDISENTANGLED_VERSION == 2 int32_t bucket; int32_t mid = span / 2; int32_t index; // tmp values are precomputed for re-use; must be at least float to ensure accuracy float tmp1 = logf(mid); // Multiply by (1 - epsilon) to ensure that taking the ceil of approximately an integer // results in that integer when computing the bucket later on. // This corrects for the mathematical imprecision from using float. constexpr float kEPSILON = 1e-7; float tmp = (mid - 1) / (logf(dimResult.y - 1) - tmp1) * (1 - kEPSILON); #endif __shared__ TDataType T[tTileSize][tTileSize + 1]; // +1 to avoid bank conflict // (i,j,k) location of data2 (transposed) i = blockIdx.z; j = blockIdx.x * tTileSize + threadIdx.y; k = blockIdx.y * tTileSize + threadIdx.x; // gather data2 #pragma unroll for (c = 0, ty = 0; c < tTileSize / tBlockDimY; c++, ty += tBlockDimY) { #if kDISENTANGLED_VERSION == 1 // relative position -- version 1 if (k - (j + ty) >= span) { res2 = data2[IND(i, j + ty, 2 * span - 1, dimData2)]; } else if (k - (j + ty) <= -span) { res2 = data2[IND(i, j + ty, 0, dimData2)]; } else { res2 = data2[IND(i, j + ty, k - (j + ty) + span, dimData2)]; // compute index on the fly } T[ty + threadIdx.y][threadIdx.x] = res2; #elif kDISENTANGLED_VERSION == 2 // relative position w/ log bucket -- version 2 if (k - (j + ty) >= -mid && k - (j + ty) <= mid) { // preserved region, (i - j) + span bucket = k - (j + ty); } else { // log bucket region, bucket(i,j) + span bucket = ceilf((logf(fabsf(k - (j + ty))) - tmp1) * tmp) + mid; bucket = k - (j + ty) < 0 ? -bucket : bucket; } // clamp [0,2k]. Although this is guaranteed by equation, but numerically the floating precision can still break // boundary index = bucket + span; index = min(max(0, index), 2 * span - 1); res2 = data2[IND(i, j + ty, index, dimData2)]; T[ty + threadIdx.y][threadIdx.x] = res2; #endif } __syncthreads(); // (i,j,k) location of data1 (non-transposed) and output. 
i unchanged j = blockIdx.y * tTileSize + threadIdx.y; k = blockIdx.x * tTileSize + threadIdx.x; // read data0 + gather data1 + add all + write #pragma unroll for (c = 0, ty = 0; c < tTileSize / tBlockDimY; c++, ty += tBlockDimY) { #if kDISENTANGLED_VERSION == 1 // relative position -- version 1 // for non-transposed matrix 1, just fetch element at the transposed location & add to the result) if (j + ty - k <= -span) { res1 = data1[IND(i, j + ty, 0, dimData1)]; } else if (j + ty - k >= span) { res1 = data1[IND(i, j + ty, 2 * span - 1, dimData1)]; } else { res1 = data1[IND(i, j + ty, j + ty - k + span, dimData1)]; // compute index on the fly } #elif kDISENTANGLED_VERSION == 2 // relative position w/ log bucket -- version 2 if (j + ty - k >= -mid && j + ty - k <= mid) { // preserved region, (i - j) + span bucket = j + ty - k; } else { // log bucket region, bucket(i,j) + span bucket = ceilf((logf(fabsf((j + ty) - k)) - tmp1) * tmp) + mid; bucket = (j + ty) - k < 0 ? -bucket : bucket; } // clamp [0,2k]. Although this is guaranteed by equation, but numerically the floating precision can still break // boundary index = bucket + span; index = min(max(0, index), 2 * span - 1); res1 = data1[IND(i, j + ty, index, dimData1)]; #endif // for non-tranposed matrix 0, same as matrix 1 res0 = data0[IND(i, j + ty, k, dimData0)]; // (res0 + res1 + res2) / sqrt(3d), d is the hidden states size per head #if __cplusplus >= 201703L // C++ 17 has more convenient `if constexpr` for conditional implementation at compile time; before C++ 17, // switch to template specialization if constexpr (std::is_same<TDataType, double>::value || std::is_same<TDataType, float>::value) { // double, float32 res = (res0 + res1 + T[threadIdx.x][ty + threadIdx.y]) * factor; } else if constexpr (std::is_same<TDataType, __half>::value || std::is_same<TDataType, half>::value) { // fp16 #if __CUDA_ARCH__ >= 530 // __hmul only supported >= sm_53 res = __hmul(__hadd(res0, __hadd(res1, T[threadIdx.x][ty + threadIdx.y])), factor); #else // for < sm_53, workaround/fallback is convert to float and downconvert res = __float2half( (__half2float(res0) + __half2float(res1) + __half2float(T[threadIdx.x][ty + threadIdx.y])) * __half2float(factor)); #endif } else if constexpr (std::is_same<TDataType, int8_t>::value || std::is_same<TDataType, uint8_t>::value) { // int8_t res = (res0 + res1 + T[threadIdx.x][ty + threadIdx.y]) * factor; } #else // before C++ 17, use template specialization compute_attention<TDataType>(res, res0, res1, T[threadIdx.x][ty + threadIdx.y], factor); #endif // write result[IND(i, j + ty, k, dimResult)] = res; } } template <typename TDataType, int32_t tTileSize, int32_t tBlockDimY> void disentangled_kernel_wrapper(TDataType const* data0, TDataType const* data1, TDataType const* data2, TDataType* result, dim3 dimData0, dim3 dimData1, dim3 dimData2, dim3 dimResult, TDataType factor, int32_t span, dim3 block, dim3 grid, cudaStream_t stream) { GatherAddGatherTransposeAddMul_fused<TDataType, tTileSize, tBlockDimY><<<grid, block, 0, stream>>>( data0, data1, data2, result, dimData0, dimData1, dimData2, dimResult, factor, span); } template void disentangled_kernel_wrapper<float, kDISENTANGLED_TILESIZE_V1, kDISENTANGLED_BLOCKDIMY_V1>( float const*, float const*, float const*, float*, dim3, dim3, dim3, dim3, float, int32_t, dim3, dim3, cudaStream_t); template void disentangled_kernel_wrapper<__half, kDISENTANGLED_TILESIZE_V1, kDISENTANGLED_BLOCKDIMY_V1>(__half const*, __half const*, __half const*, __half*, dim3, dim3, dim3, dim3, __half, 
int32_t, dim3, dim3, cudaStream_t); template void disentangled_kernel_wrapper<int8_t, kDISENTANGLED_TILESIZE_V1, kDISENTANGLED_BLOCKDIMY_V1>(int8_t const*, int8_t const*, int8_t const*, int8_t*, dim3, dim3, dim3, dim3, int8_t, int32_t, dim3, dim3, cudaStream_t); template void disentangled_kernel_wrapper<float, kDISENTANGLED_TILESIZE_V2, kDISENTANGLED_BLOCKDIMY_V2>( float const*, float const*, float const*, float*, dim3, dim3, dim3, dim3, float, int32_t, dim3, dim3, cudaStream_t); template void disentangled_kernel_wrapper<__half, kDISENTANGLED_TILESIZE_V2, kDISENTANGLED_BLOCKDIMY_V2>(__half const*, __half const*, __half const*, __half*, dim3, dim3, dim3, dim3, __half, int32_t, dim3, dim3, cudaStream_t); template void disentangled_kernel_wrapper<int8_t, kDISENTANGLED_TILESIZE_V2, kDISENTANGLED_BLOCKDIMY_V2>(int8_t const*, int8_t const*, int8_t const*, int8_t*, dim3, dim3, dim3, dim3, int8_t, int32_t, dim3, dim3, cudaStream_t); #undef IND } /* plugin */ } // namespace nvinfer1
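The version-2 gather above derives a log-bucketed relative-position index before reading data1/data2. A minimal host-side sketch of that same index computation can help validate the gather offsets on the CPU; it is not part of the plugin source, and the parameter name seqLen (standing in for dimResult.y) is an assumption for illustration.

// Hedged host-side sketch (not plugin code): mirrors the version-2 log-bucket
// index computation in GatherAddGatherTransposeAddMul_fused so gather indices
// can be spot-checked on the CPU. `relPos` is k - j, `span` is the relative
// distance hyper-parameter, `seqLen` stands in for dimResult.y (assumed name).
#include <algorithm>
#include <cmath>
#include <cstdint>

int32_t logBucketIndexRef(int32_t relPos, int32_t span, int32_t seqLen)
{
    int32_t const mid = span / 2;
    float const tmp1 = std::log(static_cast<float>(mid));
    constexpr float kEPSILON = 1e-7F;
    float const tmp = (mid - 1) / (std::log(static_cast<float>(seqLen - 1)) - tmp1) * (1 - kEPSILON);

    int32_t bucket;
    if (relPos >= -mid && relPos <= mid)
    {
        bucket = relPos; // preserved (linear) region
    }
    else
    {
        // log-bucket region; keep the sign of the relative position
        bucket = static_cast<int32_t>(
                     std::ceil((std::log(std::fabs(static_cast<float>(relPos))) - tmp1) * tmp))
            + mid;
        bucket = relPos < 0 ? -bucket : bucket;
    }
    // Clamp to [0, 2*span - 1], matching the device code.
    return std::min(std::max(0, bucket + span), 2 * span - 1);
}

Comparing this reference against the index computed inside the kernel for a few (row, column) pairs is usually enough to catch off-by-one errors in the span handling.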
b30ae363a8ed2f9772e70d4a832d0904a2e3aef9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layers/roi_pooling_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ROIPoolForwardMax(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> __global__ void ROIPoolForwardAve(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); Dtype aveval = Dtype(0); int pool_size = (hend - hstart) * (wend - wstart); bool is_empty = (hend <= hstart) || (wend <= wstart); if (is_empty || !pool_size) { top_data[index] = 0; } else { bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; aveval += bottom_data[bottom_index]; } } top_data[index] = aveval / pool_size; } } } template <typename Dtype> void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) switch (this->layer_param_.roi_pooling_param().pool()) { case ROIPoolingParameter_PoolMethod_MAX: hipLaunchKernelGGL(( ROIPoolForwardMax<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); break; case ROIPoolingParameter_PoolMethod_AVE: hipLaunchKernelGGL(( ROIPoolForwardAve<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, spatial_scale_, 
channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIPoolBackwardMax(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); // NOLINT phend = min(max(phend, 0), pooled_height); // NOLINT pwstart = min(max(pwstart, 0), pooled_width); // NOLINT pwend = min(max(pwend, 0), pooled_width); // NOLINT for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { gradient += offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void ROIPoolBackwardAve(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip 
if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); // NOLINT phend = min(max(phend, 0), pooled_height); // NOLINT pwstart = min(max(pwstart, 0), pooled_width); // NOLINT pwend = min(max(pwend, 0), pooled_width); // NOLINT for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); int pool_size = (hend - hstart) * (wend - wstart); if (pool_size > 0) { gradient += offset_top_diff[ph * pooled_width + pw] / pool_size; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to roi inputs."; } if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) switch (this->layer_param_.roi_pooling_param().pool()) { case ROIPoolingParameter_PoolMethod_MAX: hipLaunchKernelGGL(( ROIPoolBackwardMax<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); break; case ROIPoolingParameter_PoolMethod_AVE: hipLaunchKernelGGL(( ROIPoolBackwardAve<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, height_, width_, 
pooled_height_, pooled_width_, bottom_diff, bottom_rois); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer); } // namespace caffe
b30ae363a8ed2f9772e70d4a832d0904a2e3aef9.cu
#include <cfloat> #include <vector> #include "caffe/layers/roi_pooling_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ROIPoolForwardMax(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> __global__ void ROIPoolForwardAve(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); Dtype aveval = Dtype(0); int pool_size = (hend - hstart) * (wend - wstart); bool is_empty = (hend <= hstart) || (wend <= wstart); if (is_empty || !pool_size) { top_data[index] = 0; } else { bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; aveval += bottom_data[bottom_index]; } } top_data[index] = aveval / pool_size; } } } template <typename Dtype> void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) switch (this->layer_param_.roi_pooling_param().pool()) { case ROIPoolingParameter_PoolMethod_MAX: ROIPoolForwardMax<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); break; case ROIPoolingParameter_PoolMethod_AVE: ROIPoolForwardAve<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, 
top_data); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIPoolBackwardMax(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); // NOLINT phend = min(max(phend, 0), pooled_height); // NOLINT pwstart = min(max(pwstart, 0), pooled_width); // NOLINT pwend = min(max(pwend, 0), pooled_width); // NOLINT for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { gradient += offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void ROIPoolBackwardAve(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; 
} int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); // NOLINT phend = min(max(phend, 0), pooled_height); // NOLINT pwstart = min(max(pwstart, 0), pooled_width); // NOLINT pwend = min(max(pwend, 0), pooled_width); // NOLINT for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); int pool_size = (hend - hstart) * (wend - wstart); if (pool_size > 0) { gradient += offset_top_diff[ph * pooled_width + pw] / pool_size; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to roi inputs."; } if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) switch (this->layer_param_.roi_pooling_param().pool()) { case ROIPoolingParameter_PoolMethod_MAX: ROIPoolBackwardMax<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); break; case ROIPoolingParameter_PoolMethod_AVE: ROIPoolBackwardAve<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } 
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer); } // namespace caffe
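Since the forward and backward ROI pooling kernels appear in both the HIP and CUDA variants above, a small CPU reference for a single max-pooled output cell is a convenient cross-check. This is a sketch, not Caffe code; it assumes the 5-value ROI layout [batch_index, x1, y1, x2, y2] used by the kernels and takes a pointer to one feature-map channel.

// Hedged CPU reference (not part of Caffe): computes one max-pooled output
// cell the same way ROIPoolForwardMax does. All names are illustrative.
#include <algorithm>
#include <cfloat>
#include <cmath>

float roiMaxPoolCellRef(const float* feature,  // one channel, height x width, row major
                        int height, int width,
                        const float* roi,      // 5 values for this ROI
                        float spatial_scale,
                        int pooled_height, int pooled_width,
                        int ph, int pw) {
  int roi_start_w = static_cast<int>(std::round(roi[1] * spatial_scale));
  int roi_start_h = static_cast<int>(std::round(roi[2] * spatial_scale));
  int roi_end_w   = static_cast<int>(std::round(roi[3] * spatial_scale));
  int roi_end_h   = static_cast<int>(std::round(roi[4] * spatial_scale));
  // Force malformed ROIs to be 1x1, as the kernels do.
  int roi_width  = std::max(roi_end_w - roi_start_w + 1, 1);
  int roi_height = std::max(roi_end_h - roi_start_h + 1, 1);
  float bin_h = static_cast<float>(roi_height) / pooled_height;
  float bin_w = static_cast<float>(roi_width) / pooled_width;
  // Bin boundaries, offset into the ROI and clipped to the feature map.
  int hstart = std::min(std::max(static_cast<int>(std::floor(ph * bin_h)) + roi_start_h, 0), height);
  int hend   = std::min(std::max(static_cast<int>(std::ceil((ph + 1) * bin_h)) + roi_start_h, 0), height);
  int wstart = std::min(std::max(static_cast<int>(std::floor(pw * bin_w)) + roi_start_w, 0), width);
  int wend   = std::min(std::max(static_cast<int>(std::ceil((pw + 1) * bin_w)) + roi_start_w, 0), width);
  bool is_empty = (hend <= hstart) || (wend <= wstart);
  float maxval = is_empty ? 0.f : -FLT_MAX;  // empty bins pool to zero, as in the kernel
  for (int h = hstart; h < hend; ++h)
    for (int w = wstart; w < wend; ++w)
      maxval = std::max(maxval, feature[h * width + w]);
  return maxval;
}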
61303ac7c860509021fe62b6a5171bd45db4639e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from zgecsrmv.cu normal z -> c, Sun May 3 11:22:58 2015 */ #include "common_magma.h" #define BLOCK_SIZE 256 // CSR-SpMV kernel __global__ void cgecsrmv_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaFloatComplex dot = MAGMA_C_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; dy[ row ] = dot *alpha + beta * dy[ row ]; } } // shifted CSR-SpMV kernel __global__ void cgecsrmv_kernel_shift( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex lambda, magmaFloatComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaFloatComplex * dx, magmaFloatComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaFloatComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaFloatComplex dot = MAGMA_C_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; if( row<blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. The input format is CSR (val, row, col). Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaFloatComplex scalar multiplier @param[in] dval magmaFloatComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaFloatComplex_ptr input vector x @param[in] beta magmaFloatComplex scalar multiplier @param[out] dy magmaFloatComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaFloatComplex_ptr dx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( cgecsrmv_kernel), dim3(grid), dim3(threads), 0, queue , m, n, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU. It is a shifted version of the CSR-SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaFloatComplex scalar multiplier @param[in] lambda magmaFloatComplex scalar multiplier @param[in] dval magmaFloatComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaFloatComplex_ptr input vector x @param[in] beta magmaFloatComplex scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaFloatComplex_ptr output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgecsrmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex lambda, magmaFloatComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaFloatComplex_ptr dx, magmaFloatComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaFloatComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( cgecsrmv_kernel_shift), dim3(grid), dim3(threads), 0, queue , m, n, alpha, lambda, dval, drowptr, dcolind, dx, beta, offset, blocksize, addrows, dy); return MAGMA_SUCCESS; }
61303ac7c860509021fe62b6a5171bd45db4639e.cu
/* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from zgecsrmv.cu normal z -> c, Sun May 3 11:22:58 2015 */ #include "common_magma.h" #define BLOCK_SIZE 256 // CSR-SpMV kernel __global__ void cgecsrmv_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaFloatComplex dot = MAGMA_C_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; dy[ row ] = dot *alpha + beta * dy[ row ]; } } // shifted CSR-SpMV kernel __global__ void cgecsrmv_kernel_shift( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex lambda, magmaFloatComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaFloatComplex * dx, magmaFloatComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaFloatComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaFloatComplex dot = MAGMA_C_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; if( row<blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. The input format is CSR (val, row, col). Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaFloatComplex scalar multiplier @param[in] dval magmaFloatComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaFloatComplex_ptr input vector x @param[in] beta magmaFloatComplex scalar multiplier @param[out] dy magmaFloatComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaFloatComplex_ptr dx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; cgecsrmv_kernel<<< grid, threads, 0, queue >>> (m, n, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU. It is a shifted version of the CSR-SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaFloatComplex scalar multiplier @param[in] lambda magmaFloatComplex scalar multiplier @param[in] dval magmaFloatComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaFloatComplex_ptr input vector x @param[in] beta magmaFloatComplex scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaFloatComplex_ptr output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgecsrmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex lambda, magmaFloatComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaFloatComplex_ptr dx, magmaFloatComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaFloatComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; cgecsrmv_kernel_shift<<< grid, threads, 0, queue >>> (m, n, alpha, lambda, dval, drowptr, dcolind, dx, beta, offset, blocksize, addrows, dy); return MAGMA_SUCCESS; }
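For small matrices, the CSR SpMV computed by cgecsrmv_kernel can be checked against a straightforward CPU loop. The sketch below is illustrative only and substitutes std::complex<float> for magmaFloatComplex; the function name is an assumption.

// Hedged CPU reference (not MAGMA code): y = alpha * A * x + beta * y with A
// stored in CSR (val, rowptr, colind), mirroring the per-row dot product in
// cgecsrmv_kernel.
#include <complex>
#include <vector>

void csrSpmvRef(int num_rows,
                std::complex<float> alpha,
                const std::vector<std::complex<float>>& val,
                const std::vector<int>& rowptr,   // size num_rows + 1
                const std::vector<int>& colind,
                const std::vector<std::complex<float>>& x,
                std::complex<float> beta,
                std::vector<std::complex<float>>& y)
{
    for (int row = 0; row < num_rows; ++row) {
        std::complex<float> dot(0.f, 0.f);
        for (int j = rowptr[row]; j < rowptr[row + 1]; ++j)
            dot += val[j] * x[colind[j]];      // one row of A times x
        y[row] = alpha * dot + beta * y[row];  // same update as the kernel
    }
}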
dbb92a1e8157ab708117dd71399b9dabc03ee479.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2021 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" // For an input tensor, computes the top k entries in each row // (resp. vector along the last dimension). Thus, // values.shape = indices.shape = input.shape[:-1] + [k] void FFModel::top_k(const Tensor& input, Tensor* outputs, int k, bool sorted, const char *name) { TopK* topk = new TopK(*this, input, k, sorted, name); layers.push_back(topk); assert(topk->numOutputs == 2); outputs[0] = topk->outputs[0]; outputs[1] = topk->outputs[1]; } TopK::TopK(FFModel& model, const Tensor& _input, int _k, bool _sorted, const char* name) : Op(model, OP_TOPK, name, _input), k(_k), sorted(_sorted), profiling(model.config.profiling) { numOutputs = 2; outputs[0].numDim = inputs[0].numDim; outputs[1].numDim = inputs[0].numDim; outputs[0].adim[0] = k; outputs[1].adim[0] = k; for (int i = 1; i < inputs[0].numDim; i++) { outputs[0].adim[i] = outputs[1].adim[i] = inputs[0].adim[i]; } numWeights = 0; } void TopK::create_weights(FFModel& model) { // Do nothing } void TopK::create_output_and_partition(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ create_output_and_partition_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim for ElementWiseBinary operator assert(false); } } } template<int NDIM> void TopK::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); int dims[NDIM]; dims[NDIM-1] = k; for (int i = 0; i < NDIM-1; i++) dims[i] = inputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; outputs[1] = model.create_tensor<NDIM>(dims, DT_INT32, this); outputs[1].owner_op = this; outputs[1].owner_idx = 1; Rect<NDIM> input_rect; input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { model.create_disjoint_partition<NDIM>( inputs[0], IndexSpaceT<NDIM>(task_is), input_lps[0], input_grad_lps[0]); } } OpMeta* TopK::init_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { TopK* topk = (TopK*) task->args; FFHandler handle = *((FFHandler*)task->local_args); TopKMeta* m = new TopKMeta(handle); m->profiling = topk->profiling; m->sorted = topk->sorted; return m; } void TopK::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define 
DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ ParallelConfig pc; \ std::string pcname = name; \ ff.config.find_parallel_config(DIM, pcname, pc); \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \ argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TOPK_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(TopK)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[1].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[1].region)); launcher.add_field(2, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } enum class HeapType { kMinHeap, kMaxHeap }; enum class PreferIndices { kLower, kHigher }; template <typename T> struct Entry { int index; T value; }; template <typename T> struct LinearData { typedef Entry<T> Entry; __device__ Entry& operator[](std::size_t index) const { return data[index]; } __device__ int get_index(int i) const { return data[i].index; } __device__ T get_value(int i) const { return data[i].value; } Entry* const data; }; template <typename T> struct IndirectLinearData { typedef Entry<T> Entry; __device__ Entry& operator[](std::size_t index) const { return data[index]; } __device__ int get_index(int i) const { return backing_data[data[i].index].index; } __device__ T get_value(int i) const { return data[i].value; } Entry* const data; Entry* const backing_data; }; template <typename T> struct StridedData { typedef Entry<T> Entry; __device__ Entry& operator[](std::size_t index) const { return data[index * blockDim.x + threadIdx.x]; } __device__ int get_index(int i) const { return (*this)[i].index; } __device__ T get_value(int i) const { return (*this)[i].value; } Entry* const data; }; // A heap of Entry<T> that can either work as a min-heap or as a max-heap. 
template <HeapType heapType, PreferIndices preferIndices, template <typename> class Data, typename T> struct IndexedHeap { typedef typename Data<T>::Entry Entry; const Data<T> data; __device__ IndexedHeap(const Data<T>& d) : data(d) {} __device__ bool is_above(int left, int right) { T left_value = data.get_value(left); T right_value = data.get_value(right); if (left_value == right_value) { if (preferIndices == PreferIndices::kLower) { return data.get_index(left) < data.get_index(right); } else { return data.get_index(left) > data.get_index(right); } } if (heapType == HeapType::kMinHeap) { return left_value < right_value; } else { return left_value > right_value; } } __device__ void assign(int i, const Entry& entry) { data[i] = entry; } __device__ void push_up(int i) { int child = i; int parent; for (; child > 0; child = parent) { parent = (child - 1) / 2; if (!is_above(child, parent)) { // Heap property satisfied. break; } swap(child, parent); } } __device__ void swap(int a, int b) { auto tmp = data[b]; data[b] = data[a]; data[a] = tmp; } __device__ void push_root_down(int k) { push_down(0, k); } // MAX-HEAPIFY in Cormen __device__ void push_down(int node, int k) { while (true) { const int left = 2 * node + 1; const int right = left + 1; int smallest = node; if (left < k && is_above(left, smallest)) { smallest = left; } if (right < k && is_above(right, smallest)) { smallest = right; } if (smallest == node) { break; } swap(smallest, node); node = smallest; } } // BUILD-MAX-HEAPIFY in Cormen __device__ void build(int k) { for (int node = (k - 1) / 2; node >= 0; node--) { push_down(node, k); } } // HEAP-EXTRACT-MAX in Cormen __device__ void remove_root(int k) { data[0] = data[k - 1]; push_root_down(k - 1); } // in-place HEAPSORT in Cormen // This method destroys the heap property. __device__ void sort(int k) { for (int slot = k - 1; slot > 0; slot--) { // This is like remove_root but we insert the element at the end. swap(slot, 0); // Heap is now an element smaller. push_root_down(/*k=*/slot); } } __device__ void replace_root(const Entry& entry, int k) { data[0] = entry; push_root_down(k); } __device__ const Entry& root() { return data[0]; } }; template <HeapType heapType, PreferIndices preferIndices, template <typename> class Data, typename T> __device__ IndexedHeap<heapType, preferIndices, Data, T> make_indexed_heap( typename Data<T>::Entry* data) { return IndexedHeap<heapType, preferIndices, Data, T>{Data<T>{data}}; } // heapTopK walks over [input, input+length) with `step_size` stride starting at // `start_index`. // It builds a top-`k` heap that is stored in `heap_entries` using `Accessor` to // access elements in `heap_entries`. If sorted=true, the elements will be // sorted at the end. template <typename T, template <typename> class Data = LinearData> __device__ void heapTopK(const T* __restrict__ input, int length, int k, Entry<T>* __restrict__ heap_entries, bool sorted = false, int start_index = 0, int step_size = 1) { assert(k <= length); auto heap = make_indexed_heap<HeapType::kMinHeap, PreferIndices::kHigher, Data, T>( heap_entries); int heap_end_index = start_index + k * step_size; if (heap_end_index > length) { heap_end_index = length; } // Initialize the min-heap. for (int index = start_index, slot = 0; index < heap_end_index; index += step_size, slot++) { heap.assign(slot, {index, input[index]}); } heap.build(k); // Now iterate over the remaining items. // If an item is smaller than the min element, it is not amongst the top k. 
// Otherwise, replace the min element with it and push upwards. for (int index = heap_end_index; index < length; index += step_size) { // We prefer elements with lower indices. This is given here. // Later elements automatically have higher indices, so can be discarded. if (input[index] > heap.root().value) { // This element should replace the min. heap.replace_root({index, input[index]}, k); } } // Sort if wanted. if (sorted) { heap.sort(k); } } // mergeShards performs a top-k merge on `num_shards` many sorted streams that // are sorted and stored in `entries` in a strided way: // |s_1 1st|s_2 1st|...s_{num_shards} 1st|s_1 2nd|s_2 2nd|... // The overall top k elements are written to `top_k_values` and their indices // to top_k_indices. // `top_k_heap` is used as temporary storage for the merge heap. template <typename T> __device__ void mergeShards(int num_shards, int k, Entry<T>* __restrict__ entries, Entry<T>* __restrict__ top_k_heap, T* top_k_values, int* top_k_indices) { // If k < num_shards, we can use a min-heap with k elements to get the top k // of the sorted blocks. // If k > num_shards, we can initialize a min-heap with the top element from // each sorted block. const int heap_size = k < num_shards ? k : num_shards; // Min-heap part. { auto min_heap = IndexedHeap<HeapType::kMinHeap, PreferIndices::kHigher, IndirectLinearData, T>{ IndirectLinearData<T>{top_k_heap, entries}}; // Initialize the heap as a min-heap. for (int slot = 0; slot < heap_size; slot++) { min_heap.assign(slot, {slot, entries[slot].value}); } min_heap.build(heap_size); // Now perform top k with the remaining shards (if num_shards > heap_size). for (int shard = heap_size; shard < num_shards; shard++) { const auto entry = entries[shard]; const auto root = min_heap.root(); if (entry.value < root.value) { continue; } if (entry.value == root.value && entry.index > entries[root.index].index) { continue; } // This element should replace the min. min_heap.replace_root({shard, entry.value}, heap_size); } } // Max-part. { // Turn the min-heap into a max-heap in-place. auto max_heap = IndexedHeap<HeapType::kMaxHeap, PreferIndices::kLower, IndirectLinearData, T>{ IndirectLinearData<T>{top_k_heap, entries}}; // Heapify into a max heap. max_heap.build(heap_size); // Now extract the minimum k-1 times. // k is treated specially. const int last_k = k - 1; for (int rank = 0; rank < last_k; rank++) { const Entry<T>& max_element = max_heap.root(); top_k_values[rank] = max_element.value; int shard_index = max_element.index; top_k_indices[rank] = entries[shard_index].index; int next_shard_index = shard_index + num_shards; // For rank < k-1, each top k heap still contains at least 1 element, // so we can draw a replacement. max_heap.replace_root({next_shard_index, entries[next_shard_index].value}, heap_size); } // rank == last_k. 
const Entry<T>& max_element = max_heap.root(); top_k_values[last_k] = max_element.value; int shard_index = max_element.index; top_k_indices[last_k] = entries[shard_index].index; } } template <typename T> __global__ void topk_forward_kernel(const T* __restrict__ input, size_t shared_memory_size, int length, int k, bool sorted, T* __restrict__ output, int* __restrict__ indices) { __shared__ char shared_memory[48 << 10]; const int batch_index = blockIdx.x; const T* batch_input = input + batch_index * length; const int thread_index = threadIdx.x; const int thread_count = blockDim.x; Entry<T>* shared_entries = (Entry<T>*)shared_memory; heapTopK<T, StridedData>(batch_input, length, k, shared_entries, true, thread_index, thread_count); __syncthreads(); if (thread_index == 0) { const int offset = batch_index * k; auto batch_output = output + offset; auto batch_indices = indices + offset; Entry<T>* top_k_heap = shared_entries + thread_count * k; mergeShards(thread_count, k, shared_entries, top_k_heap, batch_output, batch_indices); } } /*static*/ void TopK::forward_kernel(const TopKMeta* m, const float* input_ptr, float* output_ptr, int* indices_ptr, size_t batch_size, int length, int k, bool sorted, hipStream_t stream) { // Adopted from TensorFlow's TopK implementation // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/topk_op_gpu.h int num_shards = 0; { constexpr auto shared_memory_size = 48 << 10; const auto heap_size = k * sizeof(Entry<float>); // shared_memory_size = (num_shards + 1) * heap_size <=> num_shards = shared_memory_size / heap_size - 1; assert(num_shards > 0); if (num_shards > CUDA_NUM_THREADS) num_shards = CUDA_NUM_THREADS; } // We are limited by the amount of shared memory we have per block. size_t shared_memory_size = (num_shards + 1) * k * sizeof(Entry<float>); //size_t num_blocks = (batch_size + num_shards - 1) / num_shards; size_t num_blocks = batch_size; assert(num_shards >= (size_t)k); num_shards = k; hipLaunchKernelGGL(( topk_forward_kernel), dim3(num_blocks), dim3(num_shards), 0, stream, input_ptr, shared_memory_size, length, k, sorted, output_ptr, indices_ptr); } void TopK::forward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { assert(regions.size() == 3); assert(task->regions.size() == 3); //const TopK* topk = (const TopK*) task->args; const TopKMeta* m = *((TopKMeta**)task->local_args); Domain in1_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain out1_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain out2_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); int in_cols = in1_domain.hi()[0] - in1_domain.lo()[0] + 1; int out1_cols = out1_domain.hi()[0] - out1_domain.lo()[0] + 1; int out2_cols = out2_domain.hi()[0] - out2_domain.lo()[0] + 1; assert(out1_domain == out2_domain); for (int i = 1; i < in1_domain.get_dim(); i++) { assert(in1_domain.lo()[i] == out1_domain.lo()[i]); assert(in1_domain.hi()[i] == out1_domain.hi()[i]); } const float* in_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); float* value_ptr = helperGetTensorPointerWO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); int* index_ptr = helperGetTensorPointerWO<int>( regions[2], task->regions[2], FID_DATA, ctx, runtime); hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (m->profiling) { 
hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } int length = in1_domain.hi()[0] - in1_domain.lo()[0] + 1; int k = out1_domain.hi()[0] - out1_domain.lo()[0] + 1; /*TODO: This prints to 5*/ size_t batch_size = in1_domain.get_volume() / length; forward_kernel(m, in_ptr, value_ptr, index_ptr, batch_size, length, k, m->sorted, stream); if (m->profiling) { hipEventRecord(t_end, stream); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); } } void TopK::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TOPK_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[1].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[1].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } template<typename T> __global__ void topk_backward_kernel(const T* __restrict__ value_grad_ptr, const int* __restrict__ indices_ptr, T* __restrict__ in_grad_ptr, size_t batch_size, int length, int k) { coord_t size = (coord_t)batch_size * k; CUDA_KERNEL_LOOP(i, size) { coord_t batch_idx = i / k; coord_t src_offset = batch_idx * length + indices_ptr[i]; in_grad_ptr[src_offset] += value_grad_ptr[i]; } } /*static*/ void TopK::backward_kernel(const TopKMeta* m, const float* value_grad_ptr, const int* indices_ptr, float* in_grad_ptr, size_t batch_size, int length, int k, hipStream_t stream) { hipLaunchKernelGGL(( topk_backward_kernel), dim3(GET_BLOCKS(batch_size*k)), dim3(CUDA_NUM_THREADS), 0, stream, value_grad_ptr, indices_ptr, in_grad_ptr, batch_size, length, k); } /* regions[0](I): out1_grad regions[1](I): out2 regions[2](I/0): in_grad */ void TopK::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { //const TopK* topk = (const TopK*) task->args; const TopKMeta* m = *((TopKMeta**) task->local_args); assert(regions.size() == 3); Domain out1_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain out2_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); assert(out1_domain == out2_domain); for (int i = 1; i < in_domain.get_dim(); i++) { assert(in_domain.lo()[i] == out1_domain.lo()[i]); assert(in_domain.hi()[i] == out1_domain.hi()[i]); } const float* value_grad_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); const int* 
indices_ptr = helperGetTensorPointerRO<int>( regions[1], task->regions[1], FID_DATA, ctx, runtime); float* in_grad_ptr = helperGetTensorPointerRW<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } int length = in_domain.hi()[0] - in_domain.lo()[0] + 1; int k = out1_domain.hi()[0] - out1_domain.lo()[0] + 1; size_t batch_size = in_domain.get_volume() / length; backward_kernel(m, value_grad_ptr, indices_ptr, in_grad_ptr, batch_size, length, k, stream); // TODO: missing profiling here } void TopK::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TOPK_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): value_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(0, FID_DATA); // regions[1](I): indices launcher.add_region_requirement( RegionRequirement(outputs[1].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[1].region)); launcher.add_field(1, FID_DATA); // regions[2](I/O): input_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } TopKMeta::TopKMeta(FFHandler handler) : OpMeta(handler) { } bool TopK::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { // To be implemented assert(false); return false; }
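The backward kernel above (topk_backward_kernel) is a scatter-add: for every batch row b it routes value_grad[b*k + j] back to column indices[b*k + j] of that row's input gradient. Below is a minimal host reference of that indexing, handy for spot-checking the kernel in a test; topk_backward_reference and the std::vector buffers are illustrative and not part of the file above.

#include <cstddef>
#include <vector>

// Host reference for the top-k backward scatter-add, assuming row-major
// (batch_size x length) input gradients and (batch_size x k) value
// gradients / indices, matching the layout used by the kernel above.
void topk_backward_reference(const std::vector<float> &value_grad,
                             const std::vector<int> &indices,
                             std::vector<float> &in_grad,
                             size_t batch_size, int length, int k)
{
  for (size_t b = 0; b < batch_size; b++) {
    for (int j = 0; j < k; j++) {
      // Each selected position accumulates the gradient of the value it produced.
      in_grad[b * length + indices[b * k + j]] += value_grad[b * k + j];
    }
  }
}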
dbb92a1e8157ab708117dd71399b9dabc03ee479.cu
/* Copyright 2021 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" // For an input tensor, computes the top k entries in each row // (resp. vector along the last dimension). Thus, // values.shape = indices.shape = input.shape[:-1] + [k] void FFModel::top_k(const Tensor& input, Tensor* outputs, int k, bool sorted, const char *name) { TopK* topk = new TopK(*this, input, k, sorted, name); layers.push_back(topk); assert(topk->numOutputs == 2); outputs[0] = topk->outputs[0]; outputs[1] = topk->outputs[1]; } TopK::TopK(FFModel& model, const Tensor& _input, int _k, bool _sorted, const char* name) : Op(model, OP_TOPK, name, _input), k(_k), sorted(_sorted), profiling(model.config.profiling) { numOutputs = 2; outputs[0].numDim = inputs[0].numDim; outputs[1].numDim = inputs[0].numDim; outputs[0].adim[0] = k; outputs[1].adim[0] = k; for (int i = 1; i < inputs[0].numDim; i++) { outputs[0].adim[i] = outputs[1].adim[i] = inputs[0].adim[i]; } numWeights = 0; } void TopK::create_weights(FFModel& model) { // Do nothing } void TopK::create_output_and_partition(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ create_output_and_partition_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim for ElementWiseBinary operator assert(false); } } } template<int NDIM> void TopK::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); int dims[NDIM]; dims[NDIM-1] = k; for (int i = 0; i < NDIM-1; i++) dims[i] = inputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; outputs[1] = model.create_tensor<NDIM>(dims, DT_INT32, this); outputs[1].owner_op = this; outputs[1].owner_idx = 1; Rect<NDIM> input_rect; input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { model.create_disjoint_partition<NDIM>( inputs[0], IndexSpaceT<NDIM>(task_is), input_lps[0], input_grad_lps[0]); } } OpMeta* TopK::init_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { TopK* topk = (TopK*) task->args; FFHandler handle = *((FFHandler*)task->local_args); TopKMeta* m = new TopKMeta(handle); m->profiling = topk->profiling; m->sorted = topk->sorted; return m; } void TopK::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ ParallelConfig pc; \ std::string pcname = 
name; \ ff.config.find_parallel_config(DIM, pcname, pc); \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \ argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TOPK_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(TopK)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[1].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[1].region)); launcher.add_field(2, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } enum class HeapType { kMinHeap, kMaxHeap }; enum class PreferIndices { kLower, kHigher }; template <typename T> struct Entry { int index; T value; }; template <typename T> struct LinearData { typedef Entry<T> Entry; __device__ Entry& operator[](std::size_t index) const { return data[index]; } __device__ int get_index(int i) const { return data[i].index; } __device__ T get_value(int i) const { return data[i].value; } Entry* const data; }; template <typename T> struct IndirectLinearData { typedef Entry<T> Entry; __device__ Entry& operator[](std::size_t index) const { return data[index]; } __device__ int get_index(int i) const { return backing_data[data[i].index].index; } __device__ T get_value(int i) const { return data[i].value; } Entry* const data; Entry* const backing_data; }; template <typename T> struct StridedData { typedef Entry<T> Entry; __device__ Entry& operator[](std::size_t index) const { return data[index * blockDim.x + threadIdx.x]; } __device__ int get_index(int i) const { return (*this)[i].index; } __device__ T get_value(int i) const { return (*this)[i].value; } Entry* const data; }; // A heap of Entry<T> that can either work as a min-heap or as a max-heap. template <HeapType heapType, PreferIndices preferIndices, template <typename> class Data, typename T> struct IndexedHeap { typedef typename Data<T>::Entry Entry; const Data<T> data; __device__ IndexedHeap(const Data<T>& d) : data(d) {} __device__ bool is_above(int left, int right) { T left_value = data.get_value(left); T right_value = data.get_value(right); if (left_value == right_value) { if (preferIndices == PreferIndices::kLower) { return data.get_index(left) < data.get_index(right); } else { return data.get_index(left) > data.get_index(right); } } if (heapType == HeapType::kMinHeap) { return left_value < right_value; } else { return left_value > right_value; } } __device__ void assign(int i, const Entry& entry) { data[i] = entry; } __device__ void push_up(int i) { int child = i; int parent; for (; child > 0; child = parent) { parent = (child - 1) / 2; if (!is_above(child, parent)) { // Heap property satisfied. 
break; } swap(child, parent); } } __device__ void swap(int a, int b) { auto tmp = data[b]; data[b] = data[a]; data[a] = tmp; } __device__ void push_root_down(int k) { push_down(0, k); } // MAX-HEAPIFY in Cormen __device__ void push_down(int node, int k) { while (true) { const int left = 2 * node + 1; const int right = left + 1; int smallest = node; if (left < k && is_above(left, smallest)) { smallest = left; } if (right < k && is_above(right, smallest)) { smallest = right; } if (smallest == node) { break; } swap(smallest, node); node = smallest; } } // BUILD-MAX-HEAPIFY in Cormen __device__ void build(int k) { for (int node = (k - 1) / 2; node >= 0; node--) { push_down(node, k); } } // HEAP-EXTRACT-MAX in Cormen __device__ void remove_root(int k) { data[0] = data[k - 1]; push_root_down(k - 1); } // in-place HEAPSORT in Cormen // This method destroys the heap property. __device__ void sort(int k) { for (int slot = k - 1; slot > 0; slot--) { // This is like remove_root but we insert the element at the end. swap(slot, 0); // Heap is now an element smaller. push_root_down(/*k=*/slot); } } __device__ void replace_root(const Entry& entry, int k) { data[0] = entry; push_root_down(k); } __device__ const Entry& root() { return data[0]; } }; template <HeapType heapType, PreferIndices preferIndices, template <typename> class Data, typename T> __device__ IndexedHeap<heapType, preferIndices, Data, T> make_indexed_heap( typename Data<T>::Entry* data) { return IndexedHeap<heapType, preferIndices, Data, T>{Data<T>{data}}; } // heapTopK walks over [input, input+length) with `step_size` stride starting at // `start_index`. // It builds a top-`k` heap that is stored in `heap_entries` using `Accessor` to // access elements in `heap_entries`. If sorted=true, the elements will be // sorted at the end. template <typename T, template <typename> class Data = LinearData> __device__ void heapTopK(const T* __restrict__ input, int length, int k, Entry<T>* __restrict__ heap_entries, bool sorted = false, int start_index = 0, int step_size = 1) { assert(k <= length); auto heap = make_indexed_heap<HeapType::kMinHeap, PreferIndices::kHigher, Data, T>( heap_entries); int heap_end_index = start_index + k * step_size; if (heap_end_index > length) { heap_end_index = length; } // Initialize the min-heap. for (int index = start_index, slot = 0; index < heap_end_index; index += step_size, slot++) { heap.assign(slot, {index, input[index]}); } heap.build(k); // Now iterate over the remaining items. // If an item is smaller than the min element, it is not amongst the top k. // Otherwise, replace the min element with it and push upwards. for (int index = heap_end_index; index < length; index += step_size) { // We prefer elements with lower indices. This is given here. // Later elements automatically have higher indices, so can be discarded. if (input[index] > heap.root().value) { // This element should replace the min. heap.replace_root({index, input[index]}, k); } } // Sort if wanted. if (sorted) { heap.sort(k); } } // mergeShards performs a top-k merge on `num_shards` many sorted streams that // are sorted and stored in `entries` in a strided way: // |s_1 1st|s_2 1st|...s_{num_shards} 1st|s_1 2nd|s_2 2nd|... // The overall top k elements are written to `top_k_values` and their indices // to top_k_indices. // `top_k_heap` is used as temporary storage for the merge heap. 
template <typename T> __device__ void mergeShards(int num_shards, int k, Entry<T>* __restrict__ entries, Entry<T>* __restrict__ top_k_heap, T* top_k_values, int* top_k_indices) { // If k < num_shards, we can use a min-heap with k elements to get the top k // of the sorted blocks. // If k > num_shards, we can initialize a min-heap with the top element from // each sorted block. const int heap_size = k < num_shards ? k : num_shards; // Min-heap part. { auto min_heap = IndexedHeap<HeapType::kMinHeap, PreferIndices::kHigher, IndirectLinearData, T>{ IndirectLinearData<T>{top_k_heap, entries}}; // Initialize the heap as a min-heap. for (int slot = 0; slot < heap_size; slot++) { min_heap.assign(slot, {slot, entries[slot].value}); } min_heap.build(heap_size); // Now perform top k with the remaining shards (if num_shards > heap_size). for (int shard = heap_size; shard < num_shards; shard++) { const auto entry = entries[shard]; const auto root = min_heap.root(); if (entry.value < root.value) { continue; } if (entry.value == root.value && entry.index > entries[root.index].index) { continue; } // This element should replace the min. min_heap.replace_root({shard, entry.value}, heap_size); } } // Max-part. { // Turn the min-heap into a max-heap in-place. auto max_heap = IndexedHeap<HeapType::kMaxHeap, PreferIndices::kLower, IndirectLinearData, T>{ IndirectLinearData<T>{top_k_heap, entries}}; // Heapify into a max heap. max_heap.build(heap_size); // Now extract the minimum k-1 times. // k is treated specially. const int last_k = k - 1; for (int rank = 0; rank < last_k; rank++) { const Entry<T>& max_element = max_heap.root(); top_k_values[rank] = max_element.value; int shard_index = max_element.index; top_k_indices[rank] = entries[shard_index].index; int next_shard_index = shard_index + num_shards; // For rank < k-1, each top k heap still contains at least 1 element, // so we can draw a replacement. max_heap.replace_root({next_shard_index, entries[next_shard_index].value}, heap_size); } // rank == last_k. 
const Entry<T>& max_element = max_heap.root(); top_k_values[last_k] = max_element.value; int shard_index = max_element.index; top_k_indices[last_k] = entries[shard_index].index; } } template <typename T> __global__ void topk_forward_kernel(const T* __restrict__ input, size_t shared_memory_size, int length, int k, bool sorted, T* __restrict__ output, int* __restrict__ indices) { __shared__ char shared_memory[48 << 10]; const int batch_index = blockIdx.x; const T* batch_input = input + batch_index * length; const int thread_index = threadIdx.x; const int thread_count = blockDim.x; Entry<T>* shared_entries = (Entry<T>*)shared_memory; heapTopK<T, StridedData>(batch_input, length, k, shared_entries, true, thread_index, thread_count); __syncthreads(); if (thread_index == 0) { const int offset = batch_index * k; auto batch_output = output + offset; auto batch_indices = indices + offset; Entry<T>* top_k_heap = shared_entries + thread_count * k; mergeShards(thread_count, k, shared_entries, top_k_heap, batch_output, batch_indices); } } /*static*/ void TopK::forward_kernel(const TopKMeta* m, const float* input_ptr, float* output_ptr, int* indices_ptr, size_t batch_size, int length, int k, bool sorted, cudaStream_t stream) { // Adopted from TensorFlow's TopK implementation // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/topk_op_gpu.h int num_shards = 0; { constexpr auto shared_memory_size = 48 << 10; const auto heap_size = k * sizeof(Entry<float>); // shared_memory_size = (num_shards + 1) * heap_size <=> num_shards = shared_memory_size / heap_size - 1; assert(num_shards > 0); if (num_shards > CUDA_NUM_THREADS) num_shards = CUDA_NUM_THREADS; } // We are limited by the amount of shared memory we have per block. size_t shared_memory_size = (num_shards + 1) * k * sizeof(Entry<float>); //size_t num_blocks = (batch_size + num_shards - 1) / num_shards; size_t num_blocks = batch_size; assert(num_shards >= (size_t)k); num_shards = k; topk_forward_kernel<<<num_blocks, num_shards, 0, stream>>>( input_ptr, shared_memory_size, length, k, sorted, output_ptr, indices_ptr); } void TopK::forward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { assert(regions.size() == 3); assert(task->regions.size() == 3); //const TopK* topk = (const TopK*) task->args; const TopKMeta* m = *((TopKMeta**)task->local_args); Domain in1_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain out1_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain out2_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); int in_cols = in1_domain.hi()[0] - in1_domain.lo()[0] + 1; int out1_cols = out1_domain.hi()[0] - out1_domain.lo()[0] + 1; int out2_cols = out2_domain.hi()[0] - out2_domain.lo()[0] + 1; assert(out1_domain == out2_domain); for (int i = 1; i < in1_domain.get_dim(); i++) { assert(in1_domain.lo()[i] == out1_domain.lo()[i]); assert(in1_domain.hi()[i] == out1_domain.hi()[i]); } const float* in_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); float* value_ptr = helperGetTensorPointerWO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); int* index_ptr = helperGetTensorPointerWO<int>( regions[2], task->regions[2], FID_DATA, ctx, runtime); cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); 
cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } int length = in1_domain.hi()[0] - in1_domain.lo()[0] + 1; int k = out1_domain.hi()[0] - out1_domain.lo()[0] + 1; /*TODO: This prints to 5*/ size_t batch_size = in1_domain.get_volume() / length; forward_kernel(m, in_ptr, value_ptr, index_ptr, batch_size, length, k, m->sorted, stream); if (m->profiling) { cudaEventRecord(t_end, stream); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); } } void TopK::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TOPK_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[1].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[1].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } template<typename T> __global__ void topk_backward_kernel(const T* __restrict__ value_grad_ptr, const int* __restrict__ indices_ptr, T* __restrict__ in_grad_ptr, size_t batch_size, int length, int k) { coord_t size = (coord_t)batch_size * k; CUDA_KERNEL_LOOP(i, size) { coord_t batch_idx = i / k; coord_t src_offset = batch_idx * length + indices_ptr[i]; in_grad_ptr[src_offset] += value_grad_ptr[i]; } } /*static*/ void TopK::backward_kernel(const TopKMeta* m, const float* value_grad_ptr, const int* indices_ptr, float* in_grad_ptr, size_t batch_size, int length, int k, cudaStream_t stream) { topk_backward_kernel<<<GET_BLOCKS(batch_size*k), CUDA_NUM_THREADS, 0, stream>>>( value_grad_ptr, indices_ptr, in_grad_ptr, batch_size, length, k); } /* regions[0](I): out1_grad regions[1](I): out2 regions[2](I/0): in_grad */ void TopK::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { //const TopK* topk = (const TopK*) task->args; const TopKMeta* m = *((TopKMeta**) task->local_args); assert(regions.size() == 3); Domain out1_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain out2_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); assert(out1_domain == out2_domain); for (int i = 1; i < in_domain.get_dim(); i++) { assert(in_domain.lo()[i] == out1_domain.lo()[i]); assert(in_domain.hi()[i] == out1_domain.hi()[i]); } const float* value_grad_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); const int* indices_ptr = helperGetTensorPointerRO<int>( 
regions[1], task->regions[1], FID_DATA, ctx, runtime); float* in_grad_ptr = helperGetTensorPointerRW<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } int length = in_domain.hi()[0] - in_domain.lo()[0] + 1; int k = out1_domain.hi()[0] - out1_domain.lo()[0] + 1; size_t batch_size = in_domain.get_volume() / length; backward_kernel(m, value_grad_ptr, indices_ptr, in_grad_ptr, batch_size, length, k, stream); // TODO: missing profiling here } void TopK::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TOPK_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): value_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(0, FID_DATA); // regions[1](I): indices launcher.add_region_requirement( RegionRequirement(outputs[1].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[1].region)); launcher.add_field(1, FID_DATA); // regions[2](I/O): input_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } TopKMeta::TopKMeta(FFHandler handler) : OpMeta(handler) { } bool TopK::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { // To be implemented assert(false); return false; }
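The forward launch above sizes everything against a 48 KB shared-memory budget: each worker thread keeps a k-entry min-heap over its strided slice of the row, and thread 0 uses one extra scratch heap to merge the shards, so the budget must cover (num_shards + 1) heaps. The host-side restatement below walks the same arithmetic with a worked k = 5 example in the comments; EntrySketch, topk_launch_sizing and max_threads are illustrative stand-ins, and the 8-byte entry size and the 1024-thread clamp are assumptions rather than values taken from this file.

#include <cassert>
#include <cstddef>

struct EntrySketch { int index; float value; };   // stand-in for Entry<float>, assumed 8 bytes

size_t topk_launch_sizing(int k, int max_threads, int *block_threads_out)
{
  const size_t shared_budget = 48 << 10;             // matches the static 48 KB __shared__ buffer
  const size_t heap_size = k * sizeof(EntrySketch);  // one k-entry heap per shard
  // shared_budget >= (num_shards + 1) * heap_size: one heap per shard plus one merge scratch heap.
  int num_shards = static_cast<int>(shared_budget / heap_size) - 1;  // k = 5 -> 49152/40 - 1 = 1227
  assert(num_shards > 0);
  if (num_shards > max_threads) num_shards = max_threads;            // e.g. 1024 if that is the thread cap
  size_t bytes = (num_shards + 1) * heap_size;                       // e.g. 1025 * 40 = 41000 bytes
  assert(num_shards >= k);
  *block_threads_out = k;  // the kernel is ultimately launched with k threads per block, one block per row
  return bytes;            // informational only: the kernel reserves the full 48 KB statically
}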
d43e329201f35884a0444e1e0aef5ec05263b6c3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <hip/hip_runtime_api.h>
#define N 1000000

__global__ void vector_add(float *out, float *a, float *b, int n) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    for(int i = index; i < n; i += stride){
        out[i] = a[i] + b[i];
    }
}

int main(){
    hipProfilerStart();
    float *a, *b, *out;

    // Allocate memory
    hipMallocManaged(&a, sizeof(float) * N);
    hipMallocManaged(&b, sizeof(float) * N);
    hipMallocManaged(&out, sizeof(float) * N);

    // Initialize array
    for(int i = 0; i < N; i++){
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    // Main function
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    hipLaunchKernelGGL(vector_add, dim3(numBlocks), dim3(blockSize), 0, 0, out, a, b, N);
    hipDeviceSynchronize();

    hipFree(a);
    hipFree(b);
    hipFree(out);

    hipDeviceReset();
    hipProfilerStop();
}
d43e329201f35884a0444e1e0aef5ec05263b6c3.cu
#include <stdlib.h>
#include <cuda_profiler_api.h>
#define N 1000000

__global__ void vector_add(float *out, float *a, float *b, int n) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    for(int i = index; i < n; i += stride){
        out[i] = a[i] + b[i];
    }
}

int main(){
    cudaProfilerStart();
    float *a, *b, *out;

    // Allocate memory
    cudaMallocManaged(&a, sizeof(float) * N);
    cudaMallocManaged(&b, sizeof(float) * N);
    cudaMallocManaged(&out, sizeof(float) * N);

    // Initialize array
    for(int i = 0; i < N; i++){
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    // Main function
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    vector_add<<<numBlocks, blockSize>>>(out, a, b, N);
    cudaDeviceSynchronize();

    cudaFree(a);
    cudaFree(b);
    cudaFree(out);

    cudaDeviceReset();
    cudaProfilerStop();
}
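Because both versions allocate the buffers with managed memory, the host can read out[] directly once the device synchronize returns, so the grid-stride result is easy to check in place. A minimal sketch of such a check follows; verify_vector_add and the 1e-6 tolerance are illustrative choices, and the expected value 3.0f simply follows from the 1.0f/2.0f initialization above. It would be called between cudaDeviceSynchronize() and cudaFree(out).

#include <cmath>
#include <cstdio>

// Walks the managed output buffer on the host and reports the first mismatch.
bool verify_vector_add(const float *out, int n)
{
  for (int i = 0; i < n; i++) {
    // a[i] = 1.0f and b[i] = 2.0f above, so every element should be 3.0f.
    if (fabsf(out[i] - 3.0f) > 1e-6f) {
      printf("mismatch at %d: got %f\n", i, out[i]);
      return false;
    }
  }
  return true;
}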
7a0aeb3236a5c041519e74f71a562c853adeaec2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // SPDX-FileCopyrightText: 2021 CERN // SPDX-License-Identifier: Apache-2.0 #include "TestEm3.h" #include "TestEm3.cuh" #include <AdePT/Atomic.h> #include <AdePT/BVHNavigator.h> #include <AdePT/MParray.h> #include <CopCore/Global.h> #include <CopCore/PhysicalConstants.h> #include <CopCore/Ranluxpp.h> #include <VecGeom/base/Config.h> #include <VecGeom/base/Stopwatch.h> #ifdef VECGEOM_ENABLE_CUDA #include <VecGeom/backend/cuda/Interface.h> #endif #include <G4HepEmData.hh> #include <G4HepEmElectronInit.hh> #include <G4HepEmGammaInit.hh> #include <G4HepEmMatCutData.hh> #include <G4HepEmMaterialInit.hh> #include <G4HepEmParameters.hh> #include <G4HepEmParametersInit.hh> #include <iostream> #include <iomanip> #include <stdio.h> #include <thread> #include <vector> __constant__ __device__ struct G4HepEmParameters g4HepEmPars; __constant__ __device__ struct G4HepEmData g4HepEmData; __constant__ __device__ int *MCIndex = nullptr; __constant__ __device__ int Zero = 0; struct G4HepEmState { G4HepEmData data; G4HepEmParameters parameters; }; static G4HepEmState *InitG4HepEm() { G4HepEmState *state = new G4HepEmState; InitG4HepEmData(&state->data); InitHepEmParameters(&state->parameters); InitMaterialAndCoupleData(&state->data, &state->parameters); InitElectronData(&state->data, &state->parameters, true); InitElectronData(&state->data, &state->parameters, false); InitGammaData(&state->data, &state->parameters); G4HepEmMatCutData *cutData = state->data.fTheMatCutData; std::cout << "fNumG4MatCuts = " << cutData->fNumG4MatCuts << ", fNumMatCutData = " << cutData->fNumMatCutData << std::endl; // Copy to GPU. CopyG4HepEmDataToGPU(&state->data); COPCORE_CUDA_CHECK(hipMemcpyToSymbol(g4HepEmPars, &state->parameters, sizeof(G4HepEmParameters))); // Create G4HepEmData with the device pointers. G4HepEmData dataOnDevice; dataOnDevice.fTheMatCutData = state->data.fTheMatCutData_gpu; dataOnDevice.fTheMaterialData = state->data.fTheMaterialData_gpu; dataOnDevice.fTheElementData = state->data.fTheElementData_gpu; dataOnDevice.fTheElectronData = state->data.fTheElectronData_gpu; dataOnDevice.fThePositronData = state->data.fThePositronData_gpu; dataOnDevice.fTheSBTableData = state->data.fTheSBTableData_gpu; dataOnDevice.fTheGammaData = state->data.fTheGammaData_gpu; // The other pointers should never be used. dataOnDevice.fTheMatCutData_gpu = nullptr; dataOnDevice.fTheMaterialData_gpu = nullptr; dataOnDevice.fTheElementData_gpu = nullptr; dataOnDevice.fTheElectronData_gpu = nullptr; dataOnDevice.fThePositronData_gpu = nullptr; dataOnDevice.fTheSBTableData_gpu = nullptr; dataOnDevice.fTheGammaData_gpu = nullptr; COPCORE_CUDA_CHECK(hipMemcpyToSymbol(g4HepEmData, &dataOnDevice, sizeof(G4HepEmData))); return state; } static void FreeG4HepEm(G4HepEmState *state) { FreeG4HepEmData(&state->data); delete state; } // A bundle of queues per particle type: // * Two for active particles, one for the current iteration and the second for the next. struct ParticleQueues { adept::MParray *currentlyActive; adept::MParray *nextActive; void SwapActive() { std::swap(currentlyActive, nextActive); } }; struct ParticleType { Track *tracks; SlotManager *slotManager; ParticleQueues queues; hipStream_t stream; hipEvent_t event; enum { Electron = 0, Positron = 1, Gamma = 2, NumParticleTypes, }; }; // A bundle of queues for the three particle types. 
struct AllParticleQueues { ParticleQueues queues[ParticleType::NumParticleTypes]; }; // Kernel to initialize the set of queues per particle type. __global__ void InitParticleQueues(ParticleQueues queues, size_t Capacity) { adept::MParray::MakeInstanceAt(Capacity, queues.currentlyActive); adept::MParray::MakeInstanceAt(Capacity, queues.nextActive); } // Kernel function to initialize a set of primary particles. __global__ void InitPrimaries(ParticleGenerator generator, int startEvent, int numEvents, double energy, double startX, const vecgeom::VPlacedVolume *world, GlobalScoring *globalScoring) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < numEvents; i += blockDim.x * gridDim.x) { Track &track = generator.NextTrack(); track.rngState.SetSeed(314159265 * (startEvent + i)); track.energy = energy; track.numIALeft[0] = -1.0; track.numIALeft[1] = -1.0; track.numIALeft[2] = -1.0; track.pos = {startX, 0, 0}; track.dir = {1.0, 0, 0}; track.navState.Clear(); BVHNavigator::LocatePointIn(world, track.pos, track.navState, true); atomicAdd(&globalScoring->numElectrons, 1); } } // A data structure to transfer statistics after each iteration. struct Stats { int inFlight[ParticleType::NumParticleTypes]; }; // Finish iteration: clear queues and fill statistics. __global__ void FinishIteration(AllParticleQueues all, Stats *stats) { for (int i = 0; i < ParticleType::NumParticleTypes; i++) { all.queues[i].currentlyActive->clear(); stats->inFlight[i] = all.queues[i].nextActive->size(); } } // Deposit energy of particles still in flight. __global__ void DepositEnergy(Track *allTracks, const adept::MParray *queue, GlobalScoring *globalScoring, ScoringPerVolume *scoringPerVolume) { int queueSize = queue->size(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < queueSize; i += blockDim.x * gridDim.x) { const int slot = (*queue)[i]; Track &currentTrack = allTracks[slot]; auto volume = currentTrack.navState.Top(); if (volume == nullptr) { // The particle left the world, why wasn't it killed before?! continue; } int volumeID = volume->id(); double energy = currentTrack.energy; atomicAdd(&globalScoring->energyDeposit, energy); atomicAdd(&scoringPerVolume->energyDeposit[volumeID], energy); } } __global__ void ClearQueue(adept::MParray *queue) { queue->clear(); } struct ThreadData { ParticleType particles[ParticleType::NumParticleTypes]; hipStream_t stream; Stats *stats; Stats *stats_dev; const vecgeom::cuda::VPlacedVolume *world; const int *MCIndex; const SlotManager *slotManagerInit; ScoringPerVolume *scoringPerVolume; GlobalScoring *globalScoring; int id; int threads; int numParticles; int batch; double energy; double startX; void Allocate(size_t capacity) { // Allocate structures to manage tracks of an implicit type: // * memory to hold the actual Track elements, // * objects to manage slots inside the memory, // * queues of slots to remember active particle and those needing relocation, // * a stream and an event for synchronization of kernels. 
const size_t TracksSize = sizeof(Track) * capacity; const size_t QueueSize = adept::MParray::SizeOfInstance(capacity); for (int i = 0; i < ParticleType::NumParticleTypes; i++) { COPCORE_CUDA_CHECK(hipMalloc(&particles[i].tracks, TracksSize)); COPCORE_CUDA_CHECK(hipMalloc(&particles[i].slotManager, sizeof(SlotManager))); COPCORE_CUDA_CHECK(hipMalloc(&particles[i].queues.currentlyActive, QueueSize)); COPCORE_CUDA_CHECK(hipMalloc(&particles[i].queues.nextActive, QueueSize)); hipLaunchKernelGGL(( InitParticleQueues), dim3(1), dim3(1), 0, 0, particles[i].queues, capacity); COPCORE_CUDA_CHECK(hipStreamCreate(&particles[i].stream)); COPCORE_CUDA_CHECK(hipEventCreate(&particles[i].event)); } COPCORE_CUDA_CHECK(hipDeviceSynchronize()); // Create a stream to synchronize kernels of all particle types. COPCORE_CUDA_CHECK(hipStreamCreate(&stream)); COPCORE_CUDA_CHECK(hipMalloc(&stats_dev, sizeof(Stats))); COPCORE_CUDA_CHECK(hipHostMalloc(&stats, sizeof(Stats))); } void Free() { COPCORE_CUDA_CHECK(hipFree(stats_dev)); COPCORE_CUDA_CHECK(hipHostFree(stats)); COPCORE_CUDA_CHECK(hipStreamDestroy(stream)); for (int i = 0; i < ParticleType::NumParticleTypes; i++) { COPCORE_CUDA_CHECK(hipFree(particles[i].tracks)); COPCORE_CUDA_CHECK(hipFree(particles[i].slotManager)); COPCORE_CUDA_CHECK(hipFree(particles[i].queues.currentlyActive)); COPCORE_CUDA_CHECK(hipFree(particles[i].queues.nextActive)); COPCORE_CUDA_CHECK(hipStreamDestroy(particles[i].stream)); COPCORE_CUDA_CHECK(hipEventDestroy(particles[i].event)); } } }; static void Worker(ThreadData *data) { ParticleType *particles = data->particles; ParticleType &electrons = particles[ParticleType::Electron]; ParticleType &positrons = particles[ParticleType::Positron]; ParticleType &gammas = particles[ParticleType::Gamma]; Stats *stats = data->stats; Stats *stats_dev = data->stats_dev; hipStream_t &stream = data->stream; ScoringPerVolume *scoringPerVolume = data->scoringPerVolume; GlobalScoring *globalScoring = data->globalScoring; // Calculate this thread's chunk. int perThread = data->numParticles / data->threads; int remainder = data->numParticles % data->threads; int startEvent = 1 + data->id * perThread; if (data->id < remainder) { perThread++; startEvent += data->id; } else { startEvent += remainder; } int endEvent = startEvent + perThread; for (; startEvent < endEvent; startEvent += data->batch) { int left = endEvent - startEvent; int chunk = ::min(left, data->batch); for (int i = 0; i < ParticleType::NumParticleTypes; i++) { COPCORE_CUDA_CHECK(hipMemcpyAsync(particles[i].slotManager, data->slotManagerInit, sizeof(SlotManager), hipMemcpyDeviceToDevice, stream)); } // Initialize primary particles. 
constexpr int InitThreads = 32; int initBlocks = (chunk + InitThreads - 1) / InitThreads; ParticleGenerator electronGenerator(electrons.tracks, electrons.slotManager, electrons.queues.currentlyActive); hipLaunchKernelGGL(( InitPrimaries), dim3(initBlocks), dim3(InitThreads), 0, stream, electronGenerator, startEvent, chunk, data->energy, data->startX, data->world, globalScoring); COPCORE_CUDA_CHECK(hipStreamSynchronize(stream)); stats->inFlight[ParticleType::Electron] = chunk; stats->inFlight[ParticleType::Positron] = 0; stats->inFlight[ParticleType::Gamma] = 0; constexpr int MaxBlocks = 1024; constexpr int TransportThreads = 32; int transportBlocks; int inFlight; int loopingNo = 0; int previousElectrons = -1, previousPositrons = -1; do { Secondaries secondaries = { .electrons = {electrons.tracks, electrons.slotManager, electrons.queues.nextActive}, .positrons = {positrons.tracks, positrons.slotManager, positrons.queues.nextActive}, .gammas = {gammas.tracks, gammas.slotManager, gammas.queues.nextActive}, }; // *** ELECTRONS *** int numElectrons = stats->inFlight[ParticleType::Electron]; if (numElectrons > 0) { transportBlocks = (numElectrons + TransportThreads - 1) / TransportThreads; transportBlocks = ::min(transportBlocks, MaxBlocks); hipLaunchKernelGGL(( TransportElectrons), dim3(transportBlocks), dim3(TransportThreads), 0, electrons.stream, electrons.tracks, electrons.queues.currentlyActive, secondaries, electrons.queues.nextActive, globalScoring, scoringPerVolume); COPCORE_CUDA_CHECK(hipEventRecord(electrons.event, electrons.stream)); COPCORE_CUDA_CHECK(hipStreamWaitEvent(stream, electrons.event, 0)); } // *** POSITRONS *** int numPositrons = stats->inFlight[ParticleType::Positron]; if (numPositrons > 0) { transportBlocks = (numPositrons + TransportThreads - 1) / TransportThreads; transportBlocks = ::min(transportBlocks, MaxBlocks); hipLaunchKernelGGL(( TransportPositrons), dim3(transportBlocks), dim3(TransportThreads), 0, positrons.stream, positrons.tracks, positrons.queues.currentlyActive, secondaries, positrons.queues.nextActive, globalScoring, scoringPerVolume); COPCORE_CUDA_CHECK(hipEventRecord(positrons.event, positrons.stream)); COPCORE_CUDA_CHECK(hipStreamWaitEvent(stream, positrons.event, 0)); } // *** GAMMAS *** int numGammas = stats->inFlight[ParticleType::Gamma]; if (numGammas > 0) { transportBlocks = (numGammas + TransportThreads - 1) / TransportThreads; transportBlocks = ::min(transportBlocks, MaxBlocks); hipLaunchKernelGGL(( TransportGammas), dim3(transportBlocks), dim3(TransportThreads), 0, gammas.stream, gammas.tracks, gammas.queues.currentlyActive, secondaries, gammas.queues.nextActive, globalScoring, scoringPerVolume); COPCORE_CUDA_CHECK(hipEventRecord(gammas.event, gammas.stream)); COPCORE_CUDA_CHECK(hipStreamWaitEvent(stream, gammas.event, 0)); } // *** END OF TRANSPORT *** // The events ensure synchronization before finishing this iteration and // copying the Stats back to the host. AllParticleQueues queues = {{electrons.queues, positrons.queues, gammas.queues}}; hipLaunchKernelGGL(( FinishIteration), dim3(1), dim3(1), 0, stream, queues, stats_dev); COPCORE_CUDA_CHECK(hipMemcpyAsync(stats, stats_dev, sizeof(Stats), hipMemcpyDeviceToHost, stream)); // Finally synchronize all kernels. COPCORE_CUDA_CHECK(hipStreamSynchronize(stream)); // Count the number of particles in flight. inFlight = 0; for (int i = 0; i < ParticleType::NumParticleTypes; i++) { inFlight += stats->inFlight[i]; } // Swap the queues for the next iteration. 
electrons.queues.SwapActive(); positrons.queues.SwapActive(); gammas.queues.SwapActive(); // Check if only charged particles are left that are looping. numElectrons = stats->inFlight[ParticleType::Electron]; numPositrons = stats->inFlight[ParticleType::Positron]; numGammas = stats->inFlight[ParticleType::Gamma]; if (numElectrons == previousElectrons && numPositrons == previousPositrons && numGammas == 0) { loopingNo++; } else { previousElectrons = numElectrons; previousPositrons = numPositrons; loopingNo = 0; } } while (inFlight > 0 && loopingNo < 20); if (inFlight > 0) { constexpr int DepositThreads = 32; for (int i = 0; i < ParticleType::NumParticleTypes; i++) { ParticleType &pType = particles[i]; int inFlightParticles = stats->inFlight[i]; if (inFlightParticles == 0) { continue; } int depositBlocks = (inFlightParticles + DepositThreads - 1) / DepositThreads; depositBlocks = ::min(depositBlocks, MaxBlocks); hipLaunchKernelGGL(( DepositEnergy), dim3(depositBlocks), dim3(DepositThreads), 0, stream, pType.tracks, pType.queues.currentlyActive, globalScoring, scoringPerVolume); hipLaunchKernelGGL(( ClearQueue), dim3(1), dim3(1), 0, stream, pType.queues.currentlyActive); } COPCORE_CUDA_CHECK(hipStreamSynchronize(stream)); } } } void TestEm3(const vecgeom::cxx::VPlacedVolume *world, int numParticles, double energy, int numThreads, int batch, double startX, const int *MCIndex_host, ScoringPerVolume *scoringPerVolume_host, int numVolumes, GlobalScoring *globalScoring_host) { auto &cudaManager = vecgeom::cxx::CudaManager::Instance(); cudaManager.LoadGeometry(world); cudaManager.Synchronize(); const vecgeom::cuda::VPlacedVolume *world_dev = cudaManager.world_gpu(); InitBVH(); G4HepEmState *state = InitG4HepEm(); // Transfer MC indices. int *MCIndex_dev = nullptr; COPCORE_CUDA_CHECK(hipMalloc(&MCIndex_dev, sizeof(int) * numVolumes)); COPCORE_CUDA_CHECK(hipMemcpy(MCIndex_dev, MCIndex_host, sizeof(int) * numVolumes, hipMemcpyHostToDevice)); COPCORE_CUDA_CHECK(hipMemcpyToSymbol(MCIndex, &MCIndex_dev, sizeof(int *))); // Capacity of the different containers aka the maximum number of particles. constexpr int Capacity = 256 * 1024; std::cout << "INFO: capacity of containers set to " << Capacity << std::endl; if (batch == -1) { // Rule of thumb: at most 1000 particles of one type per GeV primary. batch = Capacity / ((int)energy / copcore::units::GeV) / 1000; } else if (batch < 1) { batch = 1; } std::cout << "INFO: batching " << batch << " particles for transport on the GPU" << std::endl; if (BzFieldValue != 0) { std::cout << "INFO: running with field Bz = " << BzFieldValue / copcore::units::tesla << " T" << std::endl; } else { std::cout << "INFO: running with magnetic field OFF" << std::endl; } // Allocate memory to score charged track length and energy deposit per volume. double *chargedTrackLength = nullptr; COPCORE_CUDA_CHECK(hipMalloc(&chargedTrackLength, sizeof(double) * numVolumes)); COPCORE_CUDA_CHECK(hipMemset(chargedTrackLength, 0, sizeof(double) * numVolumes)); double *energyDeposit = nullptr; COPCORE_CUDA_CHECK(hipMalloc(&energyDeposit, sizeof(double) * numVolumes)); COPCORE_CUDA_CHECK(hipMemset(energyDeposit, 0, sizeof(double) * numVolumes)); // Allocate and initialize scoring data structures. 
GlobalScoring *globalScoring = nullptr; COPCORE_CUDA_CHECK(hipMalloc(&globalScoring, sizeof(GlobalScoring))); COPCORE_CUDA_CHECK(hipMemset(globalScoring, 0, sizeof(GlobalScoring))); ScoringPerVolume *scoringPerVolume = nullptr; ScoringPerVolume scoringPerVolume_devPtrs; scoringPerVolume_devPtrs.chargedTrackLength = chargedTrackLength; scoringPerVolume_devPtrs.energyDeposit = energyDeposit; COPCORE_CUDA_CHECK(hipMalloc(&scoringPerVolume, sizeof(ScoringPerVolume))); COPCORE_CUDA_CHECK( hipMemcpy(scoringPerVolume, &scoringPerVolume_devPtrs, sizeof(ScoringPerVolume), hipMemcpyHostToDevice)); // Allocate memory to hold a "vanilla" SlotManager to initialize for each batch. SlotManager slotManagerInit(Capacity); SlotManager *slotManagerInit_dev = nullptr; COPCORE_CUDA_CHECK(hipMalloc(&slotManagerInit_dev, sizeof(SlotManager))); COPCORE_CUDA_CHECK(hipMemcpy(slotManagerInit_dev, &slotManagerInit, sizeof(SlotManager), hipMemcpyHostToDevice)); // Set up the threads, including their allocation. std::vector<std::thread> threads(numThreads); std::vector<ThreadData> threadData(numThreads); for (int t = 0; t < numThreads; t++) { ThreadData &data = threadData[t]; data.world = world_dev; data.MCIndex = MCIndex_dev; data.slotManagerInit = slotManagerInit_dev; data.scoringPerVolume = scoringPerVolume; data.globalScoring = globalScoring; data.id = t; data.threads = numThreads; data.numParticles = numParticles; data.batch = batch; data.energy = energy; data.startX = startX; data.Allocate(Capacity); } // Start the clock and launch the threads. vecgeom::Stopwatch timer; timer.Start(); std::cout << std::endl << "Simulating particles ..."; for (int t = 0; t < numThreads; t++) { threads[t] = std::thread(Worker, &threadData[t]); } // Join the threads. for (auto &&t : threads) { t.join(); } std::cout << " done!" << std::endl; auto time = timer.Stop(); std::cout << "Run time: " << time << "\n"; // Transfer back scoring. COPCORE_CUDA_CHECK(hipMemcpy(globalScoring_host, globalScoring, sizeof(GlobalScoring), hipMemcpyDeviceToHost)); // Transfer back the scoring per volume (charged track length and energy deposit). COPCORE_CUDA_CHECK(hipMemcpy(scoringPerVolume_host->chargedTrackLength, scoringPerVolume_devPtrs.chargedTrackLength, sizeof(double) * numVolumes, hipMemcpyDeviceToHost)); COPCORE_CUDA_CHECK(hipMemcpy(scoringPerVolume_host->energyDeposit, scoringPerVolume_devPtrs.energyDeposit, sizeof(double) * numVolumes, hipMemcpyDeviceToHost)); // Free resources. for (auto &&d : threadData) { d.Free(); } COPCORE_CUDA_CHECK(hipFree(MCIndex_dev)); COPCORE_CUDA_CHECK(hipFree(chargedTrackLength)); COPCORE_CUDA_CHECK(hipFree(energyDeposit)); COPCORE_CUDA_CHECK(hipFree(globalScoring)); COPCORE_CUDA_CHECK(hipFree(scoringPerVolume)); COPCORE_CUDA_CHECK(hipFree(slotManagerInit_dev)); FreeG4HepEm(state); }
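Worker() above splits the event range [1, numParticles] across threads so that the first `remainder` threads each take one extra event. The same arithmetic as a standalone sketch; event_range is an illustrative name, and the worked numbers in the comment are an assumed example, not values from this file.

// Restates the chunking done at the top of Worker(). Assumed example:
// 10 events over 3 threads -> thread 0 gets [1,5), thread 1 gets [5,8), thread 2 gets [8,11).
void event_range(int numParticles, int threads, int id, int *startEvent, int *endEvent)
{
  int perThread = numParticles / threads;
  int remainder = numParticles % threads;
  int start = 1 + id * perThread;
  if (id < remainder) {
    perThread++;          // the first `remainder` threads handle one extra event
    start += id;
  } else {
    start += remainder;
  }
  *startEvent = start;
  *endEvent = start + perThread;   // half-open range, processed in chunks of `batch`
}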
7a0aeb3236a5c041519e74f71a562c853adeaec2.cu
// SPDX-FileCopyrightText: 2021 CERN // SPDX-License-Identifier: Apache-2.0 #include "TestEm3.h" #include "TestEm3.cuh" #include <AdePT/Atomic.h> #include <AdePT/BVHNavigator.h> #include <AdePT/MParray.h> #include <CopCore/Global.h> #include <CopCore/PhysicalConstants.h> #include <CopCore/Ranluxpp.h> #include <VecGeom/base/Config.h> #include <VecGeom/base/Stopwatch.h> #ifdef VECGEOM_ENABLE_CUDA #include <VecGeom/backend/cuda/Interface.h> #endif #include <G4HepEmData.hh> #include <G4HepEmElectronInit.hh> #include <G4HepEmGammaInit.hh> #include <G4HepEmMatCutData.hh> #include <G4HepEmMaterialInit.hh> #include <G4HepEmParameters.hh> #include <G4HepEmParametersInit.hh> #include <iostream> #include <iomanip> #include <stdio.h> #include <thread> #include <vector> __constant__ __device__ struct G4HepEmParameters g4HepEmPars; __constant__ __device__ struct G4HepEmData g4HepEmData; __constant__ __device__ int *MCIndex = nullptr; __constant__ __device__ int Zero = 0; struct G4HepEmState { G4HepEmData data; G4HepEmParameters parameters; }; static G4HepEmState *InitG4HepEm() { G4HepEmState *state = new G4HepEmState; InitG4HepEmData(&state->data); InitHepEmParameters(&state->parameters); InitMaterialAndCoupleData(&state->data, &state->parameters); InitElectronData(&state->data, &state->parameters, true); InitElectronData(&state->data, &state->parameters, false); InitGammaData(&state->data, &state->parameters); G4HepEmMatCutData *cutData = state->data.fTheMatCutData; std::cout << "fNumG4MatCuts = " << cutData->fNumG4MatCuts << ", fNumMatCutData = " << cutData->fNumMatCutData << std::endl; // Copy to GPU. CopyG4HepEmDataToGPU(&state->data); COPCORE_CUDA_CHECK(cudaMemcpyToSymbol(g4HepEmPars, &state->parameters, sizeof(G4HepEmParameters))); // Create G4HepEmData with the device pointers. G4HepEmData dataOnDevice; dataOnDevice.fTheMatCutData = state->data.fTheMatCutData_gpu; dataOnDevice.fTheMaterialData = state->data.fTheMaterialData_gpu; dataOnDevice.fTheElementData = state->data.fTheElementData_gpu; dataOnDevice.fTheElectronData = state->data.fTheElectronData_gpu; dataOnDevice.fThePositronData = state->data.fThePositronData_gpu; dataOnDevice.fTheSBTableData = state->data.fTheSBTableData_gpu; dataOnDevice.fTheGammaData = state->data.fTheGammaData_gpu; // The other pointers should never be used. dataOnDevice.fTheMatCutData_gpu = nullptr; dataOnDevice.fTheMaterialData_gpu = nullptr; dataOnDevice.fTheElementData_gpu = nullptr; dataOnDevice.fTheElectronData_gpu = nullptr; dataOnDevice.fThePositronData_gpu = nullptr; dataOnDevice.fTheSBTableData_gpu = nullptr; dataOnDevice.fTheGammaData_gpu = nullptr; COPCORE_CUDA_CHECK(cudaMemcpyToSymbol(g4HepEmData, &dataOnDevice, sizeof(G4HepEmData))); return state; } static void FreeG4HepEm(G4HepEmState *state) { FreeG4HepEmData(&state->data); delete state; } // A bundle of queues per particle type: // * Two for active particles, one for the current iteration and the second for the next. struct ParticleQueues { adept::MParray *currentlyActive; adept::MParray *nextActive; void SwapActive() { std::swap(currentlyActive, nextActive); } }; struct ParticleType { Track *tracks; SlotManager *slotManager; ParticleQueues queues; cudaStream_t stream; cudaEvent_t event; enum { Electron = 0, Positron = 1, Gamma = 2, NumParticleTypes, }; }; // A bundle of queues for the three particle types. struct AllParticleQueues { ParticleQueues queues[ParticleType::NumParticleTypes]; }; // Kernel to initialize the set of queues per particle type. 
__global__ void InitParticleQueues(ParticleQueues queues, size_t Capacity) { adept::MParray::MakeInstanceAt(Capacity, queues.currentlyActive); adept::MParray::MakeInstanceAt(Capacity, queues.nextActive); } // Kernel function to initialize a set of primary particles. __global__ void InitPrimaries(ParticleGenerator generator, int startEvent, int numEvents, double energy, double startX, const vecgeom::VPlacedVolume *world, GlobalScoring *globalScoring) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < numEvents; i += blockDim.x * gridDim.x) { Track &track = generator.NextTrack(); track.rngState.SetSeed(314159265 * (startEvent + i)); track.energy = energy; track.numIALeft[0] = -1.0; track.numIALeft[1] = -1.0; track.numIALeft[2] = -1.0; track.pos = {startX, 0, 0}; track.dir = {1.0, 0, 0}; track.navState.Clear(); BVHNavigator::LocatePointIn(world, track.pos, track.navState, true); atomicAdd(&globalScoring->numElectrons, 1); } } // A data structure to transfer statistics after each iteration. struct Stats { int inFlight[ParticleType::NumParticleTypes]; }; // Finish iteration: clear queues and fill statistics. __global__ void FinishIteration(AllParticleQueues all, Stats *stats) { for (int i = 0; i < ParticleType::NumParticleTypes; i++) { all.queues[i].currentlyActive->clear(); stats->inFlight[i] = all.queues[i].nextActive->size(); } } // Deposit energy of particles still in flight. __global__ void DepositEnergy(Track *allTracks, const adept::MParray *queue, GlobalScoring *globalScoring, ScoringPerVolume *scoringPerVolume) { int queueSize = queue->size(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < queueSize; i += blockDim.x * gridDim.x) { const int slot = (*queue)[i]; Track &currentTrack = allTracks[slot]; auto volume = currentTrack.navState.Top(); if (volume == nullptr) { // The particle left the world, why wasn't it killed before?! continue; } int volumeID = volume->id(); double energy = currentTrack.energy; atomicAdd(&globalScoring->energyDeposit, energy); atomicAdd(&scoringPerVolume->energyDeposit[volumeID], energy); } } __global__ void ClearQueue(adept::MParray *queue) { queue->clear(); } struct ThreadData { ParticleType particles[ParticleType::NumParticleTypes]; cudaStream_t stream; Stats *stats; Stats *stats_dev; const vecgeom::cuda::VPlacedVolume *world; const int *MCIndex; const SlotManager *slotManagerInit; ScoringPerVolume *scoringPerVolume; GlobalScoring *globalScoring; int id; int threads; int numParticles; int batch; double energy; double startX; void Allocate(size_t capacity) { // Allocate structures to manage tracks of an implicit type: // * memory to hold the actual Track elements, // * objects to manage slots inside the memory, // * queues of slots to remember active particle and those needing relocation, // * a stream and an event for synchronization of kernels. 
const size_t TracksSize = sizeof(Track) * capacity; const size_t QueueSize = adept::MParray::SizeOfInstance(capacity); for (int i = 0; i < ParticleType::NumParticleTypes; i++) { COPCORE_CUDA_CHECK(cudaMalloc(&particles[i].tracks, TracksSize)); COPCORE_CUDA_CHECK(cudaMalloc(&particles[i].slotManager, sizeof(SlotManager))); COPCORE_CUDA_CHECK(cudaMalloc(&particles[i].queues.currentlyActive, QueueSize)); COPCORE_CUDA_CHECK(cudaMalloc(&particles[i].queues.nextActive, QueueSize)); InitParticleQueues<<<1, 1>>>(particles[i].queues, capacity); COPCORE_CUDA_CHECK(cudaStreamCreate(&particles[i].stream)); COPCORE_CUDA_CHECK(cudaEventCreate(&particles[i].event)); } COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); // Create a stream to synchronize kernels of all particle types. COPCORE_CUDA_CHECK(cudaStreamCreate(&stream)); COPCORE_CUDA_CHECK(cudaMalloc(&stats_dev, sizeof(Stats))); COPCORE_CUDA_CHECK(cudaMallocHost(&stats, sizeof(Stats))); } void Free() { COPCORE_CUDA_CHECK(cudaFree(stats_dev)); COPCORE_CUDA_CHECK(cudaFreeHost(stats)); COPCORE_CUDA_CHECK(cudaStreamDestroy(stream)); for (int i = 0; i < ParticleType::NumParticleTypes; i++) { COPCORE_CUDA_CHECK(cudaFree(particles[i].tracks)); COPCORE_CUDA_CHECK(cudaFree(particles[i].slotManager)); COPCORE_CUDA_CHECK(cudaFree(particles[i].queues.currentlyActive)); COPCORE_CUDA_CHECK(cudaFree(particles[i].queues.nextActive)); COPCORE_CUDA_CHECK(cudaStreamDestroy(particles[i].stream)); COPCORE_CUDA_CHECK(cudaEventDestroy(particles[i].event)); } } }; static void Worker(ThreadData *data) { ParticleType *particles = data->particles; ParticleType &electrons = particles[ParticleType::Electron]; ParticleType &positrons = particles[ParticleType::Positron]; ParticleType &gammas = particles[ParticleType::Gamma]; Stats *stats = data->stats; Stats *stats_dev = data->stats_dev; cudaStream_t &stream = data->stream; ScoringPerVolume *scoringPerVolume = data->scoringPerVolume; GlobalScoring *globalScoring = data->globalScoring; // Calculate this thread's chunk. int perThread = data->numParticles / data->threads; int remainder = data->numParticles % data->threads; int startEvent = 1 + data->id * perThread; if (data->id < remainder) { perThread++; startEvent += data->id; } else { startEvent += remainder; } int endEvent = startEvent + perThread; for (; startEvent < endEvent; startEvent += data->batch) { int left = endEvent - startEvent; int chunk = std::min(left, data->batch); for (int i = 0; i < ParticleType::NumParticleTypes; i++) { COPCORE_CUDA_CHECK(cudaMemcpyAsync(particles[i].slotManager, data->slotManagerInit, sizeof(SlotManager), cudaMemcpyDeviceToDevice, stream)); } // Initialize primary particles. 
constexpr int InitThreads = 32; int initBlocks = (chunk + InitThreads - 1) / InitThreads; ParticleGenerator electronGenerator(electrons.tracks, electrons.slotManager, electrons.queues.currentlyActive); InitPrimaries<<<initBlocks, InitThreads, 0, stream>>>(electronGenerator, startEvent, chunk, data->energy, data->startX, data->world, globalScoring); COPCORE_CUDA_CHECK(cudaStreamSynchronize(stream)); stats->inFlight[ParticleType::Electron] = chunk; stats->inFlight[ParticleType::Positron] = 0; stats->inFlight[ParticleType::Gamma] = 0; constexpr int MaxBlocks = 1024; constexpr int TransportThreads = 32; int transportBlocks; int inFlight; int loopingNo = 0; int previousElectrons = -1, previousPositrons = -1; do { Secondaries secondaries = { .electrons = {electrons.tracks, electrons.slotManager, electrons.queues.nextActive}, .positrons = {positrons.tracks, positrons.slotManager, positrons.queues.nextActive}, .gammas = {gammas.tracks, gammas.slotManager, gammas.queues.nextActive}, }; // *** ELECTRONS *** int numElectrons = stats->inFlight[ParticleType::Electron]; if (numElectrons > 0) { transportBlocks = (numElectrons + TransportThreads - 1) / TransportThreads; transportBlocks = std::min(transportBlocks, MaxBlocks); TransportElectrons<<<transportBlocks, TransportThreads, 0, electrons.stream>>>( electrons.tracks, electrons.queues.currentlyActive, secondaries, electrons.queues.nextActive, globalScoring, scoringPerVolume); COPCORE_CUDA_CHECK(cudaEventRecord(electrons.event, electrons.stream)); COPCORE_CUDA_CHECK(cudaStreamWaitEvent(stream, electrons.event, 0)); } // *** POSITRONS *** int numPositrons = stats->inFlight[ParticleType::Positron]; if (numPositrons > 0) { transportBlocks = (numPositrons + TransportThreads - 1) / TransportThreads; transportBlocks = std::min(transportBlocks, MaxBlocks); TransportPositrons<<<transportBlocks, TransportThreads, 0, positrons.stream>>>( positrons.tracks, positrons.queues.currentlyActive, secondaries, positrons.queues.nextActive, globalScoring, scoringPerVolume); COPCORE_CUDA_CHECK(cudaEventRecord(positrons.event, positrons.stream)); COPCORE_CUDA_CHECK(cudaStreamWaitEvent(stream, positrons.event, 0)); } // *** GAMMAS *** int numGammas = stats->inFlight[ParticleType::Gamma]; if (numGammas > 0) { transportBlocks = (numGammas + TransportThreads - 1) / TransportThreads; transportBlocks = std::min(transportBlocks, MaxBlocks); TransportGammas<<<transportBlocks, TransportThreads, 0, gammas.stream>>>( gammas.tracks, gammas.queues.currentlyActive, secondaries, gammas.queues.nextActive, globalScoring, scoringPerVolume); COPCORE_CUDA_CHECK(cudaEventRecord(gammas.event, gammas.stream)); COPCORE_CUDA_CHECK(cudaStreamWaitEvent(stream, gammas.event, 0)); } // *** END OF TRANSPORT *** // The events ensure synchronization before finishing this iteration and // copying the Stats back to the host. AllParticleQueues queues = {{electrons.queues, positrons.queues, gammas.queues}}; FinishIteration<<<1, 1, 0, stream>>>(queues, stats_dev); COPCORE_CUDA_CHECK(cudaMemcpyAsync(stats, stats_dev, sizeof(Stats), cudaMemcpyDeviceToHost, stream)); // Finally synchronize all kernels. COPCORE_CUDA_CHECK(cudaStreamSynchronize(stream)); // Count the number of particles in flight. inFlight = 0; for (int i = 0; i < ParticleType::NumParticleTypes; i++) { inFlight += stats->inFlight[i]; } // Swap the queues for the next iteration. electrons.queues.SwapActive(); positrons.queues.SwapActive(); gammas.queues.SwapActive(); // Check if only charged particles are left that are looping. 
numElectrons = stats->inFlight[ParticleType::Electron]; numPositrons = stats->inFlight[ParticleType::Positron]; numGammas = stats->inFlight[ParticleType::Gamma]; if (numElectrons == previousElectrons && numPositrons == previousPositrons && numGammas == 0) { loopingNo++; } else { previousElectrons = numElectrons; previousPositrons = numPositrons; loopingNo = 0; } } while (inFlight > 0 && loopingNo < 20); if (inFlight > 0) { constexpr int DepositThreads = 32; for (int i = 0; i < ParticleType::NumParticleTypes; i++) { ParticleType &pType = particles[i]; int inFlightParticles = stats->inFlight[i]; if (inFlightParticles == 0) { continue; } int depositBlocks = (inFlightParticles + DepositThreads - 1) / DepositThreads; depositBlocks = std::min(depositBlocks, MaxBlocks); DepositEnergy<<<depositBlocks, DepositThreads, 0, stream>>>(pType.tracks, pType.queues.currentlyActive, globalScoring, scoringPerVolume); ClearQueue<<<1, 1, 0, stream>>>(pType.queues.currentlyActive); } COPCORE_CUDA_CHECK(cudaStreamSynchronize(stream)); } } } void TestEm3(const vecgeom::cxx::VPlacedVolume *world, int numParticles, double energy, int numThreads, int batch, double startX, const int *MCIndex_host, ScoringPerVolume *scoringPerVolume_host, int numVolumes, GlobalScoring *globalScoring_host) { auto &cudaManager = vecgeom::cxx::CudaManager::Instance(); cudaManager.LoadGeometry(world); cudaManager.Synchronize(); const vecgeom::cuda::VPlacedVolume *world_dev = cudaManager.world_gpu(); InitBVH(); G4HepEmState *state = InitG4HepEm(); // Transfer MC indices. int *MCIndex_dev = nullptr; COPCORE_CUDA_CHECK(cudaMalloc(&MCIndex_dev, sizeof(int) * numVolumes)); COPCORE_CUDA_CHECK(cudaMemcpy(MCIndex_dev, MCIndex_host, sizeof(int) * numVolumes, cudaMemcpyHostToDevice)); COPCORE_CUDA_CHECK(cudaMemcpyToSymbol(MCIndex, &MCIndex_dev, sizeof(int *))); // Capacity of the different containers aka the maximum number of particles. constexpr int Capacity = 256 * 1024; std::cout << "INFO: capacity of containers set to " << Capacity << std::endl; if (batch == -1) { // Rule of thumb: at most 1000 particles of one type per GeV primary. batch = Capacity / ((int)energy / copcore::units::GeV) / 1000; } else if (batch < 1) { batch = 1; } std::cout << "INFO: batching " << batch << " particles for transport on the GPU" << std::endl; if (BzFieldValue != 0) { std::cout << "INFO: running with field Bz = " << BzFieldValue / copcore::units::tesla << " T" << std::endl; } else { std::cout << "INFO: running with magnetic field OFF" << std::endl; } // Allocate memory to score charged track length and energy deposit per volume. double *chargedTrackLength = nullptr; COPCORE_CUDA_CHECK(cudaMalloc(&chargedTrackLength, sizeof(double) * numVolumes)); COPCORE_CUDA_CHECK(cudaMemset(chargedTrackLength, 0, sizeof(double) * numVolumes)); double *energyDeposit = nullptr; COPCORE_CUDA_CHECK(cudaMalloc(&energyDeposit, sizeof(double) * numVolumes)); COPCORE_CUDA_CHECK(cudaMemset(energyDeposit, 0, sizeof(double) * numVolumes)); // Allocate and initialize scoring data structures. 
GlobalScoring *globalScoring = nullptr; COPCORE_CUDA_CHECK(cudaMalloc(&globalScoring, sizeof(GlobalScoring))); COPCORE_CUDA_CHECK(cudaMemset(globalScoring, 0, sizeof(GlobalScoring))); ScoringPerVolume *scoringPerVolume = nullptr; ScoringPerVolume scoringPerVolume_devPtrs; scoringPerVolume_devPtrs.chargedTrackLength = chargedTrackLength; scoringPerVolume_devPtrs.energyDeposit = energyDeposit; COPCORE_CUDA_CHECK(cudaMalloc(&scoringPerVolume, sizeof(ScoringPerVolume))); COPCORE_CUDA_CHECK( cudaMemcpy(scoringPerVolume, &scoringPerVolume_devPtrs, sizeof(ScoringPerVolume), cudaMemcpyHostToDevice)); // Allocate memory to hold a "vanilla" SlotManager to initialize for each batch. SlotManager slotManagerInit(Capacity); SlotManager *slotManagerInit_dev = nullptr; COPCORE_CUDA_CHECK(cudaMalloc(&slotManagerInit_dev, sizeof(SlotManager))); COPCORE_CUDA_CHECK(cudaMemcpy(slotManagerInit_dev, &slotManagerInit, sizeof(SlotManager), cudaMemcpyHostToDevice)); // Set up the threads, including their allocation. std::vector<std::thread> threads(numThreads); std::vector<ThreadData> threadData(numThreads); for (int t = 0; t < numThreads; t++) { ThreadData &data = threadData[t]; data.world = world_dev; data.MCIndex = MCIndex_dev; data.slotManagerInit = slotManagerInit_dev; data.scoringPerVolume = scoringPerVolume; data.globalScoring = globalScoring; data.id = t; data.threads = numThreads; data.numParticles = numParticles; data.batch = batch; data.energy = energy; data.startX = startX; data.Allocate(Capacity); } // Start the clock and launch the threads. vecgeom::Stopwatch timer; timer.Start(); std::cout << std::endl << "Simulating particles ..."; for (int t = 0; t < numThreads; t++) { threads[t] = std::thread(Worker, &threadData[t]); } // Join the threads. for (auto &&t : threads) { t.join(); } std::cout << " done!" << std::endl; auto time = timer.Stop(); std::cout << "Run time: " << time << "\n"; // Transfer back scoring. COPCORE_CUDA_CHECK(cudaMemcpy(globalScoring_host, globalScoring, sizeof(GlobalScoring), cudaMemcpyDeviceToHost)); // Transfer back the scoring per volume (charged track length and energy deposit). COPCORE_CUDA_CHECK(cudaMemcpy(scoringPerVolume_host->chargedTrackLength, scoringPerVolume_devPtrs.chargedTrackLength, sizeof(double) * numVolumes, cudaMemcpyDeviceToHost)); COPCORE_CUDA_CHECK(cudaMemcpy(scoringPerVolume_host->energyDeposit, scoringPerVolume_devPtrs.energyDeposit, sizeof(double) * numVolumes, cudaMemcpyDeviceToHost)); // Free resources. for (auto &&d : threadData) { d.Free(); } COPCORE_CUDA_CHECK(cudaFree(MCIndex_dev)); COPCORE_CUDA_CHECK(cudaFree(chargedTrackLength)); COPCORE_CUDA_CHECK(cudaFree(energyDeposit)); COPCORE_CUDA_CHECK(cudaFree(globalScoring)); COPCORE_CUDA_CHECK(cudaFree(scoringPerVolume)); COPCORE_CUDA_CHECK(cudaFree(slotManagerInit_dev)); FreeG4HepEm(state); }
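The Worker function above splits numParticles events across host threads with the usual balanced-partition arithmetic (the first `remainder` threads take one extra event). A standalone sketch, with hypothetical values and not part of the example itself, replays the startEvent/endEvent computation and checks that the per-thread ranges tile [1, numParticles] with no gaps or overlaps:

// Standalone sketch: reproduces the event-range arithmetic from Worker above.
#include <cassert>
#include <cstdio>

int main() {
  const int numParticles = 10; // hypothetical values, for illustration only
  const int numThreads   = 4;

  int expectedStart = 1;
  for (int id = 0; id < numThreads; id++) {
    int perThread = numParticles / numThreads;
    int remainder = numParticles % numThreads;
    int startEvent = 1 + id * perThread;
    if (id < remainder) {
      perThread++;
      startEvent += id;
    } else {
      startEvent += remainder;
    }
    int endEvent = startEvent + perThread;

    assert(startEvent == expectedStart); // ranges are contiguous
    expectedStart = endEvent;
    printf("thread %d: events [%d, %d)\n", id, startEvent, endEvent);
  }
  assert(expectedStart == numParticles + 1); // every event is assigned exactly once
  return 0;
}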
de9566464218772201bc6ace65fd724c95f6f14c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

/*
 * This program computes the sum of the elements of
 * vector v using the pairwise (cascading) sum algorithm.
 */

#define N 8 // length of vector v. MUST BE A POWER OF 2!!!

// Fill the vector v with n random floating point numbers.
void vfill(float* v, int n){
  int i;
  for(i = 0; i < n; i++){
    v[i] = (float) rand() / RAND_MAX;
  }
}

// Print the vector v.
void vprint(float* v, int n){
  int i;
  printf("v = \n");
  for(i = 0; i < n; i++){
    printf("%7.3f\n", v[i]);
  }
  printf("\n");
}

// Pairwise-sum the elements of vector v and store the result in v[0].
__global__ void psum(float* v){
  int t = threadIdx.x; // Thread index.
  int n = blockDim.x;  // Should be half the length of v.

  while (n != 0) {
    if(t < n)
      v[t] += v[t + n];
    __syncthreads();
    n /= 2;
  }
}

int main (void){
  float *v_h, *v_d; // host and device copies of our vector, respectively

  // dynamically allocate memory on the host for v_h
  v_h = (float*) malloc(N * sizeof(*v_h));

  // dynamically allocate memory on the device for v_d
  hipMalloc((float**) &v_d, N * sizeof(*v_d));

  // Fill v_h with N random floating point numbers.
  vfill(v_h, N);

  // Print v_h to the console
  vprint(v_h, N);

  // Write the contents of v_h to v_d
  hipMemcpy(v_d, v_h, N * sizeof(float), hipMemcpyHostToDevice);

  // Compute the pairwise sum of the elements of v_d and store the result in v_d[0].
  hipLaunchKernelGGL((psum), dim3(1), dim3(N/2), 0, 0, v_d);

  // Write the pairwise sum, v_d[0], to v_h[0].
  hipMemcpy(v_h, v_d, sizeof(float), hipMemcpyDeviceToHost);

  // Print the pairwise sum.
  printf("Pairwise sum = %7.3f\n", v_h[0]);

  // Free dynamically-allocated host memory
  free(v_h);

  // Free dynamically-allocated device memory
  hipFree(v_d);
}
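The kernel above folds the upper half of the vector onto the lower half each round. A minimal host-only sketch of the same cascading reduction (plain C++, power-of-two length assumed, names chosen here for illustration) can be compared against a sequential sum as a sanity check of the algorithm:

// Host-side reference for the pairwise (cascading) sum: repeatedly fold the
// upper half of the array onto the lower half, exactly as the psum kernel does.
#include <cstdio>
#include <cstdlib>

float pairwise_sum(float* v, int n) { // n must be a power of 2
  for (int half = n / 2; half != 0; half /= 2) {
    for (int t = 0; t < half; t++) {
      v[t] += v[t + half];
    }
  }
  return v[0];
}

int main() {
  const int n = 8;
  float v[n];
  float sequential = 0.0f;
  for (int i = 0; i < n; i++) {
    v[i] = (float) rand() / RAND_MAX;
    sequential += v[i];
  }
  float pairwise = pairwise_sum(v, n);
  // The two results should agree up to floating-point rounding.
  printf("sequential = %7.3f, pairwise = %7.3f\n", sequential, pairwise);
  return 0;
}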
de9566464218772201bc6ace65fd724c95f6f14c.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>

/*
 * This program computes the sum of the elements of
 * vector v using the pairwise (cascading) sum algorithm.
 */

#define N 8 // length of vector v. MUST BE A POWER OF 2!!!

// Fill the vector v with n random floating point numbers.
void vfill(float* v, int n){
  int i;
  for(i = 0; i < n; i++){
    v[i] = (float) rand() / RAND_MAX;
  }
}

// Print the vector v.
void vprint(float* v, int n){
  int i;
  printf("v = \n");
  for(i = 0; i < n; i++){
    printf("%7.3f\n", v[i]);
  }
  printf("\n");
}

// Pairwise-sum the elements of vector v and store the result in v[0].
__global__ void psum(float* v){
  int t = threadIdx.x; // Thread index.
  int n = blockDim.x;  // Should be half the length of v.

  while (n != 0) {
    if(t < n)
      v[t] += v[t + n];
    __syncthreads();
    n /= 2;
  }
}

int main (void){
  float *v_h, *v_d; // host and device copies of our vector, respectively

  // dynamically allocate memory on the host for v_h
  v_h = (float*) malloc(N * sizeof(*v_h));

  // dynamically allocate memory on the device for v_d
  cudaMalloc((float**) &v_d, N * sizeof(*v_d));

  // Fill v_h with N random floating point numbers.
  vfill(v_h, N);

  // Print v_h to the console
  vprint(v_h, N);

  // Write the contents of v_h to v_d
  cudaMemcpy(v_d, v_h, N * sizeof(float), cudaMemcpyHostToDevice);

  // Compute the pairwise sum of the elements of v_d and store the result in v_d[0].
  psum<<< 1, N/2 >>>(v_d);

  // Write the pairwise sum, v_d[0], to v_h[0].
  cudaMemcpy(v_h, v_d, sizeof(float), cudaMemcpyDeviceToHost);

  // Print the pairwise sum.
  printf("Pairwise sum = %7.3f\n", v_h[0]);

  // Free dynamically-allocated host memory
  free(v_h);

  // Free dynamically-allocated device memory
  cudaFree(v_d);
}
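Note that the listing above discards the return codes of the CUDA runtime calls. One common pattern, shown here only as a sketch (macro and names chosen for illustration, not part of the original program), is a small checking wrapper around each call so that a failed allocation or copy aborts with a readable message:

// Minimal CUDA error-checking sketch: report file, line and error string on failure.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                      \
  do {                                                                        \
    cudaError_t err_ = (call);                                                \
    if (err_ != cudaSuccess) {                                                \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                             \
              cudaGetErrorString(err_), __FILE__, __LINE__);                  \
      exit(EXIT_FAILURE);                                                     \
    }                                                                         \
  } while (0)

int main() {
  float *v_d = NULL;
  CUDA_CHECK(cudaMalloc((void**)&v_d, 8 * sizeof(float)));
  CUDA_CHECK(cudaMemset(v_d, 0, 8 * sizeof(float)));
  CUDA_CHECK(cudaFree(v_d));
  return 0;
}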
b77182dfdac08b4e01619eaa501aa98b9ee0b3fb.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrix_multiply_tiling_cuda.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// Thread-block shapes and matrix sizes swept by the benchmark.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    if (argc < 2) {
        fprintf(stderr, "usage: %s <number of matrix sizes>\n", argv[0]);
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    // matrices_ has 7 entries; clamp the requested count to avoid out-of-bounds reads.
    if (matrix_len > 7) matrix_len = 7;
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // The original harness allocated only XSIZE*YSIZE bytes; sizeof(int) is
            // added so each buffer holds XSIZE*YSIZE elements.
            int *A = NULL;
            hipMalloc(&A, XSIZE * YSIZE * sizeof(int));
            int *B = NULL;
            hipMalloc(&B, XSIZE * YSIZE * sizeof(int));
            int *C = NULL;
            hipMalloc(&C, XSIZE * YSIZE * sizeof(int));
            int m = 2;
            int n = XSIZE * YSIZE;
            // Round the launch dimensions up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL((matrix_multiply_tiling_cuda), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, m, n);
            hipDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((matrix_multiply_tiling_cuda), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, m, n);
            }
            // Drain the warm-up launches before starting the clock.
            hipDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((matrix_multiply_tiling_cuda), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, m, n);
            }
            // Wait for the queued kernels to finish so the measured interval covers
            // execution time rather than only asynchronous launch overhead.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            hipFree(A);
            hipFree(B);
            hipFree(C);
        }
    }
}
b77182dfdac08b4e01619eaa501aa98b9ee0b3fb.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrix_multiply_tiling_cuda.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// Thread-block shapes and matrix sizes swept by the benchmark.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    if (argc < 2) {
        fprintf(stderr, "usage: %s <number of matrix sizes>\n", argv[0]);
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    // matrices_ has 7 entries; clamp the requested count to avoid out-of-bounds reads.
    if (matrix_len > 7) matrix_len = 7;
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // The original harness allocated only XSIZE*YSIZE bytes; sizeof(int) is
            // added so each buffer holds XSIZE*YSIZE elements.
            int *A = NULL;
            cudaMalloc(&A, XSIZE * YSIZE * sizeof(int));
            int *B = NULL;
            cudaMalloc(&B, XSIZE * YSIZE * sizeof(int));
            int *C = NULL;
            cudaMalloc(&C, XSIZE * YSIZE * sizeof(int));
            int m = 2;
            int n = XSIZE * YSIZE;
            // Round the launch dimensions up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            matrix_multiply_tiling_cuda<<<gridBlock, threadBlock>>>(A, B, C, m, n);
            cudaDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                matrix_multiply_tiling_cuda<<<gridBlock, threadBlock>>>(A, B, C, m, n);
            }
            // Drain the warm-up launches before starting the clock.
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                matrix_multiply_tiling_cuda<<<gridBlock, threadBlock>>>(A, B, C, m, n);
            }
            // Wait for the queued kernels to finish so the measured interval covers
            // execution time rather than only asynchronous launch overhead.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            cudaFree(A);
            cudaFree(B);
            cudaFree(C);
        }
    }
}
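The harness above times kernel launches with std::chrono on the host. An alternative sketch using CUDA events (with a hypothetical empty `kernel` standing in for the benchmarked one) measures the elapsed device time directly; cudaEventSynchronize on the stop event plays the role of the explicit device synchronization added before reading the host clock above:

// Sketch: timing a batch of kernel launches with CUDA events instead of
// std::chrono. cudaEventElapsedTime reports milliseconds between the records.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void kernel() {} // hypothetical placeholder for the measured kernel

int main() {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);
  for (int i = 0; i < 1000; i++) {
    kernel<<<1, 32>>>();
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop); // wait until all recorded work has finished

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("1000 launches took %f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return 0;
}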
5cbf1f738c1fcc50ba8f155a1829e58df1b75ec1.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include <vector> #include <hip/hip_runtime.h> #include "NPP_staging.hpp" #include "opencv2/gpu/device/warp.hpp" #include "opencv2/gpu/device/warp_shuffle.hpp" texture<Ncv8u, 1, hipReadModeElementType> tex8u; texture<Ncv32u, 1, hipReadModeElementType> tex32u; texture<uint2, 1, hipReadModeElementType> tex64u; //============================================================================== // // CUDA streams handling // //============================================================================== static hipStream_t nppStream = 0; hipStream_t nppStGetActiveCUDAstream(void) { return nppStream; } hipStream_t nppStSetActiveCUDAstream(hipStream_t cudaStream) { hipStream_t tmp = nppStream; nppStream = cudaStream; return tmp; } //============================================================================== // // BlockScan.cuh // //============================================================================== NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive //Almost the same as naive scan1Inclusive, but doesn't need __syncthreads() //assuming size <= WARP_SIZE and size is power of 2 template <class T> inline __device__ T warpScanInclusive(T idata, volatile T *s_Data) { #if __CUDA_ARCH__ >= 300 const unsigned int laneId = cv::gpu::device::Warp::laneId(); // scan on shuffl functions #pragma unroll for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2) { const T n = cv::gpu::device::shfl_up(idata, i); if (laneId >= i) idata += n; } return idata; #else Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return s_Data[pos]; #endif } inline __device__ Ncv64u warpScanInclusive(Ncv64u idata, volatile Ncv64u *s_Data) { Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return s_Data[pos]; } template <class T> inline __device__ T warpScanExclusive(T idata, volatile T *s_Data) { return warpScanInclusive(idata, s_Data) - idata; } template <class T, Ncv32u tiNumScanThreads> inline __device__ T blockScanInclusive(T idata, volatile T *s_Data) { if (tiNumScanThreads > K_WARP_SIZE) { //Bottom-level inclusive warp scan T warpResult = warpScanInclusive(idata, s_Data); //Save top elements of each warp for exclusive warp scan //sync to wait for warp scans to complete (because s_Data is being overwritten) __syncthreads(); if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) ) { s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult; } //wait for warp scans to complete __syncthreads(); if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) ) { //grab top warp elements T val = s_Data[threadIdx.x]; //calculate exclusive scan and write back to shared memory s_Data[threadIdx.x] = warpScanExclusive(val, s_Data); } //return updated warp scans with exclusive scan results __syncthreads(); return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE]; } else { return warpScanInclusive(idata, s_Data); } } //============================================================================== // // IntegralImage.cu // 
//============================================================================== const Ncv32u NUM_SCAN_THREADS = 256; const Ncv32u LOG2_NUM_SCAN_THREADS = 8; template<class T_in, class T_out> struct _scanElemOp { template<bool tbDoSqr> static inline __host__ __device__ T_out scanElemOp(T_in elem) { return scanElemOp( elem, Int2Type<(int)tbDoSqr>() ); } private: template <int v> struct Int2Type { enum { value = v }; }; static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<0>) { return (T_out)elem; } static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<1>) { return (T_out)(elem*elem); } }; template<class T> inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs); template<> inline __device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs); } template<> inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } template<> inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } /** * \brief Segmented scan kernel * * Calculates per-row prefix scans of the input image. * Out-of-bounds safe: reads 'size' elements, writes 'size+1' elements * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * \tparam T_op Defines an operation to be performed on the input image pixels * * \param d_src [IN] Source image pointer * \param srcWidth [IN] Source image width * \param srcStride [IN] Source image stride * \param d_II [OUT] Output image pointer * \param IIstride [IN] Output image stride * * \return None */ template <class T_in, class T_out, bool tbDoSqr> __global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride, T_out *d_II, Ncv32u IIstride) { //advance pointers to the current line if (sizeof(T_in) != 1) { d_src += srcStride * blockIdx.x; } //for initial image 8bit source we use texref tex8u d_II += IIstride * blockIdx.x; Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS; Ncv32u offsetX = 0; __shared__ T_out shmem[NUM_SCAN_THREADS * 2]; __shared__ T_out carryElem; carryElem = 0; __syncthreads(); while (numBuckets--) { Ncv32u curElemOffs = offsetX + threadIdx.x; T_out curScanElem; T_in curElem; T_out curElemMod; if (curElemOffs < srcWidth) { //load elements curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs); } curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem); //inclusive scan curScanElem = blockScanInclusive<T_out, NUM_SCAN_THREADS>(curElemMod, shmem); if (curElemOffs <= srcWidth) { //make scan exclusive and write the bucket to the output buffer d_II[curElemOffs] = carryElem + curScanElem - curElemMod; offsetX += NUM_SCAN_THREADS; } //remember last element for subsequent buckets adjustment __syncthreads(); if (threadIdx.x == NUM_SCAN_THREADS-1) { carryElem += curScanElem; } __syncthreads(); } if (offsetX == srcWidth && !threadIdx.x) { d_II[offsetX] = carryElem; } } template <bool tbDoSqr, class T_in, class T_out> NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride, T_out *d_dst, Ncv32u dstStride, NcvSize32u roi) { hipChannelFormatDesc cfdTex; size_t alignmentOffset = 0; if (sizeof(T_in) == 1) { cfdTex = hipCreateChannelDesc<Ncv8u>(); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, roi.height 
* srcStride), NPPST_TEXTURE_BIND_ERROR); if (alignmentOffset > 0) { ncvAssertCUDAReturn(hipUnbindTexture(tex8u), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); } } hipLaunchKernelGGL(( scanRows <T_in, T_out, tbDoSqr>) , dim3(roi.height), dim3(NUM_SCAN_THREADS), 0, nppStGetActiveCUDAstream(), d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment) { Ncv32u alignMask = allocatorAlignment-1; Ncv32u inverseAlignMask = ~alignMask; Ncv32u dimBytes = dim * elemTypeSize; Ncv32u pitch = (dimBytes + alignMask) & inverseAlignMask; Ncv32u PaddedDim = pitch / elemTypeSize; return PaddedDim; } template <class T_in, class T_out> NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep, T_out *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) && dstStep >= (roi.width + 1) * sizeof(T_out) && srcStep % sizeof(T_in) == 0 && dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T_in); dstStep /= sizeof(T_out); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <false> (d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u), (Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false> (Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), (Ncv32u *)d_dst, dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), 
NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width && dstStep >= (roi.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64; Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? PaddedHeightII32 : PaddedHeightII64; NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax); ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64); ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <true, Ncv8u, Ncv32u> (d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u), Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false, Ncv32u, Ncv64u> (Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u), d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width, (Ncv32u*)NULL, (roiSize.width+1) * sizeof(Ncv32u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator 
gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f), (Ncv32f*)NULL, (roiSize.width+1) * sizeof(Ncv32f), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width, NULL, (roiSize.width+1) * sizeof(Ncv64u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv32u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep, Ncv32f *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv32u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv32u) && dstStep % sizeof(Ncv32u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv32u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv32u top = h_dst[(i-1) * dstStep + j]; Ncv32u left = h_dst[i * dstStep + (j - 1)]; Ncv32u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } 
NCVStatus nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep, Ncv32f *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width * sizeof(Ncv32f) && dstStep >= (roiSize.width + 1) * sizeof(Ncv32f) && srcStep % sizeof(Ncv32f) == 0 && dstStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(Ncv32f); dstStep /= sizeof(Ncv32f); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0.0f; for (Ncv32u j=1; j<WidthII; j++) { Ncv32f top = h_dst[(i-1) * dstStep + j]; Ncv32f left = h_dst[i * dstStep + (j - 1)]; Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv64u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv64u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv64u top = h_dst[(i-1) * dstStep + j]; Ncv64u left = h_dst[i * dstStep + (j - 1)]; Ncv64u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv64u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem*elem + left - topleft + top; } } return NPPST_SUCCESS; } //============================================================================== // // Decimate.cu // //============================================================================== const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32; const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8; template<class T, NcvBool tbCacheTexture> __device__ T getElem_Decimate(Ncv32u x, T *d_src); template<> __device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src) { return tex1Dfetch(tex32u, x); } template<> __device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src) { return d_src[x]; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src) { return d_src[x]; } template <class T, NcvBool tbCacheTexture> __global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u dstRoi, Ncv32u scale) { int curX = blockIdx.x * blockDim.x + threadIdx.x; int curY = blockIdx.y * blockDim.y + threadIdx.y; if (curX >= dstRoi.width || curY >= dstRoi.height) { return; } d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src); } template <class T> static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); 
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale), NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; dim3 grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X, (dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y); dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y); if (!readThruTexture) { hipLaunchKernelGGL(( decimate_C1R <T, false>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcStep, d_dst, dstStep, dstRoi, scale); } else { hipChannelFormatDesc cfdTexSrc; if (sizeof(T) == sizeof(Ncv32u)) { cfdTexSrc = hipCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } else { cfdTexSrc = hipCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } hipLaunchKernelGGL(( decimate_C1R <T, true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcStep, d_dst, dstStep, dstRoi, scale); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep, T *h_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) && srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; for (Ncv32u i=0; i<dstRoi.height; i++) { for (Ncv32u j=0; j<dstRoi.width; j++) { h_dst[i*dstStep+j] = h_src[i*scale*srcStep + j*scale]; } } return NPPST_SUCCESS; } #define implementNppDecimate(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \ { \ return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, \ srcRoi, scale, readThruTexture); \ } #define implementNppDecimateHost(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale) \ { \ return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u *)h_dst, dstStep, \ srcRoi, scale); \ } implementNppDecimate(32, u) implementNppDecimate(32, s) implementNppDecimate(32, f) implementNppDecimate(64, u) implementNppDecimate(64, s) implementNppDecimate(64, f) implementNppDecimateHost(32, u) implementNppDecimateHost(32, s) implementNppDecimateHost(32, f) 
implementNppDecimateHost(64, u) implementNppDecimateHost(64, s) implementNppDecimateHost(64, f) //============================================================================== // // RectStdDev.cu // //============================================================================== const Ncv32u NUM_RECTSTDDEV_THREADS = 128; template <NcvBool tbCacheTexture> __device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum) { if (tbCacheTexture) { return tex1Dfetch(tex32u, x); } else { return d_sum[x]; } } template <NcvBool tbCacheTexture> __device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum) { if (tbCacheTexture) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } else { return d_sqsum[x]; } } template <NcvBool tbCacheTexture> __global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep, Ncv64u *d_sqsum, Ncv32u sqsumStep, Ncv32f *d_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f invRectArea) { Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x; if (x_offs >= roi.width) { return; } Ncv32u sum_offset = blockIdx.y * sumStep + x_offs; Ncv32u sqsum_offset = blockIdx.y * sqsumStep + x_offs; //OPT: try swapping order (could change cache hit/miss ratio) Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum); Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum); Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum); Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum); Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl; Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br; sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum); sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum); sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum); sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum); Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl; Ncv32f mean = sum_val * invRectArea; ////////////////////////////////////////////////////////////////////////// // sqsum_val_res = sqsum_val / rectArea ////////////////////////////////////////////////////////////////////////// Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val); Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1); Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2; Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3); sqsum_val_1 *= invRectArea; sqsum_val_4 *= invRectArea; Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4; ////////////////////////////////////////////////////////////////////////// // variance = sqsum_val_res - mean * mean ////////////////////////////////////////////////////////////////////////// #if defined DISABLE_MAD_SELECTIVELY Ncv32f variance = sqsum_val_2 - __fmul_rn(mean, mean); #else Ncv32f variance = sqsum_val_res - mean * mean; #endif ////////////////////////////////////////////////////////////////////////// // stddev = sqrtf(variance) ////////////////////////////////////////////////////////////////////////// //Ncv32f stddev = sqrtf(variance); Ncv32f stddev = __fsqrt_rn(variance); d_norm[blockIdx.y * normStep + x_offs] = stddev; } NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep, Ncv64u *d_sqsum, Ncv32u sqsumStep, Ncv32f *d_norm, Ncv32u 
normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea, NcvBool readThruTexture) { ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) && sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) && normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) && sumStep % sizeof(Ncv32u) == 0 && sqsumStep % sizeof(Ncv64u) == 0 && normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE); sumStep /= sizeof(Ncv32u); sqsumStep /= sizeof(Ncv64u); normStep /= sizeof(Ncv32f); Ncv32f rectArea = rect.width * rect.height * scaleArea; Ncv32f invRectArea = 1.0f / rectArea; dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height); dim3 block(NUM_RECTSTDDEV_THREADS); if (!readThruTexture) { hipLaunchKernelGGL(( rectStdDev_32f_C1R <false>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep, roi, rect, invRectArea); } else { hipChannelFormatDesc cfdTexSrc; hipChannelFormatDesc cfdTexSqr; cfdTexSrc = hipCreateChannelDesc<Ncv32u>(); cfdTexSqr = hipCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc, (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr, (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); hipLaunchKernelGGL(( rectStdDev_32f_C1R <true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep, Ncv64u *h_sqsum, Ncv32u sqsumStep, Ncv32f *h_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea) { ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) && sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) && normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) && sumStep % sizeof(Ncv32u) == 0 && sqsumStep % sizeof(Ncv64u) == 0 && normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE); sumStep /= sizeof(Ncv32u); sqsumStep /= sizeof(Ncv64u); normStep /= sizeof(Ncv32f); Ncv32f rectArea = rect.width * rect.height * scaleArea; Ncv32f invRectArea = 1.0f / rectArea; for (Ncv32u i=0; i<roi.height; i++) { for (Ncv32u j=0; j<roi.width; j++) { Ncv32u sum_offset = i * sumStep + j; Ncv32u sqsum_offset = i * sqsumStep + j; Ncv32u sum_tl = h_sum[sum_offset + rect.y * sumStep + rect.x]; Ncv32u sum_bl = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x]; Ncv32u sum_tr = h_sum[sum_offset + rect.y * sumStep + rect.x + rect.width]; Ncv32u sum_br = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width]; Ncv64f sum_val = sum_br + sum_tl - sum_tr - sum_bl; Ncv64u 
sqsum_tl = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x]; Ncv64u sqsum_bl = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x]; Ncv64u sqsum_tr = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x + rect.width]; Ncv64u sqsum_br = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width]; Ncv64f sqsum_val = (Ncv64f)(sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl); Ncv64f mean = sum_val * invRectArea; Ncv64f sqsum_val_2 = sqsum_val / rectArea; Ncv64f variance = sqsum_val_2 - mean * mean; h_norm[i * normStep + j] = (Ncv32f)sqrt(variance); } } return NPPST_SUCCESS; } //============================================================================== // // Transpose.cu // //============================================================================== const Ncv32u TRANSPOSE_TILE_DIM = 16; const Ncv32u TRANSPOSE_BLOCK_ROWS = 16; /** * \brief Matrix transpose kernel * * Calculates transpose of the input image * \see TRANSPOSE_TILE_DIM * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * * \param d_src [IN] Source image pointer * \param srcStride [IN] Source image stride * \param d_dst [OUT] Output image pointer * \param dstStride [IN] Output image stride * * \return None */ template <class T> __global__ void transpose(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { __shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1]; Ncv32u blockIdx_x, blockIdx_y; // do diagonal reordering if (gridDim.x == gridDim.y) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x; } else { Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y; blockIdx_y = bid % gridDim.y; blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x; } Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x; Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y; Ncv32u index_gmem = xIndex + yIndex * srcStride; if (xIndex < srcRoi.width) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.height) { tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride]; } } } __syncthreads(); xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x; yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y; index_gmem = xIndex + yIndex * dstStride; if (xIndex < srcRoi.height) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.width) { d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i]; } } } } template <class T> NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); dim3 grid((srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM, (srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM); dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM); hipLaunchKernelGGL(( transpose <T>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcStride, d_dst, dstStride, srcRoi); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride, T *h_dst, Ncv32u dstStride, 
NcvSize32u srcRoi) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); for (Ncv32u i=0; i<srcRoi.height; i++) { for (Ncv32u j=0; j<srcRoi.width; j++) { h_dst[j*dstStride+i] = h_src[i*srcStride + j]; } } return NPPST_SUCCESS; } #define implementNppTranspose(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \ { \ return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, srcRoi); \ } #define implementNppTransposeHost(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi) \ { \ return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u *)h_dst, dstStep, srcRoi); \ } implementNppTranspose(32,u) implementNppTranspose(32,s) implementNppTranspose(32,f) implementNppTranspose(64,u) implementNppTranspose(64,s) implementNppTranspose(64,f) implementNppTransposeHost(32,u) implementNppTransposeHost(32,s) implementNppTransposeHost(32,f) implementNppTransposeHost(64,u) implementNppTransposeHost(64,s) implementNppTransposeHost(64,f) NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperDevice<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperHost<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } //============================================================================== // // Compact.cu // //============================================================================== const Ncv32u NUM_REMOVE_THREADS = 256; template <bool bRemove, bool bWritePartial> __global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_blockSums, Ncv32u elemRemove) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn > srcLen + blockDim.x) { return; } __shared__ Ncv32u shmem[NUM_REMOVE_THREADS * 2]; Ncv32u scanElem = 0; if (elemAddrIn < srcLen) { if (bRemove) { scanElem = (d_src[elemAddrIn] != elemRemove) ? 
1 : 0; } else { scanElem = d_src[elemAddrIn]; } } Ncv32u localScanInc = blockScanInclusive<Ncv32u, NUM_REMOVE_THREADS>(scanElem, shmem); __syncthreads(); if (elemAddrIn < srcLen) { if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial) { d_blockSums[blockId] = localScanInc; } if (bRemove) { d_offsets[elemAddrIn] = localScanInc - scanElem; } else { d_src[elemAddrIn] = localScanInc - scanElem; } } } __global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } __shared__ Ncv32u valOffs; valOffs = d_blockSums[blockId]; __syncthreads(); d_offsets[elemAddrIn] += valOffs; } __global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_dst, Ncv32u elemRemove, Ncv32u *dstLenValue) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } Ncv32u elem = d_src[elemAddrIn]; Ncv32u elemAddrOut = d_offsets[elemAddrIn]; if (elem != elemRemove) { d_dst[elemAddrOut] = elem; } if (elemAddrIn == srcLen-1) { if (elem != elemRemove) { *dstLenValue = elemAddrOut + 1; } else { *dstLenValue = elemAddrOut; } } } NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_dst, Ncv32u *dstLenPinned, Ncv32u elemRemove, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { if (dstLenPinned != NULL) { *dstLenPinned = 0; } return NPPST_SUCCESS; } std::vector<Ncv32u> partSumNums; std::vector<Ncv32u> partSumOffsets; Ncv32u partSumLastNum = srcLen; Ncv32u partSumLastOffs = 0; do { partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * sizeof(Ncv32u), gpuAllocator.alignment()) / sizeof(Ncv32u); partSumLastOffs += curPartSumAlignedLength; partSumLastNum = (partSumLastNum + NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS; } while (partSumLastNum>1); partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1); ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1); ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN dim3 block(NUM_REMOVE_THREADS); //calculate zero-level partial sums for indices calculation if (partSumNums.size() > 2) { dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } hipLaunchKernelGGL(( removePass1Scan <true, true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcLen, d_hierSums.ptr(), d_hierSums.ptr() + partSumOffsets[1], elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //calculate hierarchical partial sums for (Ncv32u i=1; i<partSumNums.size()-1; i++) { dim3 grid_partial(partSumNums[i+1]); if (grid_partial.x > 65535) { grid_partial.y = (grid_partial.x + 65534) / 65535; grid_partial.x = 65535; } if (grid_partial.x != 1) { hipLaunchKernelGGL(( removePass1Scan <false, true>) , dim3(grid_partial), dim3(block), 0, 
nppStGetActiveCUDAstream(), d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], NULL, d_hierSums.ptr() + partSumOffsets[i+1], 0); } else { hipLaunchKernelGGL(( removePass1Scan <false, false>) , dim3(grid_partial), dim3(block), 0, nppStGetActiveCUDAstream(), d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], NULL, NULL, 0); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //adjust hierarchical partial sums for (Ncv32s i=(Ncv32s)partSumNums.size()-3; i>=0; i--) { dim3 grid_local(partSumNums[i+1]); if (grid_local.x > 65535) { grid_local.y = (grid_local.x + 65534) / 65535; grid_local.x = 65535; } hipLaunchKernelGGL(( removePass2Adjust) , dim3(grid_local), dim3(block), 0, nppStGetActiveCUDAstream(), d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], d_hierSums.ptr() + partSumOffsets[i+1]); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } } else { dim3 grid_local(partSumNums[1]); hipLaunchKernelGGL(( removePass1Scan <true, false>) , dim3(grid_local), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcLen, d_hierSums.ptr(), NULL, elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //compact source vector using indices dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } hipLaunchKernelGGL(( removePass3Compact) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcLen, d_hierSums.ptr(), d_dst, elemRemove, d_numDstElements.ptr()); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //get number of dst elements if (dstLenPinned != NULL) { ncvAssertCUDAReturn(hipMemcpyAsync(dstLenPinned, d_numDstElements.ptr(), sizeof(Ncv32u), hipMemcpyDeviceToHost, nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); } NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { *pBufsize = 0; return NPPST_SUCCESS; } NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = compactVector_32u_device(NULL, srcLen, NULL, NULL, 0xC001C0DE, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp); } NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp); } NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_dst, Ncv32u *p_dstLen, Ncv32u elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen, elemRemove, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen, Ncv32s *d_dst, Ncv32u *p_dstLen, Ncv32s elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u 
*)d_dst, p_dstLen, *(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp); } #if defined __GNUC__ && __GNUC__ > 2 && __GNUC_MINOR__ > 4 typedef Ncv32u __attribute__((__may_alias__)) Ncv32u_a; #else typedef Ncv32u Ncv32u_a; #endif NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen, Ncv32f *d_dst, Ncv32u *p_dstLen, Ncv32f elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen, *(Ncv32u_a *)&elemRemove, pBuffer, bufSize, devProp); } NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen, Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { if (dstLen != NULL) { *dstLen = 0; } return NPPST_SUCCESS; } Ncv32u dstIndex = 0; for (Ncv32u srcIndex=0; srcIndex<srcLen; srcIndex++) { if (h_src[srcIndex] != elemRemove) { h_dst[dstIndex++] = h_src[srcIndex]; } } if (dstLen != NULL) { *dstLen = dstIndex; } return NPPST_SUCCESS; } NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen, Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove) { return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove); } NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen, Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove) { return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove); } //============================================================================== // // Filter.cu // //============================================================================== texture <float, 1, hipReadModeElementType> texSrc; texture <float, 1, hipReadModeElementType> texKernel; __forceinline__ __device__ float getValueMirrorRow(const int rowOffset, int i, int w) { if (i < 0) i = 1 - i; if (i >= w) i = w + w - i - 1; return tex1Dfetch (texSrc, rowOffset + i); } __forceinline__ __device__ float getValueMirrorColumn(const int offset, const int rowStep, int j, int h) { if (j < 0) j = 1 - j; if (j >= h) j = h + h - j - 1; return tex1Dfetch (texSrc, offset + j * rowStep); } __global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u roi, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { // position within ROI const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix >= roi.width || iy >= roi.height) { return; } const int p = nKernelSize - nAnchor - 1; const int j = roi.y + iy; const int rowOffset = j * srcStep + roi.x; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorRow (rowOffset, ix + m - p, roi.width) * tex1Dfetch (texKernel, m); } pDst[iy * dstStep + ix] = sum * multiplier; } __global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u roi, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix >= roi.width || iy >= roi.height) { return; } const int p = nKernelSize - nAnchor - 1; const int i = roi.x + ix; const int offset = i + roi.y * srcStep; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorColumn (offset, srcStep, iy + m - p, roi.height) * tex1Dfetch (texKernel, m); } pDst[ix + iy * dstStep] = sum * multiplier; } NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc, 
NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) <= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } hipChannelFormatDesc floatChannel = hipCreateChannelDesc <float> (); texSrc.normalized = false; texKernel.normalized = false; hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderNone: return NPPST_ERROR; case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: hipLaunchKernelGGL(( FilterRowBorderMirror_32f_C1R) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) <= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } hipChannelFormatDesc floatChannel = hipCreateChannelDesc <float> (); texSrc.normalized = false; texKernel.normalized = false; hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: hipLaunchKernelGGL(( FilterColumnBorderMirror_32f_C1R) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcStep, pDst, 
dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } //============================================================================== // // FrameInterpolate.cu // //============================================================================== inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom) { return (num + denom - 1)/denom; } texture<float, 2, hipReadModeElementType> tex_src1; texture<float, 2, hipReadModeElementType> tex_src0; __global__ void BlendFramesKernel(const float *u, const float *v, // forward flow const float *ur, const float *vr, // backward flow const float *o0, const float *o1, // coverage masks int w, int h, int s, float theta, float *out) { const int ix = threadIdx.x + blockDim.x * blockIdx.x; const int iy = threadIdx.y + blockDim.y * blockIdx.y; const int pos = ix + s * iy; if (ix >= w || iy >= h) return; float _u = u[pos]; float _v = v[pos]; float _ur = ur[pos]; float _vr = vr[pos]; float x = (float)ix + 0.5f; float y = (float)iy + 0.5f; bool b0 = o0[pos] > 1e-4f; bool b1 = o1[pos] > 1e-4f; if (b0 && b1) { // pixel is visible on both frames out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta) * (1.0f - theta) + tex2D(tex_src1, x + _u * (1.0f - theta), y + _v * (1.0f - theta)) * theta; } else if (b0) { // visible on the first frame only out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta); } else { // visible on the second frame only out[pos] = tex2D(tex_src1, x - _ur * (1.0f - theta), y - _vr * (1.0f - theta)); } } NCVStatus BlendFrames(const Ncv32f *src0, const Ncv32f *src1, const Ncv32f *ufi, const Ncv32f *vfi, const Ncv32f *ubi, const Ncv32f *vbi, const Ncv32f *o1, const Ncv32f *o2, Ncv32u width, Ncv32u height, Ncv32u stride, Ncv32f theta, Ncv32f *out) { tex_src1.addressMode[0] = hipAddressModeClamp; tex_src1.addressMode[1] = hipAddressModeClamp; tex_src1.filterMode = hipFilterModeLinear; tex_src1.normalized = false; tex_src0.addressMode[0] = hipAddressModeClamp; tex_src0.addressMode[1] = hipAddressModeClamp; tex_src0.filterMode = hipFilterModeLinear; tex_src0.normalized = false; hipChannelFormatDesc desc = hipCreateChannelDesc <float> (); const Ncv32u pitch = stride * sizeof (float); ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src1, src1, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src0, src0, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR); dim3 threads (32, 4); dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y)); hipLaunchKernelGGL(( BlendFramesKernel), dim3(blocks), dim3(threads), 0, nppStGetActiveCUDAstream (), ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize, Ncv32u nStep, Ncv32u *hpSize) { NCVStatus status = NPPST_ERROR; status = nppiStVectorWarpGetBufferSize(srcSize, nStep, hpSize); return status; } NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState) { // check state validity ncvAssertReturn (pState->pSrcFrame0 != 0 && pState->pSrcFrame1 != 0 && pState->pFU != 0 && pState->pFV != 0 && pState->pBU != 0 && pState->pBV != 0 && pState->pNewFrame != 0 && pState->ppBuffers[0] != 0 && pState->ppBuffers[1] != 0 && pState->ppBuffers[2] != 0 && pState->ppBuffers[3] != 0 && pState->ppBuffers[4] != 0 && pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR); 
ncvAssertReturn (pState->size.width > 0 && pState->size.height > 0, NPPST_ERROR); ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) && pState->nStep > 0 && pState->nStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); // change notation Ncv32f *cov0 = pState->ppBuffers[0]; Ncv32f *cov1 = pState->ppBuffers[1]; Ncv32f *fwdU = pState->ppBuffers[2]; // forward u Ncv32f *fwdV = pState->ppBuffers[3]; // forward v Ncv32f *bwdU = pState->ppBuffers[4]; // backward u Ncv32f *bwdV = pState->ppBuffers[5]; // backward v // warp flow ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU, pState->size, pState->nStep, pState->pFU, pState->pFV, pState->nStep, cov0, pState->pos, fwdU) ); ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV, pState->size, pState->nStep, pState->pFU, pState->pFV, pState->nStep, cov0, pState->pos, fwdV) ); // warp backward flow ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU, pState->size, pState->nStep, pState->pBU, pState->pBV, pState->nStep, cov1, 1.0f - pState->pos, bwdU) ); ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV, pState->size, pState->nStep, pState->pBU, pState->pBV, pState->nStep, cov1, 1.0f - pState->pos, bwdV) ); // interpolate frame ncvAssertReturnNcvStat ( BlendFrames (pState->pSrcFrame0, pState->pSrcFrame1, fwdU, fwdV, bwdU, bwdV, cov0, cov1, pState->size.width, pState->size.height, pState->nStep / sizeof (Ncv32f), pState->pos, pState->pNewFrame) ); return NPPST_SUCCESS; } //============================================================================== // // VectorWarpFrame.cu // //============================================================================== #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) // FP32 atomic add static __forceinline__ __device__ float _atomicAdd(float *addr, float val) { float old = *addr, assumed; do { assumed = old; old = int_as_float(__iAtomicCAS((int*)addr, float_as_int(assumed), float_as_int(val+assumed))); } while( assumed!=old ); return old; } #else #define _atomicAdd atomicAdd #endif __global__ void ForwardWarpKernel_PSF2x2(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *normalization_factor, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; //bottom left corner of a target pixel float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f; float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f; // pixel containing bottom left corner float px; float py; float dx = modff (cx, &px); float dy = modff (cy, &py); // target pixel integer coords int tx; int ty; tx = (int) px; ty = (int) py; float value = src[image_row_offset + j]; float weight; // fill pixel containing bottom right corner if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing bottom left corner tx -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper left corner ty -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 
0))) { weight = (1.0f - dx) * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper right corner tx += 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } } __global__ void ForwardWarpKernel_PSF1x1(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; float u_ = u[flow_row_offset + j]; float v_ = v[flow_row_offset + j]; //bottom left corner of target pixel float cx = u_ * time_scale + (float)j + 1.0f; float cy = v_ * time_scale + (float)i + 1.0f; // pixel containing bottom left corner int tx = __float2int_rn (cx); int ty = __float2int_rn (cy); float value = src[image_row_offset + j]; // fill pixel if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { _atomicAdd (dst + ty * image_stride + tx, value); } } __global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * s + j; float scale = normalization_factor[pos]; float invScale = (scale == 0.0f) ? 1.0f : (1.0f / scale); image[pos] *= invScale; } __global__ void MemsetKernel(const float value, int w, int h, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * w + j; image[pos] = value; } NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32u *hpSize) { ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep, NPPST_INVALID_STEP); *hpSize = nSrcStep * srcSize.height; return NPPST_SUCCESS; } // does not require normalization NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof (Ncv32f); dim3 ctaSize (32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); hipLaunchKernelGGL(( ForwardWarpKernel_PSF1x1) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f *pBuffer, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL && pBuffer != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && 
srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof(Ncv32f); dim3 ctaSize(32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); hipLaunchKernelGGL(( MemsetKernel) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), 0, srcSize.width, srcSize.height, pBuffer); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); hipLaunchKernelGGL(( ForwardWarpKernel_PSF2x2) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pBuffer, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); hipLaunchKernelGGL(( NormalizeKernel) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), pBuffer, srcSize.width, srcSize.height, srcStep, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } //============================================================================== // // Resize.cu // //============================================================================== texture <float, 2, hipReadModeElementType> texSrc2D; __forceinline__ __device__ float processLine(int spos, float xmin, float xmax, int ixmin, int ixmax, float fxmin, float cxmax) { // first element float wsum = 1.0f - xmin + fxmin; float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin); spos++; for (int ix = ixmin + 1; ix < ixmax; ++ix) { sum += tex1Dfetch(texSrc, spos); spos++; wsum += 1.0f; } sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax); wsum += cxmax - xmax; return sum / wsum; } __global__ void resizeSuperSample_32f(NcvSize32u srcSize, Ncv32u srcStep, NcvRect32u srcROI, Ncv32f *dst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { // position within dst ROI const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } float rw = (float) srcROI.width; float rh = (float) srcROI.height; // source position float x = scaleX * (float) ix; float y = scaleY * (float) iy; // x sampling range float xBegin = fmax (x - scaleX, 0.0f); float xEnd = fmin (x + scaleX, rw - 1.0f); // y sampling range float yBegin = fmax (y - scaleY, 0.0f); float yEnd = fmin (y + scaleY, rh - 1.0f); // x range of source samples float floorXBegin = floorf (xBegin); float ceilXEnd = ceilf (xEnd); int iXBegin = srcROI.x + (int) floorXBegin; int iXEnd = srcROI.x + (int) ceilXEnd; // y range of source samples float floorYBegin = floorf (yBegin); float ceilYEnd = ceilf (yEnd); int iYBegin = srcROI.y + (int) floorYBegin; int iYEnd = srcROI.y + (int) ceilYEnd; // first row int pos = iYBegin * srcStep + iXBegin; float wsum = 1.0f - yBegin + floorYBegin; float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (1.0f - yBegin + floorYBegin); pos += srcStep; for (int iy = iYBegin + 1; iy < iYEnd; ++iy) { sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd); pos += srcStep; wsum += 1.0f; } sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (ceilYEnd - yEnd); wsum += ceilYEnd - yEnd; sum /= wsum; dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum; } // bicubic interpolation __forceinline__ __device__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 
2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } __global__ void resizeBicubic(NcvSize32u srcSize, NcvRect32u srcROI, NcvSize32u dstSize, Ncv32u dstStep, Ncv32f *dst, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } const float dx = 1.0f / srcROI.width; const float dy = 1.0f / srcROI.height; float rx = (float) srcROI.x; float ry = (float) srcROI.y; float rw = (float) srcROI.width; float rh = (float) srcROI.height; float x = scaleX * (float) ix; float y = scaleY * (float) iy; // sampling range // border mode is clamp float xmin = fmax (ceilf (x - 2.0f), 0.0f); float xmax = fmin (floorf (x + 2.0f), rw - 1.0f); float ymin = fmax (ceilf (y - 2.0f), 0.0f); float ymax = fmin (floorf (y + 2.0f), rh - 1.0f); // shift data window to match ROI rx += 0.5f; ry += 0.5f; x += rx; y += ry; xmin += rx; xmax += rx; ymin += ry; ymax += ry; float sum = 0.0f; float wsum = 0.0f; for (float cy = ymin; cy <= ymax; cy += 1.0f) { for (float cx = xmin; cx <= xmax; cx += 1.0f) { float xDist = x - cx; float yDist = y - cy; float wx = bicubicCoeff (xDist); float wy = bicubicCoeff (yDist); wx *= wy; sum += wx * tex2D (texSrc2D, cx * dx, cy * dy); wsum += wx; } } dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 0 : sum / wsum; } NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, NcvRect32u srcROI, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u dstROI, Ncv32f xFactor, Ncv32f yFactor, NppStInterpMode interpolation) { NCVStatus status = NPPST_SUCCESS; ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE); ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width && nDstStep >= sizeof (Ncv32f) * (Ncv32f) dstSize.width, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // TODO: preprocess ROI to prevent out of bounds access if (interpolation == nppStSupersample) { // bind texture hipBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep); // invoke kernel dim3 ctaSize (32, 6); dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x, (dstROI.height + ctaSize.y - 1) / ctaSize.y); hipLaunchKernelGGL(( resizeSuperSample_32f) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcSize, srcStep, srcROI, pDst, dstSize, dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else if (interpolation == nppStBicubic) { texSrc2D.addressMode[0] = hipAddressModeMirror; texSrc2D.addressMode[1] = hipAddressModeMirror; texSrc2D.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc <float> (); hipBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height, nSrcStep); dim3 ctaSize (32, 6); dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x, (dstSize.height + ctaSize.y - 1) / ctaSize.y); hipLaunchKernelGGL(( resizeBicubic) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else { status = NPPST_ERROR; } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return status; } #endif /* CUDA_DISABLER */
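// ----------------------------------------------------------------------------
// Usage sketch for the stream-compaction entry points defined in this file
// (nppsStCompactGetSize_32u / nppsStCompact_32u). This is a minimal,
// compiled-out illustration, not part of the library API: the removeValue()
// wrapper name, the use of device 0 and the error codes chosen for the
// host-side checks are assumptions made purely for the example.
// ----------------------------------------------------------------------------
#if 0
static NCVStatus removeValue(Ncv32u *d_src, Ncv32u srcLen,
                             Ncv32u *d_dst,
                             Ncv32u *hp_dstLen, // host pointer; pinned memory expected by the async length copy
                             Ncv32u elemRemove)
{
    hipDeviceProp_t devProp;
    ncvAssertCUDAReturn(hipGetDeviceProperties(&devProp, 0), NPPST_ERROR);

    // Query the scratch-buffer size the compaction needs for this input length.
    Ncv32u bufSize = 0;
    ncvAssertReturnNcvStat(nppsStCompactGetSize_32u(srcLen, &bufSize, devProp));

    // Allocate the scratch buffer on the device.
    Ncv8u *d_buf = NULL;
    ncvAssertCUDAReturn(hipMalloc((void **)&d_buf, bufSize), NPPST_MEM_INTERNAL_ERROR);

    // Compact: copy every element != elemRemove into d_dst; the output length
    // is written to *hp_dstLen after the stream is synchronized internally.
    NCVStatus stat = nppsStCompact_32u(d_src, srcLen, d_dst, hp_dstLen,
                                       elemRemove, d_buf, bufSize, devProp);

    hipFree(d_buf);
    return stat;
}
#endif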
5cbf1f738c1fcc50ba8f155a1829e58df1b75ec1.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include <vector> #include <cuda_runtime.h> #include "NPP_staging.hpp" #include "opencv2/gpu/device/warp.hpp" #include "opencv2/gpu/device/warp_shuffle.hpp" texture<Ncv8u, 1, cudaReadModeElementType> tex8u; texture<Ncv32u, 1, cudaReadModeElementType> tex32u; texture<uint2, 1, cudaReadModeElementType> tex64u; //============================================================================== // // CUDA streams handling // //============================================================================== static cudaStream_t nppStream = 0; cudaStream_t nppStGetActiveCUDAstream(void) { return nppStream; } cudaStream_t nppStSetActiveCUDAstream(cudaStream_t cudaStream) { cudaStream_t tmp = nppStream; nppStream = cudaStream; return tmp; } //============================================================================== // // BlockScan.cuh // //============================================================================== NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive //Almost the same as naive scan1Inclusive, but doesn't need __syncthreads() //assuming size <= WARP_SIZE and size is power of 2 template <class T> inline __device__ T warpScanInclusive(T idata, volatile T *s_Data) { #if __CUDA_ARCH__ >= 300 const unsigned int laneId = cv::gpu::device::Warp::laneId(); // scan on shuffl functions #pragma unroll for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2) { const T n = cv::gpu::device::shfl_up(idata, i); if (laneId >= i) idata += n; } return idata; #else Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return s_Data[pos]; #endif } inline __device__ Ncv64u warpScanInclusive(Ncv64u idata, volatile Ncv64u *s_Data) { Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return s_Data[pos]; } template <class T> inline __device__ T warpScanExclusive(T idata, volatile T *s_Data) { return warpScanInclusive(idata, s_Data) - idata; } template <class T, Ncv32u tiNumScanThreads> inline __device__ T blockScanInclusive(T idata, volatile T *s_Data) { if (tiNumScanThreads > K_WARP_SIZE) { //Bottom-level inclusive warp scan T warpResult = warpScanInclusive(idata, s_Data); //Save top elements of each warp for exclusive warp scan //sync to wait for warp scans to complete (because s_Data is being overwritten) __syncthreads(); if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) ) { s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult; } //wait for warp scans to complete __syncthreads(); if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) ) { //grab top warp elements T val = s_Data[threadIdx.x]; //calculate exclusive scan and write back to shared memory s_Data[threadIdx.x] = warpScanExclusive(val, s_Data); } //return updated warp scans with exclusive scan results __syncthreads(); return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE]; } else { return warpScanInclusive(idata, s_Data); } } //============================================================================== // // IntegralImage.cu // 
//============================================================================== const Ncv32u NUM_SCAN_THREADS = 256; const Ncv32u LOG2_NUM_SCAN_THREADS = 8; template<class T_in, class T_out> struct _scanElemOp { template<bool tbDoSqr> static inline __host__ __device__ T_out scanElemOp(T_in elem) { return scanElemOp( elem, Int2Type<(int)tbDoSqr>() ); } private: template <int v> struct Int2Type { enum { value = v }; }; static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<0>) { return (T_out)elem; } static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<1>) { return (T_out)(elem*elem); } }; template<class T> inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs); template<> inline __device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs); } template<> inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } template<> inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } /** * \brief Segmented scan kernel * * Calculates per-row prefix scans of the input image. * Out-of-bounds safe: reads 'size' elements, writes 'size+1' elements * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * \tparam T_op Defines an operation to be performed on the input image pixels * * \param d_src [IN] Source image pointer * \param srcWidth [IN] Source image width * \param srcStride [IN] Source image stride * \param d_II [OUT] Output image pointer * \param IIstride [IN] Output image stride * * \return None */ template <class T_in, class T_out, bool tbDoSqr> __global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride, T_out *d_II, Ncv32u IIstride) { //advance pointers to the current line if (sizeof(T_in) != 1) { d_src += srcStride * blockIdx.x; } //for initial image 8bit source we use texref tex8u d_II += IIstride * blockIdx.x; Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS; Ncv32u offsetX = 0; __shared__ T_out shmem[NUM_SCAN_THREADS * 2]; __shared__ T_out carryElem; carryElem = 0; __syncthreads(); while (numBuckets--) { Ncv32u curElemOffs = offsetX + threadIdx.x; T_out curScanElem; T_in curElem; T_out curElemMod; if (curElemOffs < srcWidth) { //load elements curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs); } curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem); //inclusive scan curScanElem = blockScanInclusive<T_out, NUM_SCAN_THREADS>(curElemMod, shmem); if (curElemOffs <= srcWidth) { //make scan exclusive and write the bucket to the output buffer d_II[curElemOffs] = carryElem + curScanElem - curElemMod; offsetX += NUM_SCAN_THREADS; } //remember last element for subsequent buckets adjustment __syncthreads(); if (threadIdx.x == NUM_SCAN_THREADS-1) { carryElem += curScanElem; } __syncthreads(); } if (offsetX == srcWidth && !threadIdx.x) { d_II[offsetX] = carryElem; } } template <bool tbDoSqr, class T_in, class T_out> NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride, T_out *d_dst, Ncv32u dstStride, NcvSize32u roi) { cudaChannelFormatDesc cfdTex; size_t alignmentOffset = 0; if (sizeof(T_in) == 1) { cfdTex = cudaCreateChannelDesc<Ncv8u>(); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, 
roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); if (alignmentOffset > 0) { ncvAssertCUDAReturn(cudaUnbindTexture(tex8u), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); } } scanRows <T_in, T_out, tbDoSqr> <<<roi.height, NUM_SCAN_THREADS, 0, nppStGetActiveCUDAstream()>>> (d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment) { Ncv32u alignMask = allocatorAlignment-1; Ncv32u inverseAlignMask = ~alignMask; Ncv32u dimBytes = dim * elemTypeSize; Ncv32u pitch = (dimBytes + alignMask) & inverseAlignMask; Ncv32u PaddedDim = pitch / elemTypeSize; return PaddedDim; } template <class T_in, class T_out> NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep, T_out *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) && dstStep >= (roi.width + 1) * sizeof(T_out) && srcStep % sizeof(T_in) == 0 && dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T_in); dstStep /= sizeof(T_out); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <false> (d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u), (Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false> (Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), (Ncv32u *)d_dst, dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), 
NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width && dstStep >= (roi.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64; Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? PaddedHeightII32 : PaddedHeightII64; NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax); ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64); ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <true, Ncv8u, Ncv32u> (d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u), Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false, Ncv32u, Ncv64u> (Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u), d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width, (Ncv32u*)NULL, (roiSize.width+1) * sizeof(Ncv32u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator 
gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f), (Ncv32f*)NULL, (roiSize.width+1) * sizeof(Ncv32f), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width, NULL, (roiSize.width+1) * sizeof(Ncv64u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv32u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep, Ncv32f *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv32u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv32u) && dstStep % sizeof(Ncv32u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv32u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv32u top = h_dst[(i-1) * dstStep + j]; Ncv32u left = h_dst[i * dstStep + (j - 1)]; Ncv32u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus 
nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep, Ncv32f *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width * sizeof(Ncv32f) && dstStep >= (roiSize.width + 1) * sizeof(Ncv32f) && srcStep % sizeof(Ncv32f) == 0 && dstStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(Ncv32f); dstStep /= sizeof(Ncv32f); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0.0f; for (Ncv32u j=1; j<WidthII; j++) { Ncv32f top = h_dst[(i-1) * dstStep + j]; Ncv32f left = h_dst[i * dstStep + (j - 1)]; Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv64u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv64u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv64u top = h_dst[(i-1) * dstStep + j]; Ncv64u left = h_dst[i * dstStep + (j - 1)]; Ncv64u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv64u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem*elem + left - topleft + top; } } return NPPST_SUCCESS; } //============================================================================== // // Decimate.cu // //============================================================================== const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32; const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8; template<class T, NcvBool tbCacheTexture> __device__ T getElem_Decimate(Ncv32u x, T *d_src); template<> __device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src) { return tex1Dfetch(tex32u, x); } template<> __device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src) { return d_src[x]; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src) { return d_src[x]; } template <class T, NcvBool tbCacheTexture> __global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u dstRoi, Ncv32u scale) { int curX = blockIdx.x * blockDim.x + threadIdx.x; int curY = blockIdx.y * blockDim.y + threadIdx.y; if (curX >= dstRoi.width || curY >= dstRoi.height) { return; } d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src); } template <class T> static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 
0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale), NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; dim3 grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X, (dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y); dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y); if (!readThruTexture) { decimate_C1R <T, false> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStep, d_dst, dstStep, dstRoi, scale); } else { cudaChannelFormatDesc cfdTexSrc; if (sizeof(T) == sizeof(Ncv32u)) { cfdTexSrc = cudaCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } else { cfdTexSrc = cudaCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } decimate_C1R <T, true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStep, d_dst, dstStep, dstRoi, scale); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep, T *h_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) && srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; for (Ncv32u i=0; i<dstRoi.height; i++) { for (Ncv32u j=0; j<dstRoi.width; j++) { h_dst[i*dstStep+j] = h_src[i*scale*srcStep + j*scale]; } } return NPPST_SUCCESS; } #define implementNppDecimate(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \ { \ return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, \ srcRoi, scale, readThruTexture); \ } #define implementNppDecimateHost(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale) \ { \ return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u *)h_dst, dstStep, \ srcRoi, scale); \ } implementNppDecimate(32, u) implementNppDecimate(32, s) implementNppDecimate(32, f) implementNppDecimate(64, u) implementNppDecimate(64, s) implementNppDecimate(64, f) implementNppDecimateHost(32, u) implementNppDecimateHost(32, s) implementNppDecimateHost(32, f) implementNppDecimateHost(64, u) implementNppDecimateHost(64, s) 
implementNppDecimateHost(64, f) //============================================================================== // // RectStdDev.cu // //============================================================================== const Ncv32u NUM_RECTSTDDEV_THREADS = 128; template <NcvBool tbCacheTexture> __device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum) { if (tbCacheTexture) { return tex1Dfetch(tex32u, x); } else { return d_sum[x]; } } template <NcvBool tbCacheTexture> __device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum) { if (tbCacheTexture) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } else { return d_sqsum[x]; } } template <NcvBool tbCacheTexture> __global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep, Ncv64u *d_sqsum, Ncv32u sqsumStep, Ncv32f *d_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f invRectArea) { Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x; if (x_offs >= roi.width) { return; } Ncv32u sum_offset = blockIdx.y * sumStep + x_offs; Ncv32u sqsum_offset = blockIdx.y * sqsumStep + x_offs; //OPT: try swapping order (could change cache hit/miss ratio) Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum); Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum); Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum); Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum); Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl; Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br; sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum); sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum); sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum); sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum); Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl; Ncv32f mean = sum_val * invRectArea; ////////////////////////////////////////////////////////////////////////// // sqsum_val_res = sqsum_val / rectArea ////////////////////////////////////////////////////////////////////////// Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val); Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1); Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2; Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3); sqsum_val_1 *= invRectArea; sqsum_val_4 *= invRectArea; Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4; ////////////////////////////////////////////////////////////////////////// // variance = sqsum_val_res - mean * mean ////////////////////////////////////////////////////////////////////////// #if defined DISABLE_MAD_SELECTIVELY Ncv32f variance = sqsum_val_2 - __fmul_rn(mean, mean); #else Ncv32f variance = sqsum_val_res - mean * mean; #endif ////////////////////////////////////////////////////////////////////////// // stddev = sqrtf(variance) ////////////////////////////////////////////////////////////////////////// //Ncv32f stddev = sqrtf(variance); Ncv32f stddev = __fsqrt_rn(variance); d_norm[blockIdx.y * normStep + x_offs] = stddev; } NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep, Ncv64u *d_sqsum, Ncv32u sqsumStep, Ncv32f *d_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea, 
NcvBool readThruTexture) { ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) && sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) && normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) && sumStep % sizeof(Ncv32u) == 0 && sqsumStep % sizeof(Ncv64u) == 0 && normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE); sumStep /= sizeof(Ncv32u); sqsumStep /= sizeof(Ncv64u); normStep /= sizeof(Ncv32f); Ncv32f rectArea = rect.width * rect.height * scaleArea; Ncv32f invRectArea = 1.0f / rectArea; dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height); dim3 block(NUM_RECTSTDDEV_THREADS); if (!readThruTexture) { rectStdDev_32f_C1R <false> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep, roi, rect, invRectArea); } else { cudaChannelFormatDesc cfdTexSrc; cudaChannelFormatDesc cfdTexSqr; cfdTexSrc = cudaCreateChannelDesc<Ncv32u>(); cfdTexSqr = cudaCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc, (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr, (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); rectStdDev_32f_C1R <true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep, Ncv64u *h_sqsum, Ncv32u sqsumStep, Ncv32f *h_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea) { ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) && sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) && normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) && sumStep % sizeof(Ncv32u) == 0 && sqsumStep % sizeof(Ncv64u) == 0 && normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE); sumStep /= sizeof(Ncv32u); sqsumStep /= sizeof(Ncv64u); normStep /= sizeof(Ncv32f); Ncv32f rectArea = rect.width * rect.height * scaleArea; Ncv32f invRectArea = 1.0f / rectArea; for (Ncv32u i=0; i<roi.height; i++) { for (Ncv32u j=0; j<roi.width; j++) { Ncv32u sum_offset = i * sumStep + j; Ncv32u sqsum_offset = i * sqsumStep + j; Ncv32u sum_tl = h_sum[sum_offset + rect.y * sumStep + rect.x]; Ncv32u sum_bl = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x]; Ncv32u sum_tr = h_sum[sum_offset + rect.y * sumStep + rect.x + rect.width]; Ncv32u sum_br = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width]; Ncv64f sum_val = sum_br + sum_tl - sum_tr - sum_bl; Ncv64u sqsum_tl = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x]; Ncv64u sqsum_bl = h_sqsum[sqsum_offset + (rect.y + 
rect.height) * sqsumStep + rect.x]; Ncv64u sqsum_tr = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x + rect.width]; Ncv64u sqsum_br = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width]; Ncv64f sqsum_val = (Ncv64f)(sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl); Ncv64f mean = sum_val * invRectArea; Ncv64f sqsum_val_2 = sqsum_val / rectArea; Ncv64f variance = sqsum_val_2 - mean * mean; h_norm[i * normStep + j] = (Ncv32f)sqrt(variance); } } return NPPST_SUCCESS; } //============================================================================== // // Transpose.cu // //============================================================================== const Ncv32u TRANSPOSE_TILE_DIM = 16; const Ncv32u TRANSPOSE_BLOCK_ROWS = 16; /** * \brief Matrix transpose kernel * * Calculates transpose of the input image * \see TRANSPOSE_TILE_DIM * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * * \param d_src [IN] Source image pointer * \param srcStride [IN] Source image stride * \param d_dst [OUT] Output image pointer * \param dstStride [IN] Output image stride * * \return None */ template <class T> __global__ void transpose(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { __shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1]; Ncv32u blockIdx_x, blockIdx_y; // do diagonal reordering if (gridDim.x == gridDim.y) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x; } else { Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y; blockIdx_y = bid % gridDim.y; blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x; } Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x; Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y; Ncv32u index_gmem = xIndex + yIndex * srcStride; if (xIndex < srcRoi.width) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.height) { tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride]; } } } __syncthreads(); xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x; yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y; index_gmem = xIndex + yIndex * dstStride; if (xIndex < srcRoi.height) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.width) { d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i]; } } } } template <class T> NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); dim3 grid((srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM, (srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM); dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM); transpose <T> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStride, d_dst, dstStride, srcRoi); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride, T *h_dst, Ncv32u dstStride, NcvSize32u srcRoi) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, 
NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); for (Ncv32u i=0; i<srcRoi.height; i++) { for (Ncv32u j=0; j<srcRoi.width; j++) { h_dst[j*dstStride+i] = h_src[i*srcStride + j]; } } return NPPST_SUCCESS; } #define implementNppTranspose(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \ { \ return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, srcRoi); \ } #define implementNppTransposeHost(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi) \ { \ return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u *)h_dst, dstStep, srcRoi); \ } implementNppTranspose(32,u) implementNppTranspose(32,s) implementNppTranspose(32,f) implementNppTranspose(64,u) implementNppTranspose(64,s) implementNppTranspose(64,f) implementNppTransposeHost(32,u) implementNppTransposeHost(32,s) implementNppTransposeHost(32,f) implementNppTransposeHost(64,u) implementNppTransposeHost(64,s) implementNppTransposeHost(64,f) NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperDevice<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperHost<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } //============================================================================== // // Compact.cu // //============================================================================== const Ncv32u NUM_REMOVE_THREADS = 256; template <bool bRemove, bool bWritePartial> __global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_blockSums, Ncv32u elemRemove) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn > srcLen + blockDim.x) { return; } __shared__ Ncv32u shmem[NUM_REMOVE_THREADS * 2]; Ncv32u scanElem = 0; if (elemAddrIn < srcLen) { if (bRemove) { scanElem = (d_src[elemAddrIn] != elemRemove) ? 
1 : 0; } else { scanElem = d_src[elemAddrIn]; } } Ncv32u localScanInc = blockScanInclusive<Ncv32u, NUM_REMOVE_THREADS>(scanElem, shmem); __syncthreads(); if (elemAddrIn < srcLen) { if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial) { d_blockSums[blockId] = localScanInc; } if (bRemove) { d_offsets[elemAddrIn] = localScanInc - scanElem; } else { d_src[elemAddrIn] = localScanInc - scanElem; } } } __global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } __shared__ Ncv32u valOffs; valOffs = d_blockSums[blockId]; __syncthreads(); d_offsets[elemAddrIn] += valOffs; } __global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_dst, Ncv32u elemRemove, Ncv32u *dstLenValue) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } Ncv32u elem = d_src[elemAddrIn]; Ncv32u elemAddrOut = d_offsets[elemAddrIn]; if (elem != elemRemove) { d_dst[elemAddrOut] = elem; } if (elemAddrIn == srcLen-1) { if (elem != elemRemove) { *dstLenValue = elemAddrOut + 1; } else { *dstLenValue = elemAddrOut; } } } NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_dst, Ncv32u *dstLenPinned, Ncv32u elemRemove, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { if (dstLenPinned != NULL) { *dstLenPinned = 0; } return NPPST_SUCCESS; } std::vector<Ncv32u> partSumNums; std::vector<Ncv32u> partSumOffsets; Ncv32u partSumLastNum = srcLen; Ncv32u partSumLastOffs = 0; do { partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * sizeof(Ncv32u), gpuAllocator.alignment()) / sizeof(Ncv32u); partSumLastOffs += curPartSumAlignedLength; partSumLastNum = (partSumLastNum + NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS; } while (partSumLastNum>1); partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1); ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1); ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN dim3 block(NUM_REMOVE_THREADS); //calculate zero-level partial sums for indices calculation if (partSumNums.size() > 2) { dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } removePass1Scan <true, true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcLen, d_hierSums.ptr(), d_hierSums.ptr() + partSumOffsets[1], elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //calculate hierarchical partial sums for (Ncv32u i=1; i<partSumNums.size()-1; i++) { dim3 grid_partial(partSumNums[i+1]); if (grid_partial.x > 65535) { grid_partial.y = (grid_partial.x + 65534) / 65535; grid_partial.x = 65535; } if (grid_partial.x != 1) { removePass1Scan <false, true> <<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>> (d_hierSums.ptr() + partSumOffsets[i], 
partSumNums[i], NULL, d_hierSums.ptr() + partSumOffsets[i+1], 0); } else { removePass1Scan <false, false> <<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>> (d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], NULL, NULL, 0); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //adjust hierarchical partial sums for (Ncv32s i=(Ncv32s)partSumNums.size()-3; i>=0; i--) { dim3 grid_local(partSumNums[i+1]); if (grid_local.x > 65535) { grid_local.y = (grid_local.x + 65534) / 65535; grid_local.x = 65535; } removePass2Adjust <<<grid_local, block, 0, nppStGetActiveCUDAstream()>>> (d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], d_hierSums.ptr() + partSumOffsets[i+1]); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } } else { dim3 grid_local(partSumNums[1]); removePass1Scan <true, false> <<<grid_local, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcLen, d_hierSums.ptr(), NULL, elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //compact source vector using indices dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } removePass3Compact <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcLen, d_hierSums.ptr(), d_dst, elemRemove, d_numDstElements.ptr()); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //get number of dst elements if (dstLenPinned != NULL) { ncvAssertCUDAReturn(cudaMemcpyAsync(dstLenPinned, d_numDstElements.ptr(), sizeof(Ncv32u), cudaMemcpyDeviceToHost, nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); } NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { *pBufsize = 0; return NPPST_SUCCESS; } NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = compactVector_32u_device(NULL, srcLen, NULL, NULL, 0xC001C0DE, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp) { return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp); } NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp) { return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp); } NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_dst, Ncv32u *p_dstLen, Ncv32u elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen, elemRemove, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen, Ncv32s *d_dst, Ncv32u *p_dstLen, Ncv32s elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen, *(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp); } #if defined __GNUC__ && __GNUC__ > 2 && __GNUC_MINOR__ > 4 typedef Ncv32u __attribute__((__may_alias__)) Ncv32u_a; #else 
typedef Ncv32u Ncv32u_a; #endif NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen, Ncv32f *d_dst, Ncv32u *p_dstLen, Ncv32f elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen, *(Ncv32u_a *)&elemRemove, pBuffer, bufSize, devProp); } NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen, Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { if (dstLen != NULL) { *dstLen = 0; } return NPPST_SUCCESS; } Ncv32u dstIndex = 0; for (Ncv32u srcIndex=0; srcIndex<srcLen; srcIndex++) { if (h_src[srcIndex] != elemRemove) { h_dst[dstIndex++] = h_src[srcIndex]; } } if (dstLen != NULL) { *dstLen = dstIndex; } return NPPST_SUCCESS; } NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen, Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove) { return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove); } NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen, Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove) { return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove); } //============================================================================== // // Filter.cu // //============================================================================== texture <float, 1, cudaReadModeElementType> texSrc; texture <float, 1, cudaReadModeElementType> texKernel; __forceinline__ __device__ float getValueMirrorRow(const int rowOffset, int i, int w) { if (i < 0) i = 1 - i; if (i >= w) i = w + w - i - 1; return tex1Dfetch (texSrc, rowOffset + i); } __forceinline__ __device__ float getValueMirrorColumn(const int offset, const int rowStep, int j, int h) { if (j < 0) j = 1 - j; if (j >= h) j = h + h - j - 1; return tex1Dfetch (texSrc, offset + j * rowStep); } __global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u roi, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { // position within ROI const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix >= roi.width || iy >= roi.height) { return; } const int p = nKernelSize - nAnchor - 1; const int j = roi.y + iy; const int rowOffset = j * srcStep + roi.x; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorRow (rowOffset, ix + m - p, roi.width) * tex1Dfetch (texKernel, m); } pDst[iy * dstStep + ix] = sum * multiplier; } __global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u roi, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix >= roi.width || iy >= roi.height) { return; } const int p = nKernelSize - nAnchor - 1; const int i = roi.x + ix; const int offset = i + roi.y * srcStep; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorColumn (offset, srcStep, iy + m - p, roi.height) * tex1Dfetch (texKernel, m); } pDst[ix + iy * dstStep] = sum * multiplier; } NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f 
multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) <= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> (); texSrc.normalized = false; texKernel.normalized = false; cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderNone: return NPPST_ERROR; case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: FilterRowBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) <= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> (); texSrc.normalized = false; texKernel.normalized = false; cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: FilterColumnBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } 
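/* Usage sketch (illustrative only, not part of the original file): one way a
   host caller might invoke the mirror-border row filter above on a single-
   channel 32f image with a centered 5-tap kernel. The helper name and the
   hard-coded kernel taps are assumptions made for the example; everything else
   uses only the types, macros and entry points already present in this file. */
static NCVStatus exampleRowFilterMirror(const Ncv32f *d_src, Ncv32f *d_dst,
                                        NcvSize32u imgSize, Ncv32u stepBytes)
{
    // The filter reads the kernel through a texture, so it must reside in device memory.
    const Ncv32f h_kernel[5] = {0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f};
    Ncv32f *d_kernel = NULL;
    ncvAssertCUDAReturn(cudaMalloc((void **)&d_kernel, sizeof(h_kernel)), NPPST_MEM_INTERNAL_ERROR);
    ncvAssertCUDAReturn(cudaMemcpy(d_kernel, h_kernel, sizeof(h_kernel), cudaMemcpyHostToDevice),
                        NPPST_MEM_RESIDENCE_ERROR);

    NcvRect32u roi;
    roi.x = 0; roi.y = 0; roi.width = imgSize.width; roi.height = imgSize.height;

    // nAnchor = nKernelSize / 2 centers the kernel; multiplier rescales the filtered sum.
    NCVStatus status = nppiStFilterRowBorder_32f_C1R(d_src, imgSize, stepBytes,
                                                     d_dst, imgSize, stepBytes,
                                                     roi, nppStBorderMirror,
                                                     d_kernel, 5, 2, 1.0f);
    cudaFree(d_kernel);
    return status;
}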
//============================================================================== // // FrameInterpolate.cu // //============================================================================== inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom) { return (num + denom - 1)/denom; } texture<float, 2, cudaReadModeElementType> tex_src1; texture<float, 2, cudaReadModeElementType> tex_src0; __global__ void BlendFramesKernel(const float *u, const float *v, // forward flow const float *ur, const float *vr, // backward flow const float *o0, const float *o1, // coverage masks int w, int h, int s, float theta, float *out) { const int ix = threadIdx.x + blockDim.x * blockIdx.x; const int iy = threadIdx.y + blockDim.y * blockIdx.y; const int pos = ix + s * iy; if (ix >= w || iy >= h) return; float _u = u[pos]; float _v = v[pos]; float _ur = ur[pos]; float _vr = vr[pos]; float x = (float)ix + 0.5f; float y = (float)iy + 0.5f; bool b0 = o0[pos] > 1e-4f; bool b1 = o1[pos] > 1e-4f; if (b0 && b1) { // pixel is visible on both frames out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta) * (1.0f - theta) + tex2D(tex_src1, x + _u * (1.0f - theta), y + _v * (1.0f - theta)) * theta; } else if (b0) { // visible on the first frame only out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta); } else { // visible on the second frame only out[pos] = tex2D(tex_src1, x - _ur * (1.0f - theta), y - _vr * (1.0f - theta)); } } NCVStatus BlendFrames(const Ncv32f *src0, const Ncv32f *src1, const Ncv32f *ufi, const Ncv32f *vfi, const Ncv32f *ubi, const Ncv32f *vbi, const Ncv32f *o1, const Ncv32f *o2, Ncv32u width, Ncv32u height, Ncv32u stride, Ncv32f theta, Ncv32f *out) { tex_src1.addressMode[0] = cudaAddressModeClamp; tex_src1.addressMode[1] = cudaAddressModeClamp; tex_src1.filterMode = cudaFilterModeLinear; tex_src1.normalized = false; tex_src0.addressMode[0] = cudaAddressModeClamp; tex_src0.addressMode[1] = cudaAddressModeClamp; tex_src0.filterMode = cudaFilterModeLinear; tex_src0.normalized = false; cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> (); const Ncv32u pitch = stride * sizeof (float); ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src1, src1, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src0, src0, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR); dim3 threads (32, 4); dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y)); BlendFramesKernel<<<blocks, threads, 0, nppStGetActiveCUDAstream ()>>> (ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize, Ncv32u nStep, Ncv32u *hpSize) { NCVStatus status = NPPST_ERROR; status = nppiStVectorWarpGetBufferSize(srcSize, nStep, hpSize); return status; } NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState) { // check state validity ncvAssertReturn (pState->pSrcFrame0 != 0 && pState->pSrcFrame1 != 0 && pState->pFU != 0 && pState->pFV != 0 && pState->pBU != 0 && pState->pBV != 0 && pState->pNewFrame != 0 && pState->ppBuffers[0] != 0 && pState->ppBuffers[1] != 0 && pState->ppBuffers[2] != 0 && pState->ppBuffers[3] != 0 && pState->ppBuffers[4] != 0 && pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (pState->size.width > 0 && pState->size.height > 0, NPPST_ERROR); ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) && pState->nStep > 0 && pState->nStep % sizeof (Ncv32f) == 0, 
NPPST_INVALID_STEP);

    // change notation
    Ncv32f *cov0 = pState->ppBuffers[0];
    Ncv32f *cov1 = pState->ppBuffers[1];
    Ncv32f *fwdU = pState->ppBuffers[2]; // forward u
    Ncv32f *fwdV = pState->ppBuffers[3]; // forward v
    Ncv32f *bwdU = pState->ppBuffers[4]; // backward u
    Ncv32f *bwdV = pState->ppBuffers[5]; // backward v

    // warp flow
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU, pState->size, pState->nStep,
                                        pState->pFU, pState->pFV, pState->nStep,
                                        cov0, pState->pos, fwdU) );
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV, pState->size, pState->nStep,
                                        pState->pFU, pState->pFV, pState->nStep,
                                        cov0, pState->pos, fwdV) );

    // warp backward flow
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU, pState->size, pState->nStep,
                                        pState->pBU, pState->pBV, pState->nStep,
                                        cov1, 1.0f - pState->pos, bwdU) );
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV, pState->size, pState->nStep,
                                        pState->pBU, pState->pBV, pState->nStep,
                                        cov1, 1.0f - pState->pos, bwdV) );

    // interpolate frame
    ncvAssertReturnNcvStat (
        BlendFrames (pState->pSrcFrame0, pState->pSrcFrame1,
                     fwdU, fwdV, bwdU, bwdV, cov0, cov1,
                     pState->size.width, pState->size.height,
                     pState->nStep / sizeof (Ncv32f),
                     pState->pos, pState->pNewFrame) );

    return NPPST_SUCCESS;
}

//==============================================================================
//
// VectorWarpFrame.cu
//
//==============================================================================

#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
// FP32 atomic add
static __forceinline__ __device__ float _atomicAdd(float *addr, float val)
{
    float old = *addr, assumed;
    do
    {
        assumed = old;
        old = int_as_float(__iAtomicCAS((int*)addr, float_as_int(assumed), float_as_int(val+assumed)));
    } while( assumed!=old );
    return old;
}
#else
#define _atomicAdd atomicAdd
#endif

__global__ void ForwardWarpKernel_PSF2x2(const float *u, const float *v, const float *src,
                                         const int w, const int h,
                                         const int flow_stride, const int image_stride,
                                         const float time_scale,
                                         float *normalization_factor, float *dst)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x;
    int i = threadIdx.y + blockDim.y * blockIdx.y;
    if (i >= h || j >= w) return;

    int flow_row_offset = i * flow_stride;
    int image_row_offset = i * image_stride;

    //bottom left corner of a target pixel
    float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f;
    float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f;
    // pixel containing bottom left corner
    float px;
    float py;
    float dx = modff (cx, &px);
    float dy = modff (cy, &py);
    // target pixel integer coords
    int tx;
    int ty;
    tx = (int) px;
    ty = (int) py;
    float value = src[image_row_offset + j];
    float weight;
    // fill pixel containing bottom right corner
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = dx * dy;
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
    // fill pixel containing bottom left corner
    tx -= 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = (1.0f - dx) * dy;
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
    // fill pixel containing upper left corner
    ty -= 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = (1.0f - dx) * (1.0f - dy);
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
    // fill pixel containing upper right corner
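    // Note on the weighting scheme used above and below: the four splat
    // coefficients (dx*dy, (1-dx)*dy, (1-dx)*(1-dy), dx*(1-dy)) are the
    // bilinear weights of the 2x2 point-spread function and sum to 1; the
    // per-pixel weight totals accumulated into normalization_factor are
    // divided out afterwards by NormalizeKernel.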
tx += 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } } __global__ void ForwardWarpKernel_PSF1x1(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; float u_ = u[flow_row_offset + j]; float v_ = v[flow_row_offset + j]; //bottom left corner of target pixel float cx = u_ * time_scale + (float)j + 1.0f; float cy = v_ * time_scale + (float)i + 1.0f; // pixel containing bottom left corner int tx = __float2int_rn (cx); int ty = __float2int_rn (cy); float value = src[image_row_offset + j]; // fill pixel if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { _atomicAdd (dst + ty * image_stride + tx, value); } } __global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * s + j; float scale = normalization_factor[pos]; float invScale = (scale == 0.0f) ? 1.0f : (1.0f / scale); image[pos] *= invScale; } __global__ void MemsetKernel(const float value, int w, int h, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * w + j; image[pos] = value; } NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32u *hpSize) { ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep, NPPST_INVALID_STEP); *hpSize = nSrcStep * srcSize.height; return NPPST_SUCCESS; } // does not require normalization NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof (Ncv32f); dim3 ctaSize (32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); ForwardWarpKernel_PSF1x1 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f *pBuffer, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL && pBuffer != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof(Ncv32f); dim3 ctaSize(32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, 
ctaSize.y)); MemsetKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (0, srcSize.width, srcSize.height, pBuffer); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); ForwardWarpKernel_PSF2x2 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pBuffer, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); NormalizeKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pBuffer, srcSize.width, srcSize.height, srcStep, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } //============================================================================== // // Resize.cu // //============================================================================== texture <float, 2, cudaReadModeElementType> texSrc2D; __forceinline__ __device__ float processLine(int spos, float xmin, float xmax, int ixmin, int ixmax, float fxmin, float cxmax) { // first element float wsum = 1.0f - xmin + fxmin; float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin); spos++; for (int ix = ixmin + 1; ix < ixmax; ++ix) { sum += tex1Dfetch(texSrc, spos); spos++; wsum += 1.0f; } sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax); wsum += cxmax - xmax; return sum / wsum; } __global__ void resizeSuperSample_32f(NcvSize32u srcSize, Ncv32u srcStep, NcvRect32u srcROI, Ncv32f *dst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { // position within dst ROI const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } float rw = (float) srcROI.width; float rh = (float) srcROI.height; // source position float x = scaleX * (float) ix; float y = scaleY * (float) iy; // x sampling range float xBegin = fmax (x - scaleX, 0.0f); float xEnd = fmin (x + scaleX, rw - 1.0f); // y sampling range float yBegin = fmax (y - scaleY, 0.0f); float yEnd = fmin (y + scaleY, rh - 1.0f); // x range of source samples float floorXBegin = floorf (xBegin); float ceilXEnd = ceilf (xEnd); int iXBegin = srcROI.x + (int) floorXBegin; int iXEnd = srcROI.x + (int) ceilXEnd; // y range of source samples float floorYBegin = floorf (yBegin); float ceilYEnd = ceilf (yEnd); int iYBegin = srcROI.y + (int) floorYBegin; int iYEnd = srcROI.y + (int) ceilYEnd; // first row int pos = iYBegin * srcStep + iXBegin; float wsum = 1.0f - yBegin + floorYBegin; float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (1.0f - yBegin + floorYBegin); pos += srcStep; for (int iy = iYBegin + 1; iy < iYEnd; ++iy) { sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd); pos += srcStep; wsum += 1.0f; } sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (ceilYEnd - yEnd); wsum += ceilYEnd - yEnd; sum /= wsum; dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum; } // bicubic interpolation __forceinline__ __device__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } __global__ void resizeBicubic(NcvSize32u srcSize, NcvRect32u srcROI, NcvSize32u dstSize, Ncv32u dstStep, Ncv32f *dst, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= 
dstROI.width || iy >= dstROI.height) { return; } const float dx = 1.0f / srcROI.width; const float dy = 1.0f / srcROI.height; float rx = (float) srcROI.x; float ry = (float) srcROI.y; float rw = (float) srcROI.width; float rh = (float) srcROI.height; float x = scaleX * (float) ix; float y = scaleY * (float) iy; // sampling range // border mode is clamp float xmin = fmax (ceilf (x - 2.0f), 0.0f); float xmax = fmin (floorf (x + 2.0f), rw - 1.0f); float ymin = fmax (ceilf (y - 2.0f), 0.0f); float ymax = fmin (floorf (y + 2.0f), rh - 1.0f); // shift data window to match ROI rx += 0.5f; ry += 0.5f; x += rx; y += ry; xmin += rx; xmax += rx; ymin += ry; ymax += ry; float sum = 0.0f; float wsum = 0.0f; for (float cy = ymin; cy <= ymax; cy += 1.0f) { for (float cx = xmin; cx <= xmax; cx += 1.0f) { float xDist = x - cx; float yDist = y - cy; float wx = bicubicCoeff (xDist); float wy = bicubicCoeff (yDist); wx *= wy; sum += wx * tex2D (texSrc2D, cx * dx, cy * dy); wsum += wx; } } dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 0 : sum / wsum; } NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, NcvRect32u srcROI, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u dstROI, Ncv32f xFactor, Ncv32f yFactor, NppStInterpMode interpolation) { NCVStatus status = NPPST_SUCCESS; ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE); ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width && nDstStep >= sizeof (Ncv32f) * (Ncv32f) dstSize.width, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // TODO: preprocess ROI to prevent out of bounds access if (interpolation == nppStSupersample) { // bind texture cudaBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep); // invoke kernel dim3 ctaSize (32, 6); dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x, (dstROI.height + ctaSize.y - 1) / ctaSize.y); resizeSuperSample_32f <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcSize, srcStep, srcROI, pDst, dstSize, dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else if (interpolation == nppStBicubic) { texSrc2D.addressMode[0] = cudaAddressModeMirror; texSrc2D.addressMode[1] = cudaAddressModeMirror; texSrc2D.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> (); cudaBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height, nSrcStep); dim3 ctaSize (32, 6); dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x, (dstSize.height + ctaSize.y - 1) / ctaSize.y); resizeBicubic <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else { status = NPPST_ERROR; } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return status; } #endif /* CUDA_DISABLER */
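/* Usage sketch for nppiStResize_32f_C1R above (illustrative only, not part of
   the original sources; the helper name and the fixed 2x downscale are
   assumptions). xFactor/yFactor are dst/src scale factors, so 0.5f halves each
   dimension, and supersampling averages the source texels covered by each
   output pixel's footprint, which is the usual choice for downscaling. */
static NCVStatus exampleDownscaleHalf(const Ncv32f *d_src, NcvSize32u srcSize, Ncv32u srcStepBytes,
                                      Ncv32f *d_dst, NcvSize32u dstSize, Ncv32u dstStepBytes)
{
    NcvRect32u srcROI;
    srcROI.x = 0; srcROI.y = 0; srcROI.width = srcSize.width; srcROI.height = srcSize.height;

    NcvRect32u dstROI;
    dstROI.x = 0; dstROI.y = 0; dstROI.width = dstSize.width; dstROI.height = dstSize.height;

    return nppiStResize_32f_C1R(d_src, srcSize, srcStepBytes, srcROI,
                                d_dst, dstSize, dstStepBytes, dstROI,
                                0.5f, 0.5f, nppStSupersample);
}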
e1df70620f45ee8b3ef1d70cedddb548424e58ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Prerequisites.cuh" #include "Angles.cuh" #include "Correlation.cuh" #include "CTF.cuh" #include "FFT.cuh" #include "Generics.cuh" #include "Helper.cuh" #include "ImageManipulation.cuh" #include "Masking.cuh" #include "Optimization.cuh" #include "Transformation.cuh" namespace gtom { //////////////////////////// //CUDA kernel declarations// //////////////////////////// __global__ void AccumulateSpectraKernel(tfloat* d_ps1d, tfloat* d_defoci, uint nspectra, uint length, tfloat* d_accumulated, tfloat accumulateddefocus, tfloat* d_perbatchoffsets, uint lowfreq, uint relevantlength, tfloat cs, tfloat lambda, tfloat pxfactor); ///////////////////////////////// //Auxiliary methods and kernels// ///////////////////////////////// void PopulateAngles(std::vector<tfloat3> &v_angles, tfloat3 phibracket, tfloat3 thetabracket, tfloat3 psibracket) { for (tfloat psi = psibracket.x; psi <= psibracket.y + 1e-5f; psi += psibracket.z) { for (tfloat theta = thetabracket.x; theta <= thetabracket.y + 1e-5f; theta += thetabracket.z) { for (tfloat phi = phibracket.x; phi <= phibracket.y + 1e-5f; phi += phibracket.z) { v_angles.push_back(tfloat3(phi, theta, psi)); if (phibracket.z == 0) break; } if (thetabracket.z == 0) break; } if (psibracket.z == 0) break; } } void d_AccumulateSpectra(tfloat* d_ps1d, tfloat* d_defoci, uint nspectra, tfloat* d_accumulated, tfloat accumulateddefocus, tfloat* d_perbatchoffsets, CTFParams p, CTFFitParams fp, uint batch) { uint length = fp.dimsperiodogram.x / 2; uint relevantlength = fp.maskouterradius - fp.maskinnerradius; CTFParamsLean lean = CTFParamsLean(p, toInt3(fp.dimsperiodogram)); dim3 TpB = dim3(tmin(128, NextMultipleOf(relevantlength, 32))); dim3 grid = dim3((relevantlength + TpB.x - 1) / TpB.x, batch); AccumulateSpectraKernel << <grid, TpB >> > (d_ps1d, d_defoci, nspectra, length, d_accumulated, accumulateddefocus, d_perbatchoffsets, fp.maskinnerradius, relevantlength, lean.Cs, lean.lambda, lean.ny); } __global__ void AccumulateSpectraKernel(tfloat* d_ps1d, tfloat* d_defoci, uint nspectra, uint length, tfloat* d_accumulated, tfloat accumulateddefocus, tfloat* d_perbatchoffsets, uint lowfreq, uint relevantlength, tfloat cs, tfloat lambda, tfloat pxfactor) { uint id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= relevantlength) return; tfloat sum = 0; uint samples = 0; double K = (double)(id + lowfreq) * pxfactor; double K2 = K * K; double K4 = K2 * K2; double defocusoffset = d_perbatchoffsets[blockIdx.y]; double D = (accumulateddefocus + defocusoffset) * 1e10; double lambda2 = lambda * lambda; double lambda4 = lambda2 * lambda2; double cs2 = cs * cs; for (uint n = 0; n < nspectra; n++, d_ps1d += length) { double d = (d_defoci[n] + defocusoffset) * 1e10; if (d < 0 != D < 0) // Combining defoci with different signs won't work continue; double k = sqrt(abs(abs(d) - sqrt(cs2 * K4 * lambda4 + 2.0 * cs * D * K2 * lambda2 + d * d)) / (cs * lambda2)); k /= pxfactor; if (ceil(k) >= length) // Out of range continue; // Cubic interpolation uint p1 = k; tfloat sample0 = d_ps1d[tmax(1U, p1) - 1]; tfloat sample1 = d_ps1d[p1]; tfloat sample2 = d_ps1d[tmin(length - 1, p1 + 1)]; tfloat sample3 = d_ps1d[tmin(length - 1, p1 + 2)]; tfloat factor0 = -0.5f * sample0 + 1.5f * sample1 - 1.5f * sample2 + 0.5f * sample3; tfloat factor1 = sample0 - 2.5f * sample1 + 2.0f * sample2 - 0.5f * sample3; tfloat factor2 = -0.5f * sample0 + 0.5f * sample2; tfloat factor3 = sample1; tfloat interp = k - 
(tfloat)p1; sum += ((factor0 * interp + factor1) * interp + factor2) * interp + factor3; samples++; } d_accumulated[relevantlength * blockIdx.y + id] = sum / (tfloat)tmax(1U, samples); } /////////////////////////////////////////// //Fit defocus and tilt in tilted specimen// /////////////////////////////////////////// void h_CTFTiltFit(tfloat* h_image, int2 dimsimage, uint nimages, float overlapfraction, std::vector<CTFTiltParams> &startparams, CTFFitParams fp, tfloat maxtheta, tfloat2 &specimentilt, tfloat* h_defoci) { tfloat* d_image; hipMalloc((void**)&d_image, Elements2(dimsimage) * sizeof(tfloat)); /*tfloat* d_imagecropped; hipMalloc((void**)&d_imagecropped, Elements2(dimsimage) * sizeof(tfloat));*/ tfloat3 phibracket = tfloat3(0.0f, ToRad(360.0f), ToRad(20.0f)); tfloat3 thetabracket = tfloat3(0.0f, maxtheta, ToRad(4.0f)); for (uint i = 0; i < nimages; i++) h_defoci[i] = 0; for (uint r = 0; r < 4; r++) { int2 anglegrid = toInt2((phibracket.y - phibracket.x + 1e-5f) / phibracket.z + 1, (thetabracket.y - thetabracket.x + 1e-5f) / thetabracket.z + 1); std::vector<tfloat3> v_angles; PopulateAngles(v_angles, phibracket, thetabracket, tfloat3(0)); tfloat* h_scores = MallocValueFilled(Elements2(anglegrid), (tfloat)0); tfloat* h_samples = MallocValueFilled(Elements2(anglegrid), (tfloat)0); if (Elements2(anglegrid) != v_angles.size()) throw; for (uint i = 0; i < nimages; i++) { hipMemcpy(d_image, h_image + Elements2(dimsimage) * i, Elements2(dimsimage) * sizeof(tfloat), hipMemcpyHostToDevice); /*int2 dimscropped = toInt2(cos(startparams[i].stageangle.y) * dimsimage.x, dimsimage.y); dimscropped.x += dimscropped.x % 2; d_Pad(d_image, d_imagecropped, toInt3(dimsimage), toInt3(dimscropped), T_PAD_VALUE, (tfloat)0);*/ std::vector<tfloat2> v_results; CTFTiltParams adjustedparams(startparams[i].imageangle, startparams[i].stageangle, startparams[i].specimenangle, startparams[i].centerparams); adjustedparams.centerparams.defocus += h_defoci[i]; d_CTFTiltFit(d_image, dimsimage, overlapfraction, adjustedparams, fp, v_angles, 3, v_results); // First, add to the average score grid... for (uint n = 0; n < Elements2(anglegrid); n++) { h_samples[n] += 1.0;// pow(cos(startparams[i].stageangle.y), 2.0); h_scores[n] += v_results[n].x;// *pow(cos(startparams[i].stageangle.y), 2.0); } // ... 
then, take the defocus value with the highest score std::sort(v_results.begin(), v_results.end(), [](const tfloat2 &a, const tfloat2 &b) -> bool { return a.x > b.x; }); h_defoci[i] += v_results[0].y; // Update adjustment for next iteration so it doesn't start the search from scratch } for (uint n = 0; n < Elements2(anglegrid); n++) h_scores[n] /= h_samples[n]; //WriteToBinaryFile("d_scores.bin", h_scores, Elements2(anglegrid) * sizeof(tfloat)); free(h_samples); int2 maxposition; tfloat maxvalue = -1e30f; for (uint y = 0; y < anglegrid.y; y++) for (uint x = 0; x < anglegrid.x; x++) if (h_scores[y * anglegrid.x + x] > maxvalue) { maxposition = toInt2(x, y); maxvalue = h_scores[y * anglegrid.x + x]; } free(h_scores); // Update tilt estimate and shrink search brackets specimentilt = tfloat2(phibracket.x + maxposition.x * phibracket.z, thetabracket.x + maxposition.y * thetabracket.z); phibracket = tfloat3(specimentilt.x - phibracket.z * 0.75f, specimentilt.x + phibracket.z * 0.75f, phibracket.z / 4.0f); thetabracket = tfloat3(specimentilt.y - thetabracket.z * 0.75f, specimentilt.y + thetabracket.z * 0.75f, thetabracket.z / 4.0f); // Adjust defocus search bracket after first iteration, as the first defocus estimate can't be too far from the truth if (r == 0) { fp.defocus.x = -fp.defocus.z * 1.5; fp.defocus.y = fp.defocus.z * 1.5; fp.defocus.z /= 4.0; } } //hipFree(d_imagecropped); hipFree(d_image); } void d_CTFTiltFit(tfloat* d_image, int2 dimsimage, float overlapfraction, CTFTiltParams &startparams, CTFFitParams fp, std::vector<tfloat3> &v_angles, int defocusrefinements, std::vector<tfloat2> &v_results) { CTFFitParams originalfp = fp; fp.maskinnerradius = 0; fp.maskouterradius = fp.dimsperiodogram.x / 2; int2 dimspolar = GetCart2PolarFFTSize(fp.dimsperiodogram); dimspolar.x = fp.maskouterradius - fp.maskinnerradius; // Create grid, allocate memory for spectra int2 dimsgrid; int3* h_origins = GetEqualGridSpacing(dimsimage, fp.dimsperiodogram, overlapfraction, dimsgrid); uint norigins = Elements2(dimsgrid); // Allocate memory for spectra and point coords tfloat* d_ps1d; hipMalloc((void**)&d_ps1d, dimspolar.x * norigins * sizeof(tfloat)); float2* d_ps1dcoords; hipMalloc((void**)&d_ps1dcoords, dimspolar.x * norigins * sizeof(float2)); { int3* d_origins = (int3*)CudaMallocFromHostArray(h_origins, norigins * sizeof(int3)); tfloat* d_ps1dmin; hipMalloc((void**)&d_ps1dmin, dimspolar.x * sizeof(tfloat)); tfloat* d_ps1dmax; hipMalloc((void**)&d_ps1dmax, dimspolar.x * sizeof(tfloat)); tfloat* d_ps2d; // Extracted spectra in Cartesian coords hipMalloc((void**)&d_ps2d, ElementsFFT2(fp.dimsperiodogram) * norigins * sizeof(tfloat)); tfloat* d_ps2dpolar; hipMalloc((void**)&d_ps2dpolar, Elements2(dimspolar) * norigins * sizeof(tfloat)); float2* d_ps2dcoords; hipMalloc((void**)&d_ps2dcoords, Elements2(dimspolar) * sizeof(float2)); CTFParams* h_params = (CTFParams*)malloc(norigins * sizeof(CTFParams)); for (uint n = 0; n < norigins; n++) h_params[n] = startparams.centerparams; d_CTFFitCreateTarget2D(d_image, dimsimage, d_origins, h_params, norigins, fp, d_ps2dpolar, d_ps2dcoords, true, d_ps1dmin, d_ps1dmax); // All averaged to one for background d_SubtractVector(d_ps1dmax, d_ps1dmin, d_ps1dmax, dimspolar.x); d_MaxOp(d_ps1dmax, 0.2f, d_ps1dmax, dimspolar.x); // Extract, average, convert to polar d_CTFPeriodogram(d_image, dimsimage, d_origins, norigins, fp.dimsperiodogram, fp.dimsperiodogram, d_ps2d); d_Cart2PolarFFT(d_ps2d, d_ps2dpolar, fp.dimsperiodogram, T_INTERP_CUBIC, fp.maskinnerradius, fp.maskouterradius, 
norigins); // Create polar background image tfloat* d_ps2dmin; hipMalloc((void**)&d_ps2dmin, Elements2(dimspolar) * sizeof(tfloat)); CudaMemcpyMulti(d_ps2dmin, d_ps1dmin, dimspolar.x, dimspolar.y, 1); //d_WriteMRC(d_ps2dmin, toInt3(dimspolar.x, dimspolar.y, 1), "d_ps2dmin.mrc"); tfloat* d_ps2dmax; hipMalloc((void**)&d_ps2dmax, Elements2(dimspolar) * sizeof(tfloat)); CudaMemcpyMulti(d_ps2dmax, d_ps1dmax, dimspolar.x, dimspolar.y, 1); //d_WriteMRC(d_ps2dmax, toInt3(dimspolar.x, dimspolar.y, 1), "d_ps2dmax.mrc"); // Subtract background and normalize d_SubtractVector(d_ps2dpolar, d_ps2dmin, d_ps2dpolar, Elements2(dimspolar), norigins); d_DivideSafeByVector(d_ps2dpolar, d_ps2dmax, d_ps2dpolar, Elements2(dimspolar), norigins); d_NormMonolithic(d_ps2dpolar, d_ps2dpolar, Elements2(dimspolar), T_NORM_MEAN01STD, norigins); // Create 1D targets and normalize d_CTFFitCreateTarget1D(d_ps2dpolar, d_ps2dcoords, dimspolar, h_params, norigins, fp, d_ps1d, d_ps1dcoords); d_ValueFill(d_ps2d, dimspolar.x, (tfloat)0); d_ValueFill(d_ps2d + originalfp.maskinnerradius, originalfp.maskouterradius - originalfp.maskinnerradius, (tfloat)1); CudaMemcpyMulti(d_ps2d, d_ps2d, dimspolar.x, norigins); d_NormMonolithic(d_ps1d, d_ps1d, dimspolar.x, d_ps2d, T_NORM_MEAN01STD, norigins); //d_WriteMRC(d_ps1d, toInt3(dimspolar.x, norigins, 1), "d_ps1d.mrc"); hipFree(d_ps2dmax); hipFree(d_ps2dmin); hipFree(d_ps2dcoords); hipFree(d_ps2dpolar); hipFree(d_ps2d); hipFree(d_ps1dmax); hipFree(d_ps1dmin); hipFree(d_origins); free(h_params); } fp = originalfp; dimspolar.x = fp.maskouterradius - fp.maskinnerradius; // Store radius & angle for each 1D target point { float2* h_ps1dcoords = (float2*)malloc(dimspolar.x * sizeof(float2)); float invhalfsize = 2.0f / (float)fp.dimsperiodogram.x; for (int r = 0; r < dimspolar.x; r++) { float rf = (float)(r + fp.maskinnerradius) * invhalfsize; h_ps1dcoords[r] = make_float2(rf, 0.0f); } hipMemcpy(d_ps1dcoords, h_ps1dcoords, dimspolar.x * sizeof(float2), hipMemcpyHostToDevice); free(h_ps1dcoords); } { for (int a = 0; a < v_angles.size(); a++) { CTFFitParams anglefp = fp; std::vector<std::pair<tfloat, CTFParams> > v_params; AddCTFParamsRange(v_params, anglefp); // Calculate defocus offsets across grid tfloat* d_griddefoci; { tfloat* h_griddefoci = (tfloat*)malloc(norigins * sizeof(tfloat)); CTFTiltParams currenttilt(startparams.imageangle, startparams.stageangle, tfloat2(v_angles[a].x, v_angles[a].y), startparams.centerparams); currenttilt.GetZGrid2D(dimsimage, fp.dimsperiodogram, h_origins, norigins, h_griddefoci); d_griddefoci = (tfloat*)CudaMallocFromHostArray(h_griddefoci, norigins * sizeof(tfloat)); free(h_griddefoci); } for (uint d = 0; d <= defocusrefinements; d++) { // Defocus search space tfloat* h_defocusoffsets = (tfloat*)malloc(v_params.size() * sizeof(tfloat)); CTFParams* h_params = (CTFParams*)malloc(v_params.size() * sizeof(CTFParams)); // Adjust copies to various defoci for (uint n = 0; n < v_params.size(); n++) { h_params[n] = startparams.centerparams; h_params[n].defocus += v_params[n].second.defocus; h_defocusoffsets[n] = v_params[n].second.defocus; } // Finally, accumulate the spectra based on the suggested defocus values tfloat* d_defocusoffsets = (tfloat*)CudaMallocFromHostArray(h_defocusoffsets, v_params.size() * sizeof(tfloat)); tfloat* d_accumulated; hipMalloc((void**)&d_accumulated, dimspolar.x * v_params.size() * sizeof(tfloat)); d_AccumulateSpectra(d_ps1d, d_griddefoci, norigins, d_accumulated, startparams.centerparams.defocus, d_defocusoffsets, startparams.centerparams, 
fp, v_params.size()); d_NormMonolithic(d_accumulated, d_accumulated, dimspolar.x, T_NORM_MEAN01STD, v_params.size()); //CudaWriteToBinaryFile("d_accumulated.bin", d_accumulated, dimspolar.x * v_params.size() * sizeof(tfloat)); hipFree(d_defocusoffsets); free(h_defocusoffsets); // Simulate CTF tfloat* d_ctfsim; hipMalloc((void**)&d_ctfsim, dimspolar.x * v_params.size() * sizeof(tfloat)); d_CTFSimulate(h_params, d_ps1dcoords, NULL, d_ctfsim, dimspolar.x, true, false, v_params.size()); d_NormMonolithic(d_ctfsim, d_ctfsim, dimspolar.x, T_NORM_MEAN01STD, v_params.size()); //CudaWriteToBinaryFile("d_ctfsim.bin", d_ctfsim, dimspolar.x * v_params.size() * sizeof(tfloat)); free(h_params); // Correlate d_MultiplyByVector(d_ctfsim, d_accumulated, d_ctfsim, dimspolar.x * v_params.size()); d_SumMonolithic(d_ctfsim, d_accumulated, dimspolar.x, v_params.size()); tfloat* h_scores = (tfloat*)MallocFromDeviceArray(d_accumulated, v_params.size() * sizeof(tfloat)); for (uint n = 0; n < v_params.size(); n++) v_params[n].first = h_scores[n] / (tfloat)dimspolar.x; free(h_scores); hipFree(d_ctfsim); hipFree(d_accumulated); // Sort defoci by score in descending order std::sort(v_params.begin(), v_params.end(), [](const std::pair<tfloat, CTFFitParams> &a, const std::pair<tfloat, CTFFitParams> &b) -> bool { return a.first > b.first; }); if (d < defocusrefinements) { std::vector<std::pair<tfloat, CTFParams> > v_newparams; anglefp.defocus.z /= 4.0; for (uint f = 0; f < 5; f++) { CTFFitParams localfp; localfp.defocus = tfloat3(v_params[f].second.defocus - anglefp.defocus.z * 3.0, v_params[f].second.defocus + anglefp.defocus.z * 3.0, anglefp.defocus.z); AddCTFParamsRange(v_newparams, localfp); } v_params = v_newparams; } } hipFree(d_griddefoci); v_results.push_back(tfloat2(v_params[0].first, v_params[0].second.defocus)); } } hipFree(d_ps1d); hipFree(d_ps1dcoords); free(h_origins); } }
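/* The .cu listing that follows is the pre-hipify original of the file above;
   as far as the two listings go, they differ only in the hipify banner, the
   added hip/hip_runtime.h include, and the runtime-API prefixes
   (hipMalloc/cudaMalloc, hipMemcpy/cudaMemcpy,
   hipMemcpyHostToDevice/cudaMemcpyHostToDevice, hipFree/cudaFree).
   A minimal portability shim along these lines (an illustrative sketch, not
   part of either file; the gpu* names are invented for the example) would let
   a single source build with either toolchain: */
#if defined(__HIPCC__)
    #include <hip/hip_runtime.h>
    #define gpuMalloc             hipMalloc
    #define gpuMemcpy             hipMemcpy
    #define gpuMemcpyHostToDevice hipMemcpyHostToDevice
    #define gpuFree               hipFree
#else
    #include <cuda_runtime.h>
    #define gpuMalloc             cudaMalloc
    #define gpuMemcpy             cudaMemcpy
    #define gpuMemcpyHostToDevice cudaMemcpyHostToDevice
    #define gpuFree               cudaFree
#endif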
e1df70620f45ee8b3ef1d70cedddb548424e58ae.cu
#include "Prerequisites.cuh" #include "Angles.cuh" #include "Correlation.cuh" #include "CTF.cuh" #include "FFT.cuh" #include "Generics.cuh" #include "Helper.cuh" #include "ImageManipulation.cuh" #include "Masking.cuh" #include "Optimization.cuh" #include "Transformation.cuh" namespace gtom { //////////////////////////// //CUDA kernel declarations// //////////////////////////// __global__ void AccumulateSpectraKernel(tfloat* d_ps1d, tfloat* d_defoci, uint nspectra, uint length, tfloat* d_accumulated, tfloat accumulateddefocus, tfloat* d_perbatchoffsets, uint lowfreq, uint relevantlength, tfloat cs, tfloat lambda, tfloat pxfactor); ///////////////////////////////// //Auxiliary methods and kernels// ///////////////////////////////// void PopulateAngles(std::vector<tfloat3> &v_angles, tfloat3 phibracket, tfloat3 thetabracket, tfloat3 psibracket) { for (tfloat psi = psibracket.x; psi <= psibracket.y + 1e-5f; psi += psibracket.z) { for (tfloat theta = thetabracket.x; theta <= thetabracket.y + 1e-5f; theta += thetabracket.z) { for (tfloat phi = phibracket.x; phi <= phibracket.y + 1e-5f; phi += phibracket.z) { v_angles.push_back(tfloat3(phi, theta, psi)); if (phibracket.z == 0) break; } if (thetabracket.z == 0) break; } if (psibracket.z == 0) break; } } void d_AccumulateSpectra(tfloat* d_ps1d, tfloat* d_defoci, uint nspectra, tfloat* d_accumulated, tfloat accumulateddefocus, tfloat* d_perbatchoffsets, CTFParams p, CTFFitParams fp, uint batch) { uint length = fp.dimsperiodogram.x / 2; uint relevantlength = fp.maskouterradius - fp.maskinnerradius; CTFParamsLean lean = CTFParamsLean(p, toInt3(fp.dimsperiodogram)); dim3 TpB = dim3(tmin(128, NextMultipleOf(relevantlength, 32))); dim3 grid = dim3((relevantlength + TpB.x - 1) / TpB.x, batch); AccumulateSpectraKernel << <grid, TpB >> > (d_ps1d, d_defoci, nspectra, length, d_accumulated, accumulateddefocus, d_perbatchoffsets, fp.maskinnerradius, relevantlength, lean.Cs, lean.lambda, lean.ny); } __global__ void AccumulateSpectraKernel(tfloat* d_ps1d, tfloat* d_defoci, uint nspectra, uint length, tfloat* d_accumulated, tfloat accumulateddefocus, tfloat* d_perbatchoffsets, uint lowfreq, uint relevantlength, tfloat cs, tfloat lambda, tfloat pxfactor) { uint id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= relevantlength) return; tfloat sum = 0; uint samples = 0; double K = (double)(id + lowfreq) * pxfactor; double K2 = K * K; double K4 = K2 * K2; double defocusoffset = d_perbatchoffsets[blockIdx.y]; double D = (accumulateddefocus + defocusoffset) * 1e10; double lambda2 = lambda * lambda; double lambda4 = lambda2 * lambda2; double cs2 = cs * cs; for (uint n = 0; n < nspectra; n++, d_ps1d += length) { double d = (d_defoci[n] + defocusoffset) * 1e10; if (d < 0 != D < 0) // Combining defoci with different signs won't work continue; double k = sqrt(abs(abs(d) - sqrt(cs2 * K4 * lambda4 + 2.0 * cs * D * K2 * lambda2 + d * d)) / (cs * lambda2)); k /= pxfactor; if (ceil(k) >= length) // Out of range continue; // Cubic interpolation uint p1 = k; tfloat sample0 = d_ps1d[tmax(1U, p1) - 1]; tfloat sample1 = d_ps1d[p1]; tfloat sample2 = d_ps1d[tmin(length - 1, p1 + 1)]; tfloat sample3 = d_ps1d[tmin(length - 1, p1 + 2)]; tfloat factor0 = -0.5f * sample0 + 1.5f * sample1 - 1.5f * sample2 + 0.5f * sample3; tfloat factor1 = sample0 - 2.5f * sample1 + 2.0f * sample2 - 0.5f * sample3; tfloat factor2 = -0.5f * sample0 + 0.5f * sample2; tfloat factor3 = sample1; tfloat interp = k - (tfloat)p1; sum += ((factor0 * interp + factor1) * interp + factor2) * interp + factor3; 
samples++; } d_accumulated[relevantlength * blockIdx.y + id] = sum / (tfloat)tmax(1U, samples); } /////////////////////////////////////////// //Fit defocus and tilt in tilted specimen// /////////////////////////////////////////// void h_CTFTiltFit(tfloat* h_image, int2 dimsimage, uint nimages, float overlapfraction, std::vector<CTFTiltParams> &startparams, CTFFitParams fp, tfloat maxtheta, tfloat2 &specimentilt, tfloat* h_defoci) { tfloat* d_image; cudaMalloc((void**)&d_image, Elements2(dimsimage) * sizeof(tfloat)); /*tfloat* d_imagecropped; cudaMalloc((void**)&d_imagecropped, Elements2(dimsimage) * sizeof(tfloat));*/ tfloat3 phibracket = tfloat3(0.0f, ToRad(360.0f), ToRad(20.0f)); tfloat3 thetabracket = tfloat3(0.0f, maxtheta, ToRad(4.0f)); for (uint i = 0; i < nimages; i++) h_defoci[i] = 0; for (uint r = 0; r < 4; r++) { int2 anglegrid = toInt2((phibracket.y - phibracket.x + 1e-5f) / phibracket.z + 1, (thetabracket.y - thetabracket.x + 1e-5f) / thetabracket.z + 1); std::vector<tfloat3> v_angles; PopulateAngles(v_angles, phibracket, thetabracket, tfloat3(0)); tfloat* h_scores = MallocValueFilled(Elements2(anglegrid), (tfloat)0); tfloat* h_samples = MallocValueFilled(Elements2(anglegrid), (tfloat)0); if (Elements2(anglegrid) != v_angles.size()) throw; for (uint i = 0; i < nimages; i++) { cudaMemcpy(d_image, h_image + Elements2(dimsimage) * i, Elements2(dimsimage) * sizeof(tfloat), cudaMemcpyHostToDevice); /*int2 dimscropped = toInt2(cos(startparams[i].stageangle.y) * dimsimage.x, dimsimage.y); dimscropped.x += dimscropped.x % 2; d_Pad(d_image, d_imagecropped, toInt3(dimsimage), toInt3(dimscropped), T_PAD_VALUE, (tfloat)0);*/ std::vector<tfloat2> v_results; CTFTiltParams adjustedparams(startparams[i].imageangle, startparams[i].stageangle, startparams[i].specimenangle, startparams[i].centerparams); adjustedparams.centerparams.defocus += h_defoci[i]; d_CTFTiltFit(d_image, dimsimage, overlapfraction, adjustedparams, fp, v_angles, 3, v_results); // First, add to the average score grid... for (uint n = 0; n < Elements2(anglegrid); n++) { h_samples[n] += 1.0;// pow(cos(startparams[i].stageangle.y), 2.0); h_scores[n] += v_results[n].x;// *pow(cos(startparams[i].stageangle.y), 2.0); } // ... 
then, take the defocus value with the highest score std::sort(v_results.begin(), v_results.end(), [](const tfloat2 &a, const tfloat2 &b) -> bool { return a.x > b.x; }); h_defoci[i] += v_results[0].y; // Update adjustment for next iteration so it doesn't start the search from scratch } for (uint n = 0; n < Elements2(anglegrid); n++) h_scores[n] /= h_samples[n]; //WriteToBinaryFile("d_scores.bin", h_scores, Elements2(anglegrid) * sizeof(tfloat)); free(h_samples); int2 maxposition; tfloat maxvalue = -1e30f; for (uint y = 0; y < anglegrid.y; y++) for (uint x = 0; x < anglegrid.x; x++) if (h_scores[y * anglegrid.x + x] > maxvalue) { maxposition = toInt2(x, y); maxvalue = h_scores[y * anglegrid.x + x]; } free(h_scores); // Update tilt estimate and shrink search brackets specimentilt = tfloat2(phibracket.x + maxposition.x * phibracket.z, thetabracket.x + maxposition.y * thetabracket.z); phibracket = tfloat3(specimentilt.x - phibracket.z * 0.75f, specimentilt.x + phibracket.z * 0.75f, phibracket.z / 4.0f); thetabracket = tfloat3(specimentilt.y - thetabracket.z * 0.75f, specimentilt.y + thetabracket.z * 0.75f, thetabracket.z / 4.0f); // Adjust defocus search bracket after first iteration, as the first defocus estimate can't be too far from the truth if (r == 0) { fp.defocus.x = -fp.defocus.z * 1.5; fp.defocus.y = fp.defocus.z * 1.5; fp.defocus.z /= 4.0; } } //cudaFree(d_imagecropped); cudaFree(d_image); } void d_CTFTiltFit(tfloat* d_image, int2 dimsimage, float overlapfraction, CTFTiltParams &startparams, CTFFitParams fp, std::vector<tfloat3> &v_angles, int defocusrefinements, std::vector<tfloat2> &v_results) { CTFFitParams originalfp = fp; fp.maskinnerradius = 0; fp.maskouterradius = fp.dimsperiodogram.x / 2; int2 dimspolar = GetCart2PolarFFTSize(fp.dimsperiodogram); dimspolar.x = fp.maskouterradius - fp.maskinnerradius; // Create grid, allocate memory for spectra int2 dimsgrid; int3* h_origins = GetEqualGridSpacing(dimsimage, fp.dimsperiodogram, overlapfraction, dimsgrid); uint norigins = Elements2(dimsgrid); // Allocate memory for spectra and point coords tfloat* d_ps1d; cudaMalloc((void**)&d_ps1d, dimspolar.x * norigins * sizeof(tfloat)); float2* d_ps1dcoords; cudaMalloc((void**)&d_ps1dcoords, dimspolar.x * norigins * sizeof(float2)); { int3* d_origins = (int3*)CudaMallocFromHostArray(h_origins, norigins * sizeof(int3)); tfloat* d_ps1dmin; cudaMalloc((void**)&d_ps1dmin, dimspolar.x * sizeof(tfloat)); tfloat* d_ps1dmax; cudaMalloc((void**)&d_ps1dmax, dimspolar.x * sizeof(tfloat)); tfloat* d_ps2d; // Extracted spectra in Cartesian coords cudaMalloc((void**)&d_ps2d, ElementsFFT2(fp.dimsperiodogram) * norigins * sizeof(tfloat)); tfloat* d_ps2dpolar; cudaMalloc((void**)&d_ps2dpolar, Elements2(dimspolar) * norigins * sizeof(tfloat)); float2* d_ps2dcoords; cudaMalloc((void**)&d_ps2dcoords, Elements2(dimspolar) * sizeof(float2)); CTFParams* h_params = (CTFParams*)malloc(norigins * sizeof(CTFParams)); for (uint n = 0; n < norigins; n++) h_params[n] = startparams.centerparams; d_CTFFitCreateTarget2D(d_image, dimsimage, d_origins, h_params, norigins, fp, d_ps2dpolar, d_ps2dcoords, true, d_ps1dmin, d_ps1dmax); // All averaged to one for background d_SubtractVector(d_ps1dmax, d_ps1dmin, d_ps1dmax, dimspolar.x); d_MaxOp(d_ps1dmax, 0.2f, d_ps1dmax, dimspolar.x); // Extract, average, convert to polar d_CTFPeriodogram(d_image, dimsimage, d_origins, norigins, fp.dimsperiodogram, fp.dimsperiodogram, d_ps2d); d_Cart2PolarFFT(d_ps2d, d_ps2dpolar, fp.dimsperiodogram, T_INTERP_CUBIC, fp.maskinnerradius, 
fp.maskouterradius, norigins); // Create polar background image tfloat* d_ps2dmin; cudaMalloc((void**)&d_ps2dmin, Elements2(dimspolar) * sizeof(tfloat)); CudaMemcpyMulti(d_ps2dmin, d_ps1dmin, dimspolar.x, dimspolar.y, 1); //d_WriteMRC(d_ps2dmin, toInt3(dimspolar.x, dimspolar.y, 1), "d_ps2dmin.mrc"); tfloat* d_ps2dmax; cudaMalloc((void**)&d_ps2dmax, Elements2(dimspolar) * sizeof(tfloat)); CudaMemcpyMulti(d_ps2dmax, d_ps1dmax, dimspolar.x, dimspolar.y, 1); //d_WriteMRC(d_ps2dmax, toInt3(dimspolar.x, dimspolar.y, 1), "d_ps2dmax.mrc"); // Subtract background and normalize d_SubtractVector(d_ps2dpolar, d_ps2dmin, d_ps2dpolar, Elements2(dimspolar), norigins); d_DivideSafeByVector(d_ps2dpolar, d_ps2dmax, d_ps2dpolar, Elements2(dimspolar), norigins); d_NormMonolithic(d_ps2dpolar, d_ps2dpolar, Elements2(dimspolar), T_NORM_MEAN01STD, norigins); // Create 1D targets and normalize d_CTFFitCreateTarget1D(d_ps2dpolar, d_ps2dcoords, dimspolar, h_params, norigins, fp, d_ps1d, d_ps1dcoords); d_ValueFill(d_ps2d, dimspolar.x, (tfloat)0); d_ValueFill(d_ps2d + originalfp.maskinnerradius, originalfp.maskouterradius - originalfp.maskinnerradius, (tfloat)1); CudaMemcpyMulti(d_ps2d, d_ps2d, dimspolar.x, norigins); d_NormMonolithic(d_ps1d, d_ps1d, dimspolar.x, d_ps2d, T_NORM_MEAN01STD, norigins); //d_WriteMRC(d_ps1d, toInt3(dimspolar.x, norigins, 1), "d_ps1d.mrc"); cudaFree(d_ps2dmax); cudaFree(d_ps2dmin); cudaFree(d_ps2dcoords); cudaFree(d_ps2dpolar); cudaFree(d_ps2d); cudaFree(d_ps1dmax); cudaFree(d_ps1dmin); cudaFree(d_origins); free(h_params); } fp = originalfp; dimspolar.x = fp.maskouterradius - fp.maskinnerradius; // Store radius & angle for each 1D target point { float2* h_ps1dcoords = (float2*)malloc(dimspolar.x * sizeof(float2)); float invhalfsize = 2.0f / (float)fp.dimsperiodogram.x; for (int r = 0; r < dimspolar.x; r++) { float rf = (float)(r + fp.maskinnerradius) * invhalfsize; h_ps1dcoords[r] = make_float2(rf, 0.0f); } cudaMemcpy(d_ps1dcoords, h_ps1dcoords, dimspolar.x * sizeof(float2), cudaMemcpyHostToDevice); free(h_ps1dcoords); } { for (int a = 0; a < v_angles.size(); a++) { CTFFitParams anglefp = fp; std::vector<std::pair<tfloat, CTFParams> > v_params; AddCTFParamsRange(v_params, anglefp); // Calculate defocus offsets across grid tfloat* d_griddefoci; { tfloat* h_griddefoci = (tfloat*)malloc(norigins * sizeof(tfloat)); CTFTiltParams currenttilt(startparams.imageangle, startparams.stageangle, tfloat2(v_angles[a].x, v_angles[a].y), startparams.centerparams); currenttilt.GetZGrid2D(dimsimage, fp.dimsperiodogram, h_origins, norigins, h_griddefoci); d_griddefoci = (tfloat*)CudaMallocFromHostArray(h_griddefoci, norigins * sizeof(tfloat)); free(h_griddefoci); } for (uint d = 0; d <= defocusrefinements; d++) { // Defocus search space tfloat* h_defocusoffsets = (tfloat*)malloc(v_params.size() * sizeof(tfloat)); CTFParams* h_params = (CTFParams*)malloc(v_params.size() * sizeof(CTFParams)); // Adjust copies to various defoci for (uint n = 0; n < v_params.size(); n++) { h_params[n] = startparams.centerparams; h_params[n].defocus += v_params[n].second.defocus; h_defocusoffsets[n] = v_params[n].second.defocus; } // Finally, accumulate the spectra based on the suggested defocus values tfloat* d_defocusoffsets = (tfloat*)CudaMallocFromHostArray(h_defocusoffsets, v_params.size() * sizeof(tfloat)); tfloat* d_accumulated; cudaMalloc((void**)&d_accumulated, dimspolar.x * v_params.size() * sizeof(tfloat)); d_AccumulateSpectra(d_ps1d, d_griddefoci, norigins, d_accumulated, startparams.centerparams.defocus, 
d_defocusoffsets, startparams.centerparams, fp, v_params.size()); d_NormMonolithic(d_accumulated, d_accumulated, dimspolar.x, T_NORM_MEAN01STD, v_params.size()); //CudaWriteToBinaryFile("d_accumulated.bin", d_accumulated, dimspolar.x * v_params.size() * sizeof(tfloat)); cudaFree(d_defocusoffsets); free(h_defocusoffsets); // Simulate CTF tfloat* d_ctfsim; cudaMalloc((void**)&d_ctfsim, dimspolar.x * v_params.size() * sizeof(tfloat)); d_CTFSimulate(h_params, d_ps1dcoords, NULL, d_ctfsim, dimspolar.x, true, false, v_params.size()); d_NormMonolithic(d_ctfsim, d_ctfsim, dimspolar.x, T_NORM_MEAN01STD, v_params.size()); //CudaWriteToBinaryFile("d_ctfsim.bin", d_ctfsim, dimspolar.x * v_params.size() * sizeof(tfloat)); free(h_params); // Correlate d_MultiplyByVector(d_ctfsim, d_accumulated, d_ctfsim, dimspolar.x * v_params.size()); d_SumMonolithic(d_ctfsim, d_accumulated, dimspolar.x, v_params.size()); tfloat* h_scores = (tfloat*)MallocFromDeviceArray(d_accumulated, v_params.size() * sizeof(tfloat)); for (uint n = 0; n < v_params.size(); n++) v_params[n].first = h_scores[n] / (tfloat)dimspolar.x; free(h_scores); cudaFree(d_ctfsim); cudaFree(d_accumulated); // Sort defoci by score in descending order std::sort(v_params.begin(), v_params.end(), [](const std::pair<tfloat, CTFFitParams> &a, const std::pair<tfloat, CTFFitParams> &b) -> bool { return a.first > b.first; }); if (d < defocusrefinements) { std::vector<std::pair<tfloat, CTFParams> > v_newparams; anglefp.defocus.z /= 4.0; for (uint f = 0; f < 5; f++) { CTFFitParams localfp; localfp.defocus = tfloat3(v_params[f].second.defocus - anglefp.defocus.z * 3.0, v_params[f].second.defocus + anglefp.defocus.z * 3.0, anglefp.defocus.z); AddCTFParamsRange(v_newparams, localfp); } v_params = v_newparams; } } cudaFree(d_griddefoci); v_results.push_back(tfloat2(v_params[0].first, v_params[0].second.defocus)); } } cudaFree(d_ps1d); cudaFree(d_ps1dcoords); free(h_origins); } }
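// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch, not part of the original gtom sources.
// AccumulateSpectraKernel above samples each 1D spectrum at a non-integer
// radius with a border-clamped Catmull-Rom cubic. The host-side reference
// below reproduces that lookup for CPU validation; the function name
// catmullRomSample1D is hypothetical.
template <typename T>
static T catmullRomSample1D(const T* data, unsigned length, double k)
{
    unsigned p1 = (unsigned)k;                       // integer part of the index
    T t = (T)(k - (double)p1);                       // fractional offset in [0, 1)

    // Neighbour indices clamped at the borders, exactly as in the kernel.
    T s0 = data[(p1 > 0 ? p1 : 1) - 1];
    T s1 = data[p1];
    T s2 = data[p1 + 1 < length ? p1 + 1 : length - 1];
    T s3 = data[p1 + 2 < length ? p1 + 2 : length - 1];

    // Catmull-Rom spline coefficients, evaluated in Horner form.
    T c3 = (T)-0.5 * s0 + (T)1.5 * s1 - (T)1.5 * s2 + (T)0.5 * s3;
    T c2 = s0 - (T)2.5 * s1 + (T)2.0 * s2 - (T)0.5 * s3;
    T c1 = (T)-0.5 * s0 + (T)0.5 * s2;
    T c0 = s1;
    return ((c3 * t + c2) * t + c1) * t + c0;
}
// ---------------------------------------------------------------------------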
60b2152366514114cb95046d0ac401105a8a6d76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * flyingEdgesAlgorithm.cpp * * Created on: Feb 17, 2017 * Author: dbourge */ #include "FlyingEdgesAlgorithm.h" #include "CudaMarchingCubesTables.h" #include <numeric> #include <algorithm> // TODO #include <iostream> // TODO #define MAX_X_GRID 65535 // says larger but doesnt work if larger.. #define MAX_Y_GRID 65535 #define MAX_Z_GRID 65535 #define MAX_X_BLOCK 1024 #define MAX_Y_BLOCK 1024 #define MAX_Z_BLOCK 64 #define MAX_THREAD_PER_BLOCK 1024 #define DEBUG true // TODO figure out how to handle errors bool validKernelSize(uint3 const& gridDim, uint3 const& blockDim) { if(gridDim.x > MAX_X_GRID) return false; if(gridDim.y > MAX_Y_GRID) return false; if(gridDim.z > MAX_Z_GRID) return false; if(blockDim.x > MAX_X_BLOCK) return false; if(blockDim.y > MAX_Y_BLOCK) return false; if(blockDim.z > MAX_Z_BLOCK) return false; if(blockDim.x * blockDim.y * blockDim.z > MAX_THREAD_PER_BLOCK) return false; return true; } // TODO make sure pointValues stored in const memory /////////////////////////////////////////////////////////////////////////////// // Pass 1 of the algorithm /////////////////////////////////////////////////////////////////////////////// __device__ uchar calcCaseEdge( bool const& prevEdge, bool const& currEdge) { // o -- is greater than or equal to // case 0: (i-1) o-----o (i) | (_,j,k) // case 1: (i-1) x-----o (i) | (_,j+1,k) // case 2: (i-1) o-----x (i) | (_,j,k+1) // case 3: (i-1) x-----x (i) | (_,j+1,k+1) if(prevEdge && currEdge) return 0; if(!prevEdge && currEdge) return 1; if(prevEdge && !currEdge) return 2; else // !prevEdge && !currEdge return 3; } __global__ void pass1gpu_edgeCases( scalar_t* pointValues, scalar_t isoval, int nx, int ny, uchar* edgeCases) { // Each row has several blocks // Each thread is one point int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y; int k = blockIdx.z; __shared__ bool isGE[FE_BLOCK_WIDTH_PLUS_ONE]; if(i < nx) isGE[threadIdx.x] = pointValues[k*nx*ny + j*nx + i] >= isoval; if(threadIdx.x == 0 && i < nx-1) { isGE[blockDim.x] = pointValues[k*nx*ny + j*nx + i + blockDim.x] >= isoval; } __syncthreads(); if(i < nx-1) { uchar caseEdge = calcCaseEdge(isGE[threadIdx.x], isGE[threadIdx.x + 1]); edgeCases[k*(nx-1)*ny + j*(nx-1) + i] = caseEdge; } } __global__ void pass1gpu_trim( int nx, int ny, int nz, // input uchar* edgeCases, // input FlyingEdgesAlgorithm::gridEdge* gridEdges) // output { int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; if(j >= ny || k >= nz) return; size_t xl = nx; size_t xr = 0; uchar* curEdgeCases = edgeCases + k*(nx-1)*ny + j*(nx-1); for(int i = 0; i != nx-1; ++i) { if(curEdgeCases[i] == 1 || curEdgeCases[i] == 2) { if(xl == nx) xl = i; xr = i+1; } } gridEdges[k*ny + j].xl = xl; gridEdges[k*ny + j].xr = xr; } void FlyingEdgesAlgorithm::pass1() { int tx = FE_BLOCK_WIDTH; uint3 gridDim = make_uint3(((nx-1) + tx - 1) / tx, ny, nz); uint3 blockDim = make_uint3(tx, 1, 1); if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO hipLaunchKernelGGL(( pass1gpu_edgeCases), dim3(gridDim), dim3(blockDim), 0, 0, pointValues, isoval, nx, ny, edgeCases); int ty = FE_BLOCK_WIDTH_Y; int tz = FE_BLOCK_WIDTH_Z; gridDim = make_uint3((ny + ty - 1) / ty, (nz + tz - 1) / tz, 1); blockDim = make_uint3(ty, tz, 1); if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO hipLaunchKernelGGL(( 
pass1gpu_trim), dim3(gridDim), dim3(blockDim), 0, 0, nx, ny, nz, edgeCases, gridEdges); hipDeviceSynchronize(); if(DEBUG) { int numGE = nz*ny; gridEdge* hostGEs = (gridEdge*)malloc(numGE*sizeof(gridEdge)); hipMemcpy(hostGEs, gridEdges, numGE*sizeof(gridEdge), hipMemcpyDeviceToHost); int numCubes=(nx-1)*ny*nz; size_t count = 0; uchar* hoseEdgeCases = (uchar*)malloc(numCubes*sizeof(uchar)); hipMemcpy(hoseEdgeCases, edgeCases, numCubes*sizeof(uchar), hipMemcpyDeviceToHost); for(int idx = 0; idx != numCubes; ++idx) { uchar const& val = hoseEdgeCases[idx]; count += val; } std::cout << "Edgecase counter: " << count << std::endl; free(hoseEdgeCases); size_t countL = 0; size_t countR = 0; for(int idx = 0; idx != numGE; ++idx) { countL += hostGEs[idx].xl; countR += hostGEs[idx].xr; } std::cout << "xl, xr: " << countL << ", " << countR << std::endl; free(hostGEs); } } /////////////////////////////////////////////////////////////////////////////// // Pass 2 of the algorithm /////////////////////////////////////////////////////////////////////////////// __device__ void calcTrimValues( int& xl, int& xr, FlyingEdgesAlgorithm::gridEdge const& ge0, FlyingEdgesAlgorithm::gridEdge const& ge1, FlyingEdgesAlgorithm::gridEdge const& ge2, FlyingEdgesAlgorithm::gridEdge const& ge3) { xl = min(ge0.xl, min(ge1.xl, min(ge2.xl, ge3.xl))); xr = max(ge0.xr, max(ge1.xr, max(ge2.xr, ge3.xr))); if(xl > xr) xl = xr; } __device__ uchar calcCubeCase( uchar const& ec0, uchar const& ec1, uchar const& ec2, uchar const& ec3) { // ec0 | (_,j,k) // ec1 | (_,j+1,k) // ec2 | (_,j,k+1) // ec3 | (_,j+1,k+1) uchar caseId = 0; if((ec0 == 0) || (ec0 == 2)) // 0 | (i,j,k) caseId |= 1; if((ec0 == 0) || (ec0 == 1)) // 1 | (i+1,j,k) caseId |= 2; if((ec1 == 0) || (ec1 == 1)) // 2 | (i+1,j+1,k) caseId |= 4; if((ec1 == 0) || (ec1 == 2)) // 3 | (i,j+1,k) caseId |= 8; if((ec2 == 0) || (ec2 == 2)) // 4 | (i,j,k+1) caseId |= 16; if((ec2 == 0) || (ec2 == 1)) // 5 | (i+1,j,k+1) caseId |= 32; if((ec3 == 0) || (ec3 == 1)) // 6 | (i+1,j+1,k+1) caseId |= 64; if((ec3 == 0) || (ec3 == 2)) // 7 | (i,j+1,k+1) caseId |= 128; return caseId; } __global__ void pass2gpu_cubeCases( int nx, int ny, int nz, uchar* edgeCases, FlyingEdgesAlgorithm::gridEdge* gridEdges, int* triCounter, uchar* cubeCases) { int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; if(j >= ny-1 || k >= nz-1) return; FlyingEdgesAlgorithm::gridEdge& ge0 = gridEdges[k*ny + j]; FlyingEdgesAlgorithm::gridEdge& ge1 = gridEdges[k*ny + j + 1]; FlyingEdgesAlgorithm::gridEdge& ge2 = gridEdges[(k+1)*ny + j]; FlyingEdgesAlgorithm::gridEdge& ge3 = gridEdges[(k+1)*ny + j + 1]; uchar* ec0 = edgeCases + k*ny*(nx-1) + j*(nx-1); uchar* ec1 = edgeCases + k*ny*(nx-1) + (j+1)*(nx-1); uchar* ec2 = edgeCases + (k+1)*ny*(nx-1) + j*(nx-1); uchar* ec3 = edgeCases + (k+1)*ny*(nx-1) + (j+1)*(nx-1); int xl, xr; calcTrimValues(xl, xr, ge0, ge1, ge2, ge3); int triCount = 0; uchar* curCubeCases = cubeCases + k*(nx-1)*(ny-1) + j*(nx-1); int xstart = 0; int ystart = 0; int zstart = 0; // TODO don't set initial values in gridEdge Constructor; const bool* isCut; for(int i = xl; i != xr; ++i) // What happens here on a gpu? // I imagine it takes the max xr-xl of all blocks { uchar caseId = calcCubeCase(ec0[i], ec1[i], ec2[i], ec3[i]); curCubeCases[i] = caseId; // Can't imagine this would do anything on a gpu unless all threads // on a block evaluated to the same value. 
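// (Editor's note) Each cube owns the three edges leaving its minimal corner -- edge 0 along x,
// edge 3 along y and edge 8 along z -- so counting only isCut[0], isCut[3] and isCut[8] below
// counts every interior edge exactly once; the +x-face edges 1 and 9 of the last cube in the
// row are added separately after the loop.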
if(caseId == 0 || caseId == 255) { continue; } triCount += cuda_util::numTris[caseId]; isCut = cuda_util::isCut[caseId]; // if xr == nx-1, then xr-1 is cut // so this will be set xstart += isCut[0]; ystart += isCut[3]; zstart += isCut[8]; } triCounter[k*(ny-1) + j] = triCount; if(xr == nx-1) { // isCut was set at i = xr-1 ystart += isCut[1]; zstart += isCut[9]; } ge0.xstart = xstart; ge0.ystart = ystart; ge0.zstart = zstart; } __global__ void pass2gpu_ghost_xz( int nx, int ny, int nz, uchar* edgeCases, FlyingEdgesAlgorithm::gridEdge* gridEdges) { int k = blockIdx.x * blockDim.x + threadIdx.x; if(k >= nz) // This function will deal with gridEdge at (_, ny-1, nz-1) return; bool isCorner = k == nz-1; int j = ny-1; FlyingEdgesAlgorithm::gridEdge& ge0 = gridEdges[k*ny + j]; // If isCorner, this is just bogus. FlyingEdgesAlgorithm::gridEdge& ge1 = gridEdges[(1-isCorner)*(k+1)*ny + j]; uchar* ec0 = edgeCases + k*ny*(nx-1) + j*(nx-1); // If isCorner, this is just bogus uchar* ec1 = edgeCases + (1-isCorner)*(k+1)*ny*(nx-1) + j*(nx-1); int xl = min(ge0.xl, nx*isCorner + (1-isCorner)*ge1.xl); int xr = max(ge0.xr, (1-isCorner)*ge1.xr); int xstart = 0; int zstart = 0; // TODO don't set initial values in gridEdge Constructor; uchar c0; uchar c1; for(int i = xl; i != xr; ++i) { c0 = ec0[i]; c1 = ec1[i]; // see if the edges are cut xstart += (c0 == 1 || c0 == 2); // bogus if isCorner zstart += ( (c0 == 0 && c1 == 1) || (c0 == 0 && c1 == 3) || (c0 == 1 && c1 == 2) || (c0 == 2 && c1 == 3) ); } if(xr == nx-1) { // bogus if isCorner zstart += ( (c0 == 0 && c1 == 2) || (c0 == 0 && c1 == 3) || (c0 == 1 && c1 == 2) || (c0 == 1 && c1 == 3) ); } ge0.xstart = xstart; ge0.ystart = 0; ge0.zstart = zstart*(1-isCorner); } __global__ void pass2gpu_ghost_xy( int nx, int ny, int nz, uchar* edgeCases, FlyingEdgesAlgorithm::gridEdge* gridEdges) { int j = blockIdx.x * blockDim.x + threadIdx.x; if(j >= ny-1) return; int k = nz-1; FlyingEdgesAlgorithm::gridEdge& ge0 = gridEdges[k*ny + j]; FlyingEdgesAlgorithm::gridEdge& ge1 = gridEdges[k*ny + j + 1]; uchar* ec0 = edgeCases + k*ny*(nx-1) + j*(nx-1); uchar* ec1 = edgeCases + k*ny*(nx-1) + (j+1)*(nx-1); int xl = min(ge0.xl, ge1.xl); int xr = max(ge0.xr, ge1.xr); if(xl >= xr) return; int xstart = 0; int ystart = 0; // TODO don't set initial values in gridEdge Constructor; uchar c0; uchar c1; for(int i = xl; i != xr; ++i) { c0 = ec0[i]; c1 = ec1[i]; // see if the edges are cut xstart += (c0 == 1 || c0 == 2); ystart += ( (c0 == 0 && c1 == 1) || (c0 == 0 && c1 == 3) || (c0 == 1 && c1 == 2) || (c0 == 2 && c1 == 3) ); } if(xr == nx-1) { ystart += ( (c0 == 0 && c1 == 2) || (c0 == 0 && c1 == 3) || (c0 == 1 && c1 == 2) || (c0 == 1 && c1 == 3) ); } ge0.xstart = xstart; ge0.ystart = ystart; ge0.zstart = 0; } void FlyingEdgesAlgorithm::pass2() { // pass2 calculates // 1) cubeCases for each block ray // 2) triCount for each block ray // 3) edgeRay count // 1st kernel: Calculate the 0, 1, 2 edge ray, cube cases, tricount // 2nd kernel: Calculate lost edges int ty = FE_BLOCK_WIDTH_Y; int tz = FE_BLOCK_WIDTH_Z; uint3 gridDim = make_uint3(((ny-1) + ty - 1) / ty, ((nz-1) + tz - 1) / tz, 1); uint3 blockDim = make_uint3(ty, tz, 1); if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO hipLaunchKernelGGL(( pass2gpu_cubeCases), dim3(gridDim), dim3(blockDim), 0, 0, nx, ny, nz, edgeCases, gridEdges, // modified triCounter, // modified cubeCases); // modified // POSSIBLE to do this here TODO // hipFree(edgeCases); if(DEBUG) { std::cout << 
"MEOWWWWWW " << hipGetErrorString(hipGetLastError()) << std::endl; } if(DEBUG) { size_t sz = (nx-1)*(ny-1)*(nz-1)*sizeof(uchar); hipDeviceSynchronize(); uchar* hostCubeCases = (uchar*)malloc(sz); hipMemcpy(hostCubeCases, cubeCases, sz, hipMemcpyDeviceToHost); int count = 0; // TODO hostCubeCases is not the same every time. for(int i = 0; i != (nx-1)*(ny-1)*(nz-1); ++i) { if(hostCubeCases[i] != 0 && hostCubeCases[i] != 255) count += hostCubeCases[i]; } std::cout << "Count cube cases " << count << std::endl; free(hostCubeCases); } // TODO these can be launched and executed independently of each other int bw = FE_BLOCK_WIDTH; // Making sure that the xz face takes care of the (_, ny-1, nz-1) gridEdge // BE CAREFUL. xz takes care of corner. don't use (nz-1) hipLaunchKernelGGL(( pass2gpu_ghost_xz), dim3((nz + bw - 1) / bw), dim3(bw), 0, 0, nx, ny, nz, edgeCases, gridEdges); hipLaunchKernelGGL(( pass2gpu_ghost_xy), dim3(((ny-1) + bw - 1) / bw), dim3(bw), 0, 0, nx, ny, nz, edgeCases, gridEdges); hipDeviceSynchronize(); std::cout << "MEOWWWWWW " << hipGetErrorString(hipGetLastError()) << std::endl; if(DEBUG) { size_t sz_ge = nx*ny*sizeof(gridEdge); gridEdge* hostges = (gridEdge*)malloc(sz_ge); auto w = hipMemcpy(hostges, gridEdges, sz_ge, hipMemcpyDeviceToHost); if(w != hipSuccess) { std::cout << "GHASDCFAKSCLKASCKAS:CKASL:CKAS:DLCKASD:" << std::endl; std::cout << hipGetErrorString(w) << std::endl; } int sumxstart = 0; for(int idx = 0; idx != nx*ny; ++idx) { sumxstart += hostges[idx].xstart; } std::cout << "sumxstart " << sumxstart << std::endl; free(hostges); } } /////////////////////////////////////////////////////////////////////////////// // Pass 3 of the algorithm /////////////////////////////////////////////////////////////////////////////// __global__ void pass3gpu_blockAccum( int nx, int ny, int nz, // which are needed TODO? int* triCounter, FlyingEdgesAlgorithm::gridEdge* gridEdges, int* blockAccum) { int k = blockIdx.y * blockDim.y + threadIdx.y; // step 1: accumulate individual y thread // step 2: calc block sum // step 3: __syncthreads // step 4: add to individual y thread __shared__ int accum[4*FE_BLOCK_WIDTH]; if(k < nz) { int tmp; int accumX = 0; int accumY = 0; int accumZ = 0; int accumTri = 0; for(int j = 0; j != ny; ++j) { FlyingEdgesAlgorithm::gridEdge& ge = gridEdges[k*ny + j]; tmp = ge.xstart; ge.xstart = accumX; accumX += tmp; tmp = ge.ystart; ge.ystart = accumY; accumY += tmp; tmp = ge.zstart; ge.zstart = accumZ; accumZ += tmp; } if(k < nz-1) { for(int j = 0; j != ny-1; ++j) { int& curTriCount = triCounter[k*(ny-1) + j]; tmp = curTriCount; curTriCount = accumTri; accumTri += tmp; } } accum[4*threadIdx.y + 0] = accumX; accum[4*threadIdx.y + 1] = accumY; accum[4*threadIdx.y + 2] = accumZ; accum[4*threadIdx.y + 3] = accumTri; } __syncthreads(); if(k < nz) { if(threadIdx.y == 0) // agh! 
{ for(int idx = 1; idx != blockDim.y; ++idx) { accum[4*idx + 0] += accum[4*(idx-1) + 0]; accum[4*idx + 1] += accum[4*(idx-1) + 1]; accum[4*idx + 2] += accum[4*(idx-1) + 2]; accum[4*idx + 3] += accum[4*(idx-1) + 3]; } // answer for global accumulation blockAccum[4*blockIdx.y + 0] = accum[4*(blockDim.y-1) + 0]; blockAccum[4*blockIdx.y + 1] = accum[4*(blockDim.y-1) + 1]; blockAccum[4*blockIdx.y + 2] = accum[4*(blockDim.y-1) + 2]; blockAccum[4*blockIdx.y + 3] = accum[4*(blockDim.y-1) + 3]; } } __syncthreads(); if(threadIdx.y == 0 || k >= nz) return; bool isEndK = k == nz-1; for(int j = 0; j != ny-1; ++j) { FlyingEdgesAlgorithm::gridEdge& ge = gridEdges[k*ny + j]; ge.xstart += accum[4*(threadIdx.y-1) + 0]; ge.ystart += accum[4*(threadIdx.y-1) + 1]; ge.zstart += accum[4*(threadIdx.y-1) + 2]; // put z stuff here.. if(!isEndK) triCounter[k*(ny-1) + j] = accum[4*(threadIdx.y-1) + 3]; } FlyingEdgesAlgorithm::gridEdge& ge = gridEdges[k*ny + (ny-1)]; ge.xstart += accum[4*(threadIdx.y-1) + 0]; ge.ystart += accum[4*(threadIdx.y-1) + 1]; ge.zstart += accum[4*(threadIdx.y-1) + 2]; } __global__ // TODO can split up along j here easy enough. void pass3gpu_gridAccum( int nx, int ny, int nz, // which are needed TODO? int* triCounter, FlyingEdgesAlgorithm::gridEdge* gridEdges, int* blockAccum) // used as input here { // not adding to the first block! // // add to individual y threads int k = (blockIdx.z + 1)*blockDim.z + threadIdx.z; if (k >= nz) return; int addX = blockAccum[4*blockIdx.z + 0]; int addY = blockAccum[4*blockIdx.z + 1]; int addZ = blockAccum[4*blockIdx.z + 2]; int addTri = blockAccum[4*blockIdx.z + 3]; for(int j = 0; j != ny; ++j) { FlyingEdgesAlgorithm::gridEdge& ge = gridEdges[k*ny + j]; ge.xstart += addX; ge.ystart += addY; ge.zstart += addZ; } if(k >= nz-1) return; for(int j = 0; j != ny-1; ++j) { triCounter[k*(ny-1) + j] += addTri; } } // Can make prettier? 
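// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch, not part of the original file. pass3
// below converts the per-gridEdge counts written in pass2 into global start
// offsets with an exclusive prefix sum carried out at three levels (per thread
// over j, per block in shared memory, per grid via blockAccum). This serial
// host reference shows the intended end result for a single counter; the
// function name exclusiveScanReference is hypothetical.
static int exclusiveScanReference(int* counts, int n) // returns the grand total
{
    int running = 0;
    for (int idx = 0; idx < n; ++idx)
    {
        int tmp = counts[idx];  // same tmp/accum swap pattern as the kernels
        counts[idx] = running;  // entry now holds where this edge's output starts
        running += tmp;
    }
    return running;             // e.g. the total point count along this axis
}
// ---------------------------------------------------------------------------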
void FlyingEdgesAlgorithm::pass3() { // Split the z axis // Kernel 1: calculate the accum values on block sync // then accum individual values // Use that info accum each block (except the first one) // Kernel 2: just add values to individual threads int tz = FE_BLOCK_WIDTH; int numBlocks = (nz + tz - 1) / tz; // there are four because: xstart, ystart, zstart, triaccum int sizeBlocks = 4 * numBlocks * sizeof(int); uint3 gridDim = make_uint3(1, numBlocks, 1); uint3 blockDim = make_uint3(1, tz, 1); int* hostBlockAccum = (int*)malloc(sizeBlocks); for(int idx = 0; idx != 4*numBlocks; ++idx) { hostBlockAccum[idx] = 0; } int* deviceBlockAccum; hipMalloc(&deviceBlockAccum, sizeBlocks); hipMemcpy(deviceBlockAccum, hostBlockAccum, sizeBlocks, hipMemcpyHostToDevice); // Accumulate values locally if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO hipLaunchKernelGGL(( pass3gpu_blockAccum), dim3(gridDim), dim3(blockDim), 0, 0, nx, ny, nz, triCounter, gridEdges, deviceBlockAccum); hipMemcpy(hostBlockAccum, deviceBlockAccum, sizeBlocks, hipMemcpyDeviceToHost); if(DEBUG) { std::cout << "ACCUM "; for(int idx = 0; idx != 4*numBlocks; ++idx) { std::cout << hostBlockAccum[idx] << " "; } std::cout << std::endl; hipDeviceSynchronize(); std::cout << "MEOWWWWWW " << hipGetErrorString(hipGetLastError()) << std::endl; } if(numBlocks != 1) { // std::partial_sum(2 2 3 4 3 2 2 ) TODO not using it get rid of header // goes to (2 4 7 11 14 16 18) // std::partial_sum(hostBlockAccum, hostBlockAccum + numBlocks, hostBlockAccum); for(int i = 4; i != 4*numBlocks; i += 4) { hostBlockAccum[i+0] += hostBlockAccum[i-4]; hostBlockAccum[i+1] += hostBlockAccum[i-3]; hostBlockAccum[i+2] += hostBlockAccum[i-2]; hostBlockAccum[i+3] += hostBlockAccum[i-1]; } // note: the last values in hostBlockAccum should contain total counts // The first block is done so it is ignored // and the last info in BlockAccum isn't needed (its the total counts) hipMemcpy(deviceBlockAccum, hostBlockAccum, sizeBlocks - 4 * sizeof(int), hipMemcpyHostToDevice); // TODO if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO // Accumulate values from other blocks gridDim = make_uint3(1, 1, numBlocks - 1); hipLaunchKernelGGL(( pass3gpu_gridAccum), dim3(gridDim), dim3(blockDim), 0, 0, nx, ny, nz, triCounter, gridEdges, deviceBlockAccum); } // Allocate memory for points, normals and tris outputAllocated = true; numPoints = hostBlockAccum[4*(numBlocks-1) + 0] + hostBlockAccum[4*(numBlocks-1) + 1] + hostBlockAccum[4*(numBlocks-1) + 2]; numTris = hostBlockAccum[4*(numBlocks-1) + 3]; // hipMalloc(&points, 3*sizeof(scalar_t)*numPoints); // hipMalloc(&normals, 3*sizeof(scalar_t)*numPoints); // hipMalloc(&tris, 3*sizeof(int)*numTris); if(DEBUG) { std::cout << "numpoints" << numPoints << std::endl; std::cout << "numtris" << numTris << std::endl; } // free memory used in this function free(hostBlockAccum); hipFree(deviceBlockAccum); hipDeviceSynchronize(); if(DEBUG) { std::cout << "MEOWWWWWW " << hipGetErrorString(hipGetLastError()) << std::endl; } } /////////////////////////////////////////////////////////////////////////////// // Pass 4 of the algorithm /////////////////////////////////////////////////////////////////////////////// __device__ void computeGradient( int const& i, int const& j, int const& k, int const& nx, int const& ny, int const& nz, scalar_t* data, scalar_t* spacing, scalar_t* point) { scalar_t x0[2]; scalar_t x1[2]; scalar_t 
x2[2]; scalar_t run[3]; size_t dataIdx = k*nx*ny + j*nx + i; if (i == 0) { x0[0] = data[dataIdx + 1]; x0[1] = data[dataIdx]; run[0] = spacing[0]; } else if (i == (nx - 1)) { x0[0] = data[dataIdx]; x0[1] = data[dataIdx - 1]; run[0] = spacing[0]; } else { x0[0] = data[dataIdx + 1]; x0[1] = data[dataIdx - 1]; run[0] = 2 * spacing[0]; } if (j == 0) { x1[0] = data[dataIdx + nx]; x1[1] = data[dataIdx]; run[1] = spacing[1]; } else if (j == (ny - 1)) { x1[0] = data[dataIdx]; x1[1] = data[dataIdx - nx]; run[1] = spacing[1]; } else { x1[0] = data[dataIdx + nx]; x1[1] = data[dataIdx - ny]; run[1] = 2 * spacing[1]; } if (k == 0) { x2[0] = data[dataIdx + nx*ny]; x2[1] = data[dataIdx]; run[2] = spacing[2]; } else if (k == (nz - 1)) { x2[0] = data[dataIdx]; x2[1] = data[dataIdx - nx*ny]; run[2] = spacing[2]; } else { x2[0] = data[dataIdx + nx*ny]; x2[1] = data[dataIdx - nx*ny]; run[2] = 2 * spacing[2]; } point[0] = (x0[1] - x0[0]) / run[0]; point[1] = (x1[1] - x1[0]) / run[1]; point[2] = (x2[1] - x2[0]) / run[2]; } __device__ void getCubeInfo( int i, int j, int k, int nx, int ny, int nz, scalar_t* pointValues, scalar_t* zeroPos, scalar_t* spacing, scalar_t* pointCube, scalar_t* isovalCube, scalar_t* gradCube) { isovalCube[0] = pointValues[k*ny*nx + j*nx + i]; isovalCube[1] = pointValues[k*ny*nx + j*nx + i+1]; isovalCube[2] = pointValues[k*ny*nx + (j+1)*nx + i+1]; isovalCube[3] = pointValues[k*ny*nx + (j+1)*nx + i]; isovalCube[4] = pointValues[(k+1)*ny*nx + j*nx + i]; isovalCube[5] = pointValues[(k+1)*ny*nx + j*nx + i+1]; isovalCube[6] = pointValues[(k+1)*ny*nx + (j+1)*nx + (i+1)]; isovalCube[7] = pointValues[(k+1)*ny*nx + (j+1)*nx + i]; scalar_t xpos = zeroPos[0] + i * spacing[0]; scalar_t ypos = zeroPos[1] + j * spacing[1]; scalar_t zpos = zeroPos[2] + k * spacing[2]; pointCube[0*3 + 0] = xpos; pointCube[0*3 + 1] = ypos; pointCube[0*3 + 2] = zpos; pointCube[1*3 + 0] = xpos + spacing[0]; pointCube[1*3 + 1] = ypos; pointCube[1*3 + 2] = zpos; pointCube[2*3 + 0] = xpos + spacing[0]; pointCube[2*3 + 1] = ypos + spacing[1]; pointCube[2*3 + 2] = zpos; pointCube[3*3 + 0] = xpos; pointCube[3*3 + 1] = ypos + spacing[1]; pointCube[3*3 + 2] = zpos; pointCube[4*3 + 0] = xpos; pointCube[4*3 + 1] = ypos; pointCube[4*3 + 2] = zpos + spacing[2]; pointCube[5*3 + 0] = xpos + spacing[0]; pointCube[5*3 + 1] = ypos; pointCube[5*3 + 2] = zpos + spacing[2]; pointCube[6*3 + 0] = xpos + spacing[0]; pointCube[6*3 + 1] = ypos + spacing[1]; pointCube[6*3 + 2] = zpos + spacing[2]; pointCube[7*3 + 0] = xpos; pointCube[7*3 + 1] = ypos + spacing[1]; pointCube[7*3 + 2] = zpos + spacing[2]; computeGradient(i , j , k , nx, ny, nz, pointValues, spacing, gradCube + 3*0); computeGradient(i+1, j , k , nx, ny, nz, pointValues, spacing, gradCube + 3*1); computeGradient(i+1, j+1, k , nx, ny, nz, pointValues, spacing, gradCube + 3*2); computeGradient(i , j+1, k , nx, ny, nz, pointValues, spacing, gradCube + 3*3); computeGradient(i , j , k+1, nx, ny, nz, pointValues, spacing, gradCube + 3*4); computeGradient(i+1, j , k+1, nx, ny, nz, pointValues, spacing, gradCube + 3*5); computeGradient(i+1, j+1, k+1, nx, ny, nz, pointValues, spacing, gradCube + 3*6); computeGradient(i , j+1, k+1, nx, ny, nz, pointValues, spacing, gradCube + 3*7); } __device__ void interpolate( scalar_t const& weight, scalar_t* a, scalar_t* b, scalar_t* out) { out[0] = a[0] + (weight * (b[0] - a[0])); out[1] = a[1] + (weight * (b[1] - a[1])); out[2] = a[2] + (weight * (b[2] - a[2])); } __device__ void interpolateOnCube( uchar const& edge, scalar_t const& isoval, scalar_t* pts, 
scalar_t* isovals, scalar_t* out) { uchar i0 = cuda_util::edgeVertices[edge][0]; uchar i1 = cuda_util::edgeVertices[edge][1]; scalar_t weight = (isoval - isovals[i0]) / (isovals[i1] - isovals[i0]); interpolate(weight, pts + 3*i0, pts + 3*i1, out); } __global__ void pass4gpu_pointsAndNormals( int nx, int ny, int nz, scalar_t* pointValues, scalar_t* zeroPos, scalar_t* spacing, scalar_t isoval, FlyingEdgesAlgorithm::gridEdge* gridEdges, int* triCounter, uchar* cubeCases, scalar_t* points, scalar_t* normals, int* tris) { int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; if(DEBUG) { if(j == 0 && k == 0) { // for(int i = 0; i != 3*1370424; ++i) // { // points[i] = -1; // normals[i] = -1; // } for(int i = 0; i != 3*2740864; ++i) tris[i] = -1; } } if(j >= ny-1 || k >= nz-1) return; FlyingEdgesAlgorithm::gridEdge& ge0 = gridEdges[k*ny + j]; FlyingEdgesAlgorithm::gridEdge& ge1 = gridEdges[k*ny + j+1]; FlyingEdgesAlgorithm::gridEdge& ge2 = gridEdges[(k+1)*ny + j]; FlyingEdgesAlgorithm::gridEdge& ge3 = gridEdges[(k+1)*ny + j+1]; int xl, xr; calcTrimValues(xl, xr, ge0, ge1, ge2, ge3); if(xl == xr) return; size_t triIdx = triCounter[k*(ny-1) + j]; uchar* curCubeCaseIds = cubeCases + (nx-1)*(k*(ny-1) + j); size_t x0counter = 0; size_t y0counter = 0; size_t z0counter = 0; size_t x1counter = 0; size_t z1counter = 0; size_t x2counter = 0; size_t y2counter = 0; size_t x3counter = 0; bool isYEnd = (j == ny-2); bool isZEnd = (k == nz-2); scalar_t pointCube[8*3]; scalar_t isovalCube[8]; scalar_t gradCube[8*3]; for(size_t i = xl; i != xr; ++i) { bool isXEnd = (i == nx-2); uchar caseId = curCubeCaseIds[i]; if(caseId == 0 || caseId == 255) { continue; } const bool* isCut = cuda_util::isCut[caseId]; // has 12 elements // Most of the information contained in pointCube, isovalCube // and gradCube will be used--but not necessarily all. It has // not been tested whether or not obtaining only the information // needed will provide a significant speedup--but // most likely not. // fill out pointCube, isovalCube and gradCube getCubeInfo(i, j, k, nx, ny, nz, pointValues, zeroPos, spacing, pointCube, isovalCube, gradCube); // Add Points and normals. // Calculate global indices for triangles int globalIdxs[12]; if(isCut[0]) { int idx = ge0.xstart + x0counter; interpolateOnCube(0, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(0, isoval, gradCube, isovalCube, normals + 3*idx); globalIdxs[0] = idx; ++x0counter; } if(isCut[3]) { int idx = ge0.ystart + y0counter; interpolateOnCube(3, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(3, isoval, gradCube, isovalCube, normals + 3*idx); globalIdxs[3] = idx; ++y0counter; } if(isCut[8]) { int idx = ge0.zstart + z0counter; interpolateOnCube(8, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(8, isoval, gradCube, isovalCube, normals + 3*idx); globalIdxs[8] = idx; ++z0counter; } // Note: // e1, e5, e9 and e11 will be visited in the next iteration // when they are e3, e7, e8 and 10 respectively. So don't // increment their counters. When the cube is an edge cube, // their counters don't need to be incremented because they // won't be used agin. // Manage boundary cases if needed. Otherwise just update // globalIdx. 
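// (Editor's note) globalIdxs collects, for each of the 12 cube-local edge ids, the global index
// of the point interpolated on that edge; the triangle-emission code at the end of this loop
// (currently replaced by a debug write of the cube coordinates i, j, k) would look those indices
// up through cuda_util::caseTriangles to build the index triples.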
if(isCut[1]) { int idx = ge0.ystart + y0counter; if(isXEnd) { interpolateOnCube(1, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(1, isoval, gradCube, isovalCube, normals + 3*idx); // y0counter counter doesn't need to be incremented // because it won't be used again. } globalIdxs[1] = idx; } if(isCut[9]) { int idx = ge0.zstart + z0counter; if(isXEnd) { interpolateOnCube(9, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(9, isoval, gradCube, isovalCube, normals + 3*idx); // z0counter doesn't need to in incremented. } globalIdxs[9] = idx; } if(isCut[2]) { int idx = ge1.xstart + x1counter; if(isYEnd) { interpolateOnCube(2, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(2, isoval, gradCube, isovalCube, normals + 3*idx); } globalIdxs[2] = idx; ++x1counter; } if(isCut[10]) { int idx = ge1.zstart + z1counter; if(isYEnd) { interpolateOnCube(10, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(10, isoval, gradCube, isovalCube, normals + 3*idx); } globalIdxs[10] = idx; ++z1counter; } if(isCut[4]) { int idx = ge2.xstart + x2counter; if(isZEnd) { interpolateOnCube(4, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(4, isoval, gradCube, isovalCube, normals + 3*idx); } globalIdxs[4] = idx; ++x2counter; } if(isCut[7]) { int idx = ge2.ystart + y2counter; if(isZEnd) { interpolateOnCube(7, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(7, isoval, gradCube, isovalCube, normals + 3*idx); } globalIdxs[7] = idx; ++y2counter; } if(isCut[11]) { int idx = ge1.zstart + z1counter; if(isXEnd and isYEnd) { interpolateOnCube(11, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(11, isoval, gradCube, isovalCube, normals + 3*idx); // z1counter does not need to be incremented. } globalIdxs[11] = idx; } if(isCut[5]) { int idx = ge2.ystart + y2counter; if(isXEnd and isZEnd) { interpolateOnCube(5, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(5, isoval, gradCube, isovalCube, normals + 3*idx); // y2 counter does not need to be incremented. } globalIdxs[5] = idx; } if(isCut[6]) { int idx = ge3.xstart + x3counter; if(isYEnd and isZEnd) { interpolateOnCube(6, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(6, isoval, gradCube, isovalCube, normals + 3*idx); } globalIdxs[6] = idx; ++x3counter; } // Add triangles const char* caseTri = cuda_util::caseTriangles[caseId]; // size 16 for(int idx = 0; caseTri[idx] != -1; idx += 3) { tris[3*triIdx + 0] = i; tris[3*triIdx + 1] = j; tris[3*triIdx + 2] = k; // tris[3*triIdx + 0] = globalIdxs[caseTri[idx]]; // tris[3*triIdx + 1] = globalIdxs[caseTri[idx+1]]; // tris[3*triIdx + 2] = globalIdxs[caseTri[idx+2]]; // ++triIdx; } } } void FlyingEdgesAlgorithm::pass4() { // pass4 calculates points and normals // 1) points and normals // 1st kernel: Calculate the main cube rays // 2nd and third kernel: int ty = 1;//FE_BLOCK_WIDTH_Y / 2; // divide by 2? TODO figure out this problem.. int tz = 1;//FE_BLOCK_WIDTH_Z / 2; // gah.... 
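// (Editor's note) With ty = tz = 1 the launch below creates an (ny-1) x (nz-1) grid of
// single-thread blocks -- functionally correct for these debug runs, but it leaves almost every
// lane of each warp idle; restoring the FE_BLOCK_WIDTH_Y/Z tile sizes is the open TODO above.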
uint3 gridDim = make_uint3(((ny-1) + ty - 1) / ty, ((nz-1) + tz - 1) / tz, 1); uint3 blockDim = make_uint3(ty, tz, 1); std::cout << gridDim.x << ", " << gridDim.y << ", " << gridDim.z << std::endl; std::cout << blockDim.x << ", " << blockDim.y << ", " << blockDim.z << std::endl; if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO if(DEBUG) { hipDeviceSynchronize(); } hipLaunchKernelGGL(( pass4gpu_pointsAndNormals), dim3(gridDim), dim3(blockDim), 0, 0, nx, ny, nz, // input pointValues, zeroPos, spacing, // input isoval, // input gridEdges, triCounter, cubeCases, // input points, normals, tris); // output if(DEBUG) { hipDeviceSynchronize(); std::cout << "MEOWWWWWW " << hipGetErrorString(hipGetLastError()) << std::endl; } if(DEBUG) { size_t sz = 3 * numPoints * sizeof(scalar_t); scalar_t* hostPts = (scalar_t*)malloc(sz); scalar_t* hostNrs = (scalar_t*)malloc(sz); int* hostTrs = (int*)malloc(3*numTris*sizeof(int)); hipMemcpy(hostPts, points, sz, hipMemcpyDeviceToHost); hipMemcpy(hostNrs, normals, sz, hipMemcpyDeviceToHost); hipMemcpy(hostTrs, tris, 3*numTris*sizeof(int), hipMemcpyDeviceToHost); scalar_t accumP = 0.0; for(int idx = 0; idx != 3 * numPoints; ++idx) { accumP += hostPts[idx]; accumP += hostTrs[idx]; while(accumP >= 1000000) accumP -= 1000000; } int accumT = 0; int num0 = -1; int num9 = 0; int num8 = 0; int num7 = 0; int numSetPoints = 0; for(int idx = 0; idx != 3 * numPoints; ++idx) { if(hostPts[idx] != -1) numSetPoints += 1; } std::cout << "numSetPoints " << numSetPoints << std::endl; for(int idx = 0; idx != 3 * numTris; ++idx) { if(hostTrs[idx] == 0) num0 += 1; if(hostTrs[idx] == 9) num9 += 1; if(hostTrs[idx] == 8) num8 += 1; if(hostTrs[idx] == 7) num7 += 1; accumT += hostTrs[idx]; while(accumT >= 1000000) accumT -= 1000000; } std::cout << "pass 4 hashsum " << accumP << ", " << accumT << std::endl; std::cout << "num0 in Tris " << num0 << std::endl; std::cout << "num9 in Tris " << num9 << std::endl; std::cout << "num8 in Tris " << num8 << std::endl; std::cout << "num7 in Tris " << num7 << std::endl; for(int idx = 0; idx != numTris*3; idx += 3) { if(hostTrs[idx] != -1) { std::cout << hostTrs[idx+0] << ", " << hostTrs[idx+1] << ", " << hostTrs[idx+2] << std::endl; } } free(hostPts); free(hostNrs); free(hostTrs); } }
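// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch, not part of the original file. The
// point and normal placement in pass4 boils down to inverse linear
// interpolation along a cut cube edge; interpolateOnCube applies the same
// weight to positions and to gradients. Minimal host reference; the name
// isoCrossingOnEdge is hypothetical.
static void isoCrossingOnEdge(const float a[3], const float b[3],
                              float fa, float fb, float isoval, float out[3])
{
    // fa and fb straddle isoval on a cut edge, so fb - fa is never zero here.
    float w = (isoval - fa) / (fb - fa);      // fraction of the way from a to b
    for (int c = 0; c < 3; ++c)
        out[c] = a[c] + w * (b[c] - a[c]);    // the same lerp is reused for normals
}
// ---------------------------------------------------------------------------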
60b2152366514114cb95046d0ac401105a8a6d76.cu
/* * flyingEdgesAlgorithm.cpp * * Created on: Feb 17, 2017 * Author: dbourge */ #include "FlyingEdgesAlgorithm.h" #include "CudaMarchingCubesTables.h" #include <numeric> #include <algorithm> // TODO #include <iostream> // TODO #define MAX_X_GRID 65535 // says larger but doesnt work if larger.. #define MAX_Y_GRID 65535 #define MAX_Z_GRID 65535 #define MAX_X_BLOCK 1024 #define MAX_Y_BLOCK 1024 #define MAX_Z_BLOCK 64 #define MAX_THREAD_PER_BLOCK 1024 #define DEBUG true // TODO figure out how to handle errors bool validKernelSize(uint3 const& gridDim, uint3 const& blockDim) { if(gridDim.x > MAX_X_GRID) return false; if(gridDim.y > MAX_Y_GRID) return false; if(gridDim.z > MAX_Z_GRID) return false; if(blockDim.x > MAX_X_BLOCK) return false; if(blockDim.y > MAX_Y_BLOCK) return false; if(blockDim.z > MAX_Z_BLOCK) return false; if(blockDim.x * blockDim.y * blockDim.z > MAX_THREAD_PER_BLOCK) return false; return true; } // TODO make sure pointValues stored in const memory /////////////////////////////////////////////////////////////////////////////// // Pass 1 of the algorithm /////////////////////////////////////////////////////////////////////////////// __device__ uchar calcCaseEdge( bool const& prevEdge, bool const& currEdge) { // o -- is greater than or equal to // case 0: (i-1) o-----o (i) | (_,j,k) // case 1: (i-1) x-----o (i) | (_,j+1,k) // case 2: (i-1) o-----x (i) | (_,j,k+1) // case 3: (i-1) x-----x (i) | (_,j+1,k+1) if(prevEdge && currEdge) return 0; if(!prevEdge && currEdge) return 1; if(prevEdge && !currEdge) return 2; else // !prevEdge && !currEdge return 3; } __global__ void pass1gpu_edgeCases( scalar_t* pointValues, scalar_t isoval, int nx, int ny, uchar* edgeCases) { // Each row has several blocks // Each thread is one point int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y; int k = blockIdx.z; __shared__ bool isGE[FE_BLOCK_WIDTH_PLUS_ONE]; if(i < nx) isGE[threadIdx.x] = pointValues[k*nx*ny + j*nx + i] >= isoval; if(threadIdx.x == 0 && i < nx-1) { isGE[blockDim.x] = pointValues[k*nx*ny + j*nx + i + blockDim.x] >= isoval; } __syncthreads(); if(i < nx-1) { uchar caseEdge = calcCaseEdge(isGE[threadIdx.x], isGE[threadIdx.x + 1]); edgeCases[k*(nx-1)*ny + j*(nx-1) + i] = caseEdge; } } __global__ void pass1gpu_trim( int nx, int ny, int nz, // input uchar* edgeCases, // input FlyingEdgesAlgorithm::gridEdge* gridEdges) // output { int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; if(j >= ny || k >= nz) return; size_t xl = nx; size_t xr = 0; uchar* curEdgeCases = edgeCases + k*(nx-1)*ny + j*(nx-1); for(int i = 0; i != nx-1; ++i) { if(curEdgeCases[i] == 1 || curEdgeCases[i] == 2) { if(xl == nx) xl = i; xr = i+1; } } gridEdges[k*ny + j].xl = xl; gridEdges[k*ny + j].xr = xr; } void FlyingEdgesAlgorithm::pass1() { int tx = FE_BLOCK_WIDTH; uint3 gridDim = make_uint3(((nx-1) + tx - 1) / tx, ny, nz); uint3 blockDim = make_uint3(tx, 1, 1); if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO pass1gpu_edgeCases<<<gridDim, blockDim>>>( pointValues, isoval, nx, ny, edgeCases); int ty = FE_BLOCK_WIDTH_Y; int tz = FE_BLOCK_WIDTH_Z; gridDim = make_uint3((ny + ty - 1) / ty, (nz + tz - 1) / tz, 1); blockDim = make_uint3(ty, tz, 1); if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO pass1gpu_trim<<<gridDim, blockDim>>>( nx, ny, nz, edgeCases, gridEdges); cudaDeviceSynchronize(); if(DEBUG) { int numGE = nz*ny; gridEdge* 
hostGEs = (gridEdge*)malloc(numGE*sizeof(gridEdge)); cudaMemcpy(hostGEs, gridEdges, numGE*sizeof(gridEdge), cudaMemcpyDeviceToHost); int numCubes=(nx-1)*ny*nz; size_t count = 0; uchar* hoseEdgeCases = (uchar*)malloc(numCubes*sizeof(uchar)); cudaMemcpy(hoseEdgeCases, edgeCases, numCubes*sizeof(uchar), cudaMemcpyDeviceToHost); for(int idx = 0; idx != numCubes; ++idx) { uchar const& val = hoseEdgeCases[idx]; count += val; } std::cout << "Edgecase counter: " << count << std::endl; free(hoseEdgeCases); size_t countL = 0; size_t countR = 0; for(int idx = 0; idx != numGE; ++idx) { countL += hostGEs[idx].xl; countR += hostGEs[idx].xr; } std::cout << "xl, xr: " << countL << ", " << countR << std::endl; free(hostGEs); } } /////////////////////////////////////////////////////////////////////////////// // Pass 2 of the algorithm /////////////////////////////////////////////////////////////////////////////// __device__ void calcTrimValues( int& xl, int& xr, FlyingEdgesAlgorithm::gridEdge const& ge0, FlyingEdgesAlgorithm::gridEdge const& ge1, FlyingEdgesAlgorithm::gridEdge const& ge2, FlyingEdgesAlgorithm::gridEdge const& ge3) { xl = min(ge0.xl, min(ge1.xl, min(ge2.xl, ge3.xl))); xr = max(ge0.xr, max(ge1.xr, max(ge2.xr, ge3.xr))); if(xl > xr) xl = xr; } __device__ uchar calcCubeCase( uchar const& ec0, uchar const& ec1, uchar const& ec2, uchar const& ec3) { // ec0 | (_,j,k) // ec1 | (_,j+1,k) // ec2 | (_,j,k+1) // ec3 | (_,j+1,k+1) uchar caseId = 0; if((ec0 == 0) || (ec0 == 2)) // 0 | (i,j,k) caseId |= 1; if((ec0 == 0) || (ec0 == 1)) // 1 | (i+1,j,k) caseId |= 2; if((ec1 == 0) || (ec1 == 1)) // 2 | (i+1,j+1,k) caseId |= 4; if((ec1 == 0) || (ec1 == 2)) // 3 | (i,j+1,k) caseId |= 8; if((ec2 == 0) || (ec2 == 2)) // 4 | (i,j,k+1) caseId |= 16; if((ec2 == 0) || (ec2 == 1)) // 5 | (i+1,j,k+1) caseId |= 32; if((ec3 == 0) || (ec3 == 1)) // 6 | (i+1,j+1,k+1) caseId |= 64; if((ec3 == 0) || (ec3 == 2)) // 7 | (i,j+1,k+1) caseId |= 128; return caseId; } __global__ void pass2gpu_cubeCases( int nx, int ny, int nz, uchar* edgeCases, FlyingEdgesAlgorithm::gridEdge* gridEdges, int* triCounter, uchar* cubeCases) { int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; if(j >= ny-1 || k >= nz-1) return; FlyingEdgesAlgorithm::gridEdge& ge0 = gridEdges[k*ny + j]; FlyingEdgesAlgorithm::gridEdge& ge1 = gridEdges[k*ny + j + 1]; FlyingEdgesAlgorithm::gridEdge& ge2 = gridEdges[(k+1)*ny + j]; FlyingEdgesAlgorithm::gridEdge& ge3 = gridEdges[(k+1)*ny + j + 1]; uchar* ec0 = edgeCases + k*ny*(nx-1) + j*(nx-1); uchar* ec1 = edgeCases + k*ny*(nx-1) + (j+1)*(nx-1); uchar* ec2 = edgeCases + (k+1)*ny*(nx-1) + j*(nx-1); uchar* ec3 = edgeCases + (k+1)*ny*(nx-1) + (j+1)*(nx-1); int xl, xr; calcTrimValues(xl, xr, ge0, ge1, ge2, ge3); int triCount = 0; uchar* curCubeCases = cubeCases + k*(nx-1)*(ny-1) + j*(nx-1); int xstart = 0; int ystart = 0; int zstart = 0; // TODO don't set initial values in gridEdge Constructor; const bool* isCut; for(int i = xl; i != xr; ++i) // What happens here on a gpu? // I imagine it takes the max xr-xl of all blocks { uchar caseId = calcCubeCase(ec0[i], ec1[i], ec2[i], ec3[i]); curCubeCases[i] = caseId; // Can't imagine this would do anything on a gpu unless all threads // on a block evaluated to the same value. 
if(caseId == 0 || caseId == 255) { continue; } triCount += cuda_util::numTris[caseId]; isCut = cuda_util::isCut[caseId]; // if xr == nx-1, then xr-1 is cut // so this will be set xstart += isCut[0]; ystart += isCut[3]; zstart += isCut[8]; } triCounter[k*(ny-1) + j] = triCount; if(xr == nx-1) { // isCut was set at i = xr-1 ystart += isCut[1]; zstart += isCut[9]; } ge0.xstart = xstart; ge0.ystart = ystart; ge0.zstart = zstart; } __global__ void pass2gpu_ghost_xz( int nx, int ny, int nz, uchar* edgeCases, FlyingEdgesAlgorithm::gridEdge* gridEdges) { int k = blockIdx.x * blockDim.x + threadIdx.x; if(k >= nz) // This function will deal with gridEdge at (_, ny-1, nz-1) return; bool isCorner = k == nz-1; int j = ny-1; FlyingEdgesAlgorithm::gridEdge& ge0 = gridEdges[k*ny + j]; // If isCorner, this is just bogus. FlyingEdgesAlgorithm::gridEdge& ge1 = gridEdges[(1-isCorner)*(k+1)*ny + j]; uchar* ec0 = edgeCases + k*ny*(nx-1) + j*(nx-1); // If isCorner, this is just bogus uchar* ec1 = edgeCases + (1-isCorner)*(k+1)*ny*(nx-1) + j*(nx-1); int xl = min(ge0.xl, nx*isCorner + (1-isCorner)*ge1.xl); int xr = max(ge0.xr, (1-isCorner)*ge1.xr); int xstart = 0; int zstart = 0; // TODO don't set initial values in gridEdge Constructor; uchar c0; uchar c1; for(int i = xl; i != xr; ++i) { c0 = ec0[i]; c1 = ec1[i]; // see if the edges are cut xstart += (c0 == 1 || c0 == 2); // bogus if isCorner zstart += ( (c0 == 0 && c1 == 1) || (c0 == 0 && c1 == 3) || (c0 == 1 && c1 == 2) || (c0 == 2 && c1 == 3) ); } if(xr == nx-1) { // bogus if isCorner zstart += ( (c0 == 0 && c1 == 2) || (c0 == 0 && c1 == 3) || (c0 == 1 && c1 == 2) || (c0 == 1 && c1 == 3) ); } ge0.xstart = xstart; ge0.ystart = 0; ge0.zstart = zstart*(1-isCorner); } __global__ void pass2gpu_ghost_xy( int nx, int ny, int nz, uchar* edgeCases, FlyingEdgesAlgorithm::gridEdge* gridEdges) { int j = blockIdx.x * blockDim.x + threadIdx.x; if(j >= ny-1) return; int k = nz-1; FlyingEdgesAlgorithm::gridEdge& ge0 = gridEdges[k*ny + j]; FlyingEdgesAlgorithm::gridEdge& ge1 = gridEdges[k*ny + j + 1]; uchar* ec0 = edgeCases + k*ny*(nx-1) + j*(nx-1); uchar* ec1 = edgeCases + k*ny*(nx-1) + (j+1)*(nx-1); int xl = min(ge0.xl, ge1.xl); int xr = max(ge0.xr, ge1.xr); if(xl >= xr) return; int xstart = 0; int ystart = 0; // TODO don't set initial values in gridEdge Constructor; uchar c0; uchar c1; for(int i = xl; i != xr; ++i) { c0 = ec0[i]; c1 = ec1[i]; // see if the edges are cut xstart += (c0 == 1 || c0 == 2); ystart += ( (c0 == 0 && c1 == 1) || (c0 == 0 && c1 == 3) || (c0 == 1 && c1 == 2) || (c0 == 2 && c1 == 3) ); } if(xr == nx-1) { ystart += ( (c0 == 0 && c1 == 2) || (c0 == 0 && c1 == 3) || (c0 == 1 && c1 == 2) || (c0 == 1 && c1 == 3) ); } ge0.xstart = xstart; ge0.ystart = ystart; ge0.zstart = 0; } void FlyingEdgesAlgorithm::pass2() { // pass2 calculates // 1) cubeCases for each block ray // 2) triCount for each block ray // 3) edgeRay count // 1st kernel: Calculate the 0, 1, 2 edge ray, cube cases, tricount // 2nd kernel: Calculate lost edges int ty = FE_BLOCK_WIDTH_Y; int tz = FE_BLOCK_WIDTH_Z; uint3 gridDim = make_uint3(((ny-1) + ty - 1) / ty, ((nz-1) + tz - 1) / tz, 1); uint3 blockDim = make_uint3(ty, tz, 1); if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO pass2gpu_cubeCases<<<gridDim, blockDim>>>( nx, ny, nz, edgeCases, gridEdges, // modified triCounter, // modified cubeCases); // modified // POSSIBLE to do this here TODO // cudaFree(edgeCases); if(DEBUG) { std::cout << "MEOWWWWWW " << 
cudaGetErrorString(cudaGetLastError()) << std::endl; } if(DEBUG) { size_t sz = (nx-1)*(ny-1)*(nz-1)*sizeof(uchar); cudaDeviceSynchronize(); uchar* hostCubeCases = (uchar*)malloc(sz); cudaMemcpy(hostCubeCases, cubeCases, sz, cudaMemcpyDeviceToHost); int count = 0; // TODO hostCubeCases is not the same every time. for(int i = 0; i != (nx-1)*(ny-1)*(nz-1); ++i) { if(hostCubeCases[i] != 0 && hostCubeCases[i] != 255) count += hostCubeCases[i]; } std::cout << "Count cube cases " << count << std::endl; free(hostCubeCases); } // TODO these can be launched and executed independently of each other int bw = FE_BLOCK_WIDTH; // Making sure that the xz face takes care of the (_, ny-1, nz-1) gridEdge // BE CAREFUL. xz takes care of corner. don't use (nz-1) pass2gpu_ghost_xz<<<(nz + bw - 1) / bw, bw>>>( nx, ny, nz, edgeCases, gridEdges); pass2gpu_ghost_xy<<<((ny-1) + bw - 1) / bw, bw>>>( nx, ny, nz, edgeCases, gridEdges); cudaDeviceSynchronize(); std::cout << "MEOWWWWWW " << cudaGetErrorString(cudaGetLastError()) << std::endl; if(DEBUG) { size_t sz_ge = nx*ny*sizeof(gridEdge); gridEdge* hostges = (gridEdge*)malloc(sz_ge); auto w = cudaMemcpy(hostges, gridEdges, sz_ge, cudaMemcpyDeviceToHost); if(w != cudaSuccess) { std::cout << "GHASDCFAKSCLKASCKAS:CKASL:CKAS:DLCKASD:" << std::endl; std::cout << cudaGetErrorString(w) << std::endl; } int sumxstart = 0; for(int idx = 0; idx != nx*ny; ++idx) { sumxstart += hostges[idx].xstart; } std::cout << "sumxstart " << sumxstart << std::endl; free(hostges); } } /////////////////////////////////////////////////////////////////////////////// // Pass 3 of the algorithm /////////////////////////////////////////////////////////////////////////////// __global__ void pass3gpu_blockAccum( int nx, int ny, int nz, // which are needed TODO? int* triCounter, FlyingEdgesAlgorithm::gridEdge* gridEdges, int* blockAccum) { int k = blockIdx.y * blockDim.y + threadIdx.y; // step 1: accumulate individual y thread // step 2: calc block sum // step 3: __syncthreads // step 4: add to individual y thread __shared__ int accum[4*FE_BLOCK_WIDTH]; if(k < nz) { int tmp; int accumX = 0; int accumY = 0; int accumZ = 0; int accumTri = 0; for(int j = 0; j != ny; ++j) { FlyingEdgesAlgorithm::gridEdge& ge = gridEdges[k*ny + j]; tmp = ge.xstart; ge.xstart = accumX; accumX += tmp; tmp = ge.ystart; ge.ystart = accumY; accumY += tmp; tmp = ge.zstart; ge.zstart = accumZ; accumZ += tmp; } if(k < nz-1) { for(int j = 0; j != ny-1; ++j) { int& curTriCount = triCounter[k*(ny-1) + j]; tmp = curTriCount; curTriCount = accumTri; accumTri += tmp; } } accum[4*threadIdx.y + 0] = accumX; accum[4*threadIdx.y + 1] = accumY; accum[4*threadIdx.y + 2] = accumZ; accum[4*threadIdx.y + 3] = accumTri; } __syncthreads(); if(k < nz) { if(threadIdx.y == 0) // agh! 
{ for(int idx = 1; idx != blockDim.y; ++idx) { accum[4*idx + 0] += accum[4*(idx-1) + 0]; accum[4*idx + 1] += accum[4*(idx-1) + 1]; accum[4*idx + 2] += accum[4*(idx-1) + 2]; accum[4*idx + 3] += accum[4*(idx-1) + 3]; } // answer for global accumulation blockAccum[4*blockIdx.y + 0] = accum[4*(blockDim.y-1) + 0]; blockAccum[4*blockIdx.y + 1] = accum[4*(blockDim.y-1) + 1]; blockAccum[4*blockIdx.y + 2] = accum[4*(blockDim.y-1) + 2]; blockAccum[4*blockIdx.y + 3] = accum[4*(blockDim.y-1) + 3]; } } __syncthreads(); if(threadIdx.y == 0 || k >= nz) return; bool isEndK = k == nz-1; for(int j = 0; j != ny-1; ++j) { FlyingEdgesAlgorithm::gridEdge& ge = gridEdges[k*ny + j]; ge.xstart += accum[4*(threadIdx.y-1) + 0]; ge.ystart += accum[4*(threadIdx.y-1) + 1]; ge.zstart += accum[4*(threadIdx.y-1) + 2]; // put z stuff here.. if(!isEndK) triCounter[k*(ny-1) + j] = accum[4*(threadIdx.y-1) + 3]; } FlyingEdgesAlgorithm::gridEdge& ge = gridEdges[k*ny + (ny-1)]; ge.xstart += accum[4*(threadIdx.y-1) + 0]; ge.ystart += accum[4*(threadIdx.y-1) + 1]; ge.zstart += accum[4*(threadIdx.y-1) + 2]; } __global__ // TODO can split up along j here easy enough. void pass3gpu_gridAccum( int nx, int ny, int nz, // which are needed TODO? int* triCounter, FlyingEdgesAlgorithm::gridEdge* gridEdges, int* blockAccum) // used as input here { // not adding to the first block! // // add to individual y threads int k = (blockIdx.z + 1)*blockDim.z + threadIdx.z; if (k >= nz) return; int addX = blockAccum[4*blockIdx.z + 0]; int addY = blockAccum[4*blockIdx.z + 1]; int addZ = blockAccum[4*blockIdx.z + 2]; int addTri = blockAccum[4*blockIdx.z + 3]; for(int j = 0; j != ny; ++j) { FlyingEdgesAlgorithm::gridEdge& ge = gridEdges[k*ny + j]; ge.xstart += addX; ge.ystart += addY; ge.zstart += addZ; } if(k >= nz-1) return; for(int j = 0; j != ny-1; ++j) { triCounter[k*(ny-1) + j] += addTri; } } // Can make prettier? 
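// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch, not part of the original file. The
// host loop inside pass3 below replaces std::partial_sum with a strided
// in-place accumulation over numBlocks groups of four counters (xstart,
// ystart, zstart, tri). Per-component view, with a hypothetical name:
static void inclusiveScanInterleaved(int* accum, int numBlocks, int stride /* 4 here */)
{
    // After the loop, group b holds the running totals of groups 0..b for every
    // counter; the last group therefore holds the grand totals.
    for (int b = 1; b < numBlocks; ++b)
        for (int c = 0; c < stride; ++c)
            accum[b * stride + c] += accum[(b - 1) * stride + c];
}
// ---------------------------------------------------------------------------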
void FlyingEdgesAlgorithm::pass3() { // Split the z axis // Kernel 1: calculate the accum values on block sync // then accum individual values // Use that info accum each block (except the first one) // Kernel 2: just add values to individual threads int tz = FE_BLOCK_WIDTH; int numBlocks = (nz + tz - 1) / tz; // there are four because: xstart, ystart, zstart, triaccum int sizeBlocks = 4 * numBlocks * sizeof(int); uint3 gridDim = make_uint3(1, numBlocks, 1); uint3 blockDim = make_uint3(1, tz, 1); int* hostBlockAccum = (int*)malloc(sizeBlocks); for(int idx = 0; idx != 4*numBlocks; ++idx) { hostBlockAccum[idx] = 0; } int* deviceBlockAccum; cudaMalloc(&deviceBlockAccum, sizeBlocks); cudaMemcpy(deviceBlockAccum, hostBlockAccum, sizeBlocks, cudaMemcpyHostToDevice); // Accumulate values locally if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO pass3gpu_blockAccum<<<gridDim, blockDim>>>( nx, ny, nz, triCounter, gridEdges, deviceBlockAccum); cudaMemcpy(hostBlockAccum, deviceBlockAccum, sizeBlocks, cudaMemcpyDeviceToHost); if(DEBUG) { std::cout << "ACCUM "; for(int idx = 0; idx != 4*numBlocks; ++idx) { std::cout << hostBlockAccum[idx] << " "; } std::cout << std::endl; cudaDeviceSynchronize(); std::cout << "MEOWWWWWW " << cudaGetErrorString(cudaGetLastError()) << std::endl; } if(numBlocks != 1) { // std::partial_sum(2 2 3 4 3 2 2 ) TODO not using it get rid of header // goes to (2 4 7 11 14 16 18) // std::partial_sum(hostBlockAccum, hostBlockAccum + numBlocks, hostBlockAccum); for(int i = 4; i != 4*numBlocks; i += 4) { hostBlockAccum[i+0] += hostBlockAccum[i-4]; hostBlockAccum[i+1] += hostBlockAccum[i-3]; hostBlockAccum[i+2] += hostBlockAccum[i-2]; hostBlockAccum[i+3] += hostBlockAccum[i-1]; } // note: the last values in hostBlockAccum should contain total counts // The first block is done so it is ignored // and the last info in BlockAccum isn't needed (its the total counts) cudaMemcpy(deviceBlockAccum, hostBlockAccum, sizeBlocks - 4 * sizeof(int), cudaMemcpyHostToDevice); // TODO if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO // Accumulate values from other blocks gridDim = make_uint3(1, 1, numBlocks - 1); pass3gpu_gridAccum<<<gridDim, blockDim>>>( nx, ny, nz, triCounter, gridEdges, deviceBlockAccum); } // Allocate memory for points, normals and tris outputAllocated = true; numPoints = hostBlockAccum[4*(numBlocks-1) + 0] + hostBlockAccum[4*(numBlocks-1) + 1] + hostBlockAccum[4*(numBlocks-1) + 2]; numTris = hostBlockAccum[4*(numBlocks-1) + 3]; // cudaMalloc(&points, 3*sizeof(scalar_t)*numPoints); // cudaMalloc(&normals, 3*sizeof(scalar_t)*numPoints); // cudaMalloc(&tris, 3*sizeof(int)*numTris); if(DEBUG) { std::cout << "numpoints" << numPoints << std::endl; std::cout << "numtris" << numTris << std::endl; } // free memory used in this function free(hostBlockAccum); cudaFree(deviceBlockAccum); cudaDeviceSynchronize(); if(DEBUG) { std::cout << "MEOWWWWWW " << cudaGetErrorString(cudaGetLastError()) << std::endl; } } /////////////////////////////////////////////////////////////////////////////// // Pass 4 of the algorithm /////////////////////////////////////////////////////////////////////////////// __device__ void computeGradient( int const& i, int const& j, int const& k, int const& nx, int const& ny, int const& nz, scalar_t* data, scalar_t* spacing, scalar_t* point) { scalar_t x0[2]; scalar_t x1[2]; scalar_t x2[2]; scalar_t run[3]; size_t dataIdx = k*nx*ny + j*nx + i; 
if (i == 0) { x0[0] = data[dataIdx + 1]; x0[1] = data[dataIdx]; run[0] = spacing[0]; } else if (i == (nx - 1)) { x0[0] = data[dataIdx]; x0[1] = data[dataIdx - 1]; run[0] = spacing[0]; } else { x0[0] = data[dataIdx + 1]; x0[1] = data[dataIdx - 1]; run[0] = 2 * spacing[0]; } if (j == 0) { x1[0] = data[dataIdx + nx]; x1[1] = data[dataIdx]; run[1] = spacing[1]; } else if (j == (ny - 1)) { x1[0] = data[dataIdx]; x1[1] = data[dataIdx - nx]; run[1] = spacing[1]; } else { x1[0] = data[dataIdx + nx]; x1[1] = data[dataIdx - ny]; run[1] = 2 * spacing[1]; } if (k == 0) { x2[0] = data[dataIdx + nx*ny]; x2[1] = data[dataIdx]; run[2] = spacing[2]; } else if (k == (nz - 1)) { x2[0] = data[dataIdx]; x2[1] = data[dataIdx - nx*ny]; run[2] = spacing[2]; } else { x2[0] = data[dataIdx + nx*ny]; x2[1] = data[dataIdx - nx*ny]; run[2] = 2 * spacing[2]; } point[0] = (x0[1] - x0[0]) / run[0]; point[1] = (x1[1] - x1[0]) / run[1]; point[2] = (x2[1] - x2[0]) / run[2]; } __device__ void getCubeInfo( int i, int j, int k, int nx, int ny, int nz, scalar_t* pointValues, scalar_t* zeroPos, scalar_t* spacing, scalar_t* pointCube, scalar_t* isovalCube, scalar_t* gradCube) { isovalCube[0] = pointValues[k*ny*nx + j*nx + i]; isovalCube[1] = pointValues[k*ny*nx + j*nx + i+1]; isovalCube[2] = pointValues[k*ny*nx + (j+1)*nx + i+1]; isovalCube[3] = pointValues[k*ny*nx + (j+1)*nx + i]; isovalCube[4] = pointValues[(k+1)*ny*nx + j*nx + i]; isovalCube[5] = pointValues[(k+1)*ny*nx + j*nx + i+1]; isovalCube[6] = pointValues[(k+1)*ny*nx + (j+1)*nx + (i+1)]; isovalCube[7] = pointValues[(k+1)*ny*nx + (j+1)*nx + i]; scalar_t xpos = zeroPos[0] + i * spacing[0]; scalar_t ypos = zeroPos[1] + j * spacing[1]; scalar_t zpos = zeroPos[2] + k * spacing[2]; pointCube[0*3 + 0] = xpos; pointCube[0*3 + 1] = ypos; pointCube[0*3 + 2] = zpos; pointCube[1*3 + 0] = xpos + spacing[0]; pointCube[1*3 + 1] = ypos; pointCube[1*3 + 2] = zpos; pointCube[2*3 + 0] = xpos + spacing[0]; pointCube[2*3 + 1] = ypos + spacing[1]; pointCube[2*3 + 2] = zpos; pointCube[3*3 + 0] = xpos; pointCube[3*3 + 1] = ypos + spacing[1]; pointCube[3*3 + 2] = zpos; pointCube[4*3 + 0] = xpos; pointCube[4*3 + 1] = ypos; pointCube[4*3 + 2] = zpos + spacing[2]; pointCube[5*3 + 0] = xpos + spacing[0]; pointCube[5*3 + 1] = ypos; pointCube[5*3 + 2] = zpos + spacing[2]; pointCube[6*3 + 0] = xpos + spacing[0]; pointCube[6*3 + 1] = ypos + spacing[1]; pointCube[6*3 + 2] = zpos + spacing[2]; pointCube[7*3 + 0] = xpos; pointCube[7*3 + 1] = ypos + spacing[1]; pointCube[7*3 + 2] = zpos + spacing[2]; computeGradient(i , j , k , nx, ny, nz, pointValues, spacing, gradCube + 3*0); computeGradient(i+1, j , k , nx, ny, nz, pointValues, spacing, gradCube + 3*1); computeGradient(i+1, j+1, k , nx, ny, nz, pointValues, spacing, gradCube + 3*2); computeGradient(i , j+1, k , nx, ny, nz, pointValues, spacing, gradCube + 3*3); computeGradient(i , j , k+1, nx, ny, nz, pointValues, spacing, gradCube + 3*4); computeGradient(i+1, j , k+1, nx, ny, nz, pointValues, spacing, gradCube + 3*5); computeGradient(i+1, j+1, k+1, nx, ny, nz, pointValues, spacing, gradCube + 3*6); computeGradient(i , j+1, k+1, nx, ny, nz, pointValues, spacing, gradCube + 3*7); } __device__ void interpolate( scalar_t const& weight, scalar_t* a, scalar_t* b, scalar_t* out) { out[0] = a[0] + (weight * (b[0] - a[0])); out[1] = a[1] + (weight * (b[1] - a[1])); out[2] = a[2] + (weight * (b[2] - a[2])); } __device__ void interpolateOnCube( uchar const& edge, scalar_t const& isoval, scalar_t* pts, scalar_t* isovals, scalar_t* out) { uchar i0 = 
cuda_util::edgeVertices[edge][0]; uchar i1 = cuda_util::edgeVertices[edge][1]; scalar_t weight = (isoval - isovals[i0]) / (isovals[i1] - isovals[i0]); interpolate(weight, pts + 3*i0, pts + 3*i1, out); } __global__ void pass4gpu_pointsAndNormals( int nx, int ny, int nz, scalar_t* pointValues, scalar_t* zeroPos, scalar_t* spacing, scalar_t isoval, FlyingEdgesAlgorithm::gridEdge* gridEdges, int* triCounter, uchar* cubeCases, scalar_t* points, scalar_t* normals, int* tris) { int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; if(DEBUG) { if(j == 0 && k == 0) { // for(int i = 0; i != 3*1370424; ++i) // { // points[i] = -1; // normals[i] = -1; // } for(int i = 0; i != 3*2740864; ++i) tris[i] = -1; } } if(j >= ny-1 || k >= nz-1) return; FlyingEdgesAlgorithm::gridEdge& ge0 = gridEdges[k*ny + j]; FlyingEdgesAlgorithm::gridEdge& ge1 = gridEdges[k*ny + j+1]; FlyingEdgesAlgorithm::gridEdge& ge2 = gridEdges[(k+1)*ny + j]; FlyingEdgesAlgorithm::gridEdge& ge3 = gridEdges[(k+1)*ny + j+1]; int xl, xr; calcTrimValues(xl, xr, ge0, ge1, ge2, ge3); if(xl == xr) return; size_t triIdx = triCounter[k*(ny-1) + j]; uchar* curCubeCaseIds = cubeCases + (nx-1)*(k*(ny-1) + j); size_t x0counter = 0; size_t y0counter = 0; size_t z0counter = 0; size_t x1counter = 0; size_t z1counter = 0; size_t x2counter = 0; size_t y2counter = 0; size_t x3counter = 0; bool isYEnd = (j == ny-2); bool isZEnd = (k == nz-2); scalar_t pointCube[8*3]; scalar_t isovalCube[8]; scalar_t gradCube[8*3]; for(size_t i = xl; i != xr; ++i) { bool isXEnd = (i == nx-2); uchar caseId = curCubeCaseIds[i]; if(caseId == 0 || caseId == 255) { continue; } const bool* isCut = cuda_util::isCut[caseId]; // has 12 elements // Most of the information contained in pointCube, isovalCube // and gradCube will be used--but not necessarily all. It has // not been tested whether or not obtaining only the information // needed will provide a significant speedup--but // most likely not. // fill out pointCube, isovalCube and gradCube getCubeInfo(i, j, k, nx, ny, nz, pointValues, zeroPos, spacing, pointCube, isovalCube, gradCube); // Add Points and normals. // Calculate global indices for triangles int globalIdxs[12]; if(isCut[0]) { int idx = ge0.xstart + x0counter; interpolateOnCube(0, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(0, isoval, gradCube, isovalCube, normals + 3*idx); globalIdxs[0] = idx; ++x0counter; } if(isCut[3]) { int idx = ge0.ystart + y0counter; interpolateOnCube(3, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(3, isoval, gradCube, isovalCube, normals + 3*idx); globalIdxs[3] = idx; ++y0counter; } if(isCut[8]) { int idx = ge0.zstart + z0counter; interpolateOnCube(8, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(8, isoval, gradCube, isovalCube, normals + 3*idx); globalIdxs[8] = idx; ++z0counter; } // Note: // e1, e5, e9 and e11 will be visited in the next iteration // when they are e3, e7, e8 and 10 respectively. So don't // increment their counters. When the cube is an edge cube, // their counters don't need to be incremented because they // won't be used agin. // Manage boundary cases if needed. Otherwise just update // globalIdx. if(isCut[1]) { int idx = ge0.ystart + y0counter; if(isXEnd) { interpolateOnCube(1, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(1, isoval, gradCube, isovalCube, normals + 3*idx); // y0counter counter doesn't need to be incremented // because it won't be used again. 
} globalIdxs[1] = idx; } if(isCut[9]) { int idx = ge0.zstart + z0counter; if(isXEnd) { interpolateOnCube(9, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(9, isoval, gradCube, isovalCube, normals + 3*idx); // z0counter doesn't need to in incremented. } globalIdxs[9] = idx; } if(isCut[2]) { int idx = ge1.xstart + x1counter; if(isYEnd) { interpolateOnCube(2, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(2, isoval, gradCube, isovalCube, normals + 3*idx); } globalIdxs[2] = idx; ++x1counter; } if(isCut[10]) { int idx = ge1.zstart + z1counter; if(isYEnd) { interpolateOnCube(10, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(10, isoval, gradCube, isovalCube, normals + 3*idx); } globalIdxs[10] = idx; ++z1counter; } if(isCut[4]) { int idx = ge2.xstart + x2counter; if(isZEnd) { interpolateOnCube(4, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(4, isoval, gradCube, isovalCube, normals + 3*idx); } globalIdxs[4] = idx; ++x2counter; } if(isCut[7]) { int idx = ge2.ystart + y2counter; if(isZEnd) { interpolateOnCube(7, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(7, isoval, gradCube, isovalCube, normals + 3*idx); } globalIdxs[7] = idx; ++y2counter; } if(isCut[11]) { int idx = ge1.zstart + z1counter; if(isXEnd and isYEnd) { interpolateOnCube(11, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(11, isoval, gradCube, isovalCube, normals + 3*idx); // z1counter does not need to be incremented. } globalIdxs[11] = idx; } if(isCut[5]) { int idx = ge2.ystart + y2counter; if(isXEnd and isZEnd) { interpolateOnCube(5, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(5, isoval, gradCube, isovalCube, normals + 3*idx); // y2 counter does not need to be incremented. } globalIdxs[5] = idx; } if(isCut[6]) { int idx = ge3.xstart + x3counter; if(isYEnd and isZEnd) { interpolateOnCube(6, isoval, pointCube, isovalCube, points + 3*idx); interpolateOnCube(6, isoval, gradCube, isovalCube, normals + 3*idx); } globalIdxs[6] = idx; ++x3counter; } // Add triangles const char* caseTri = cuda_util::caseTriangles[caseId]; // size 16 for(int idx = 0; caseTri[idx] != -1; idx += 3) { tris[3*triIdx + 0] = i; tris[3*triIdx + 1] = j; tris[3*triIdx + 2] = k; // tris[3*triIdx + 0] = globalIdxs[caseTri[idx]]; // tris[3*triIdx + 1] = globalIdxs[caseTri[idx+1]]; // tris[3*triIdx + 2] = globalIdxs[caseTri[idx+2]]; // ++triIdx; } } } void FlyingEdgesAlgorithm::pass4() { // pass4 calculates points and normals // 1) points and normals // 1st kernel: Calculate the main cube rays // 2nd and third kernel: int ty = 1;//FE_BLOCK_WIDTH_Y / 2; // divide by 2? TODO figure out this problem.. int tz = 1;//FE_BLOCK_WIDTH_Z / 2; // gah.... 
uint3 gridDim = make_uint3(((ny-1) + ty - 1) / ty, ((nz-1) + tz - 1) / tz, 1); uint3 blockDim = make_uint3(ty, tz, 1); std::cout << gridDim.x << ", " << gridDim.y << ", " << gridDim.z << std::endl; std::cout << blockDim.x << ", " << blockDim.y << ", " << blockDim.z << std::endl; if(!validKernelSize(gridDim, blockDim)) std::cout << "GAHHHHHHHHHHHH GP2 ENGINE " << __LINE__ << std::endl; // TODO if(DEBUG) { cudaDeviceSynchronize(); } pass4gpu_pointsAndNormals<<<gridDim, blockDim>>>( nx, ny, nz, // input pointValues, zeroPos, spacing, // input isoval, // input gridEdges, triCounter, cubeCases, // input points, normals, tris); // output if(DEBUG) { cudaDeviceSynchronize(); std::cout << "MEOWWWWWW " << cudaGetErrorString(cudaGetLastError()) << std::endl; } if(DEBUG) { size_t sz = 3 * numPoints * sizeof(scalar_t); scalar_t* hostPts = (scalar_t*)malloc(sz); scalar_t* hostNrs = (scalar_t*)malloc(sz); int* hostTrs = (int*)malloc(3*numTris*sizeof(int)); cudaMemcpy(hostPts, points, sz, cudaMemcpyDeviceToHost); cudaMemcpy(hostNrs, normals, sz, cudaMemcpyDeviceToHost); cudaMemcpy(hostTrs, tris, 3*numTris*sizeof(int), cudaMemcpyDeviceToHost); scalar_t accumP = 0.0; for(int idx = 0; idx != 3 * numPoints; ++idx) { accumP += hostPts[idx]; accumP += hostTrs[idx]; while(accumP >= 1000000) accumP -= 1000000; } int accumT = 0; int num0 = -1; int num9 = 0; int num8 = 0; int num7 = 0; int numSetPoints = 0; for(int idx = 0; idx != 3 * numPoints; ++idx) { if(hostPts[idx] != -1) numSetPoints += 1; } std::cout << "numSetPoints " << numSetPoints << std::endl; for(int idx = 0; idx != 3 * numTris; ++idx) { if(hostTrs[idx] == 0) num0 += 1; if(hostTrs[idx] == 9) num9 += 1; if(hostTrs[idx] == 8) num8 += 1; if(hostTrs[idx] == 7) num7 += 1; accumT += hostTrs[idx]; while(accumT >= 1000000) accumT -= 1000000; } std::cout << "pass 4 hashsum " << accumP << ", " << accumT << std::endl; std::cout << "num0 in Tris " << num0 << std::endl; std::cout << "num9 in Tris " << num9 << std::endl; std::cout << "num8 in Tris " << num8 << std::endl; std::cout << "num7 in Tris " << num7 << std::endl; for(int idx = 0; idx != numTris*3; idx += 3) { if(hostTrs[idx] != -1) { std::cout << hostTrs[idx+0] << ", " << hostTrs[idx+1] << ", " << hostTrs[idx+2] << std::endl; } } free(hostPts); free(hostNrs); free(hostTrs); } }
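Point placement in pass 4 reduces to a linear interpolation along each cut cube edge: the crossing parameter is t = (isoval - v0) / (v1 - v0), and interpolateOnCube applies the same weight to the corner positions (for the vertex) and to the corner gradients (for the normal). A minimal standalone sketch of that step, with hypothetical names and host-only code:

#include <cstdio>

// Linear crossing of the isovalue between two cube corners; mirrors the
// interpolate()/interpolateOnCube() pair above in a host-only form.
static void lerpEdge(float isoval,
                     const float p0[3], float v0,
                     const float p1[3], float v1,
                     float out[3])
{
    float t = (isoval - v0) / (v1 - v0);   // assumes v0 != v1, i.e. the edge really is cut
    for (int c = 0; c < 3; ++c)
        out[c] = p0[c] + t * (p1[c] - p0[c]);
}

int main()
{
    const float a[3] = {0.f, 0.f, 0.f}, b[3] = {1.f, 0.f, 0.f};
    float p[3];
    lerpEdge(0.25f, a, 0.f, b, 1.f, p);    // crossing lands at x = 0.25
    std::printf("%f %f %f\n", p[0], p[1], p[2]);
    return 0;
}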
2b5d176c3b9a6ccb0ed95889abee7de34820199b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> const int threadsPerBlock_x = 16; //naive __global__ void MatrixMulKernel1(float *dev_a, float *dev_b, float *dev_c, int n) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; float temp = 0; int k; for (k = 0; k < n; k++) { temp += dev_a[y * n + k] * dev_b[k * n + x]; } dev_c[y * n + x] = temp; } //tiling __global__ void MatrixMulKernel2(float *dev_a, float *dev_b, float *dev_c, int n) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; float Pvalue = 0; for (int i = 0; i < gridDim.x; i++) { __shared__ float row_a[threadsPerBlock_x][threadsPerBlock_x]; __shared__ float col_b[threadsPerBlock_x][threadsPerBlock_x]; row_a[ty][tx] = *(dev_a + (ty + by * blockDim.y) * n + tx + i * blockDim.x); col_b[ty][tx] = *(dev_b + (ty + i * blockDim.y) * n + tx + bx * blockDim.x); __syncthreads(); for (int j = 0; j < blockDim.x; j++) Pvalue += row_a[ty][j] * col_b[j][tx]; __syncthreads(); } dev_c[(ty + by * blockDim.y) * n + tx + bx * blockDim.x] = Pvalue; } //matrix transpose with tiling __global__ void MatrixMulKernel3(float *dev_a, float *dev_b, float *dev_c, int n) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; __shared__ float A_tile[threadsPerBlock_x][threadsPerBlock_x]; __shared__ float B_tile[threadsPerBlock_x][threadsPerBlock_x]; float Pvalue = 0; for (int i = 0; i < gridDim.x; i++) { int i = ty + by * blockDim.y; int j = tx + i * blockDim.x; A_tile[ty][tx] = *(dev_a + i * n + j); B_tile[tx][ty] = *(dev_b + i * n + j); __syncthreads(); for (int j = 0; j < blockDim.x; j++) Pvalue += A_tile[ty][j] * B_tile[tx][j]; __syncthreads(); } dev_c[(ty + by * blockDim.y) * n + tx + bx * blockDim.x] = Pvalue; } //transpose without tiling __global__ void MatrixMulKernel31(float *dev_a, float *dev_b, float *dev_c, int n) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; float temp = 0; int k; for (k = 0; k < n; k++) { temp += dev_a[y * n + k] * dev_b[x * n + k]; } dev_c[y * n + x] = temp; } //loop unrolling __global__ void MatrixMulKernel4(float* Md, float* Nd, float* Pd, int Width) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; float Pvalue = 0; for (int m = 0; m < gridDim.x; ++m) { __shared__ float Mds[threadsPerBlock_x][threadsPerBlock_x]; __shared__ float Nds[threadsPerBlock_x][threadsPerBlock_x]; Mds[ty][tx] = *(Md + (by*blockDim.y + ty) * Width + m*blockDim.x + tx); Nds[ty][tx] = *(Nd + (m*blockDim.y + ty) * Width + bx*blockDim.x + tx); __syncthreads(); Pvalue += Mds[ty][0] * Nds[0][tx] + Mds[ty][1] * Nds[1][tx] + Mds[ty][2] * Nds[2][tx] + Mds[ty][3] * Nds[3][tx] + Mds[ty][4] * Nds[4][tx] + Mds[ty][5] * Nds[5][tx] + Mds[ty][6] * Nds[6][tx] + Mds[ty][7] * Nds[7][tx] + Mds[ty][8] * Nds[8][tx] + Mds[ty][9] * Nds[9][tx] + Mds[ty][10] * Nds[10][tx] + Mds[ty][11] * Nds[11][tx] + Mds[ty][12] * Nds[12][tx] + Mds[ty][13] * Nds[13][tx] + Mds[ty][14] * Nds[14][tx] + Mds[ty][15] * Nds[15][tx]; __syncthreads(); } Pd[(by*blockDim.y+ty)*Width+bx*blockDim.x+tx] = Pvalue; } int main(int argc, char *argv[]) { long int N = 0; if (argc == 1) { N = 1024; } else { N = atoi(argv[1]); } long int blocksPerGrid_x = N / threadsPerBlock_x; float *a, *b, *c; float *d_a, *d_b, *d_c; FILE *file1, *file2, *file3; a = (float *)malloc(sizeof(float) * N * N); b = (float 
*)malloc(sizeof(float) * N * N); c = (float *)malloc(sizeof(float) * N * N); srand((unsigned)time(NULL)); if ((file1 = fopen("/home/tangjia/C++Parallel/MatrixMul/A_Matri.txt", "wt")) == NULL) { printf("Here is a mistake\n"); return 0; } file2 = fopen("/home/tangjia/C++Parallel/MatrixMul/B_Matri.txt", "wt"); for (int i = 0; i < N*N; i++) { a[i] = rand()/(float)RAND_MAX; b[i] = rand()/(float)RAND_MAX; // printf("%f %f\n",a[i], b[i]); } for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { fprintf(file1, "%-8f ", a[i*N+j]); fprintf(file2, "%-8f ", b[i*N+j]); } fprintf(file1, "\n"); fprintf(file2, "\n"); } hipMalloc((void **)&d_a, N * N * sizeof(float)); hipMalloc((void **)&d_b, N * N * sizeof(float)); hipMalloc((void **)&d_c, N * N * sizeof(float)); hipMemcpy(d_a, a, N*N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b, b, N*N*sizeof(float), hipMemcpyHostToDevice); dim3 dimBlock(threadsPerBlock_x, threadsPerBlock_x); dim3 dimGrid(blocksPerGrid_x, blocksPerGrid_x); struct timeval start, end; float time_use, speed; gettimeofday(&start, NULL); hipDeviceSynchronize(); hipLaunchKernelGGL(( MatrixMulKernel3), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, N); hipDeviceSynchronize(); gettimeofday(&end, NULL); time_use = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec); speed = N*N*N/time_use/1000; printf("N: %ld, Time consumed: %.2f us, Speed: %.2f Gflops\n",N, time_use, speed); hipMemcpy(c, d_c, N*N*sizeof(float), hipMemcpyDeviceToHost); printf("stage 5\n"); file3 = fopen("/home/tangjia/C++Parallel/MatrixMul/C_Matri.txt", "wt"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { fprintf(file3, "%-8.2f", c[i*N+j]); } fprintf(file3, "\n"); } hipFree(d_a); hipFree(d_b); hipFree(d_c); free(a); free(b); free(c); return 1; }
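MatrixMulKernel3 above re-declares `i` inside its tile loop, shadowing the loop counter, so every iteration loads the same global elements and the column index is derived from the row instead of from the tile being visited. A corrected sketch of the intended tiled C = A * B^T kernel (illustrative; `MatrixMulKernel3Fixed` and `TILE` are names introduced here, and n is assumed to be a multiple of TILE, as the grid computation in main() already assumes):

constexpr int TILE = 16;   // matches threadsPerBlock_x in the file above

// Tiled C = A * B^T (illustrative rewrite of the kernel above).
__global__ void MatrixMulKernel3Fixed(const float* dev_a, const float* dev_b,
                                      float* dev_c, int n)
{
    __shared__ float A_tile[TILE][TILE];
    __shared__ float B_tile[TILE][TILE];

    int row = blockIdx.y * TILE + threadIdx.y;   // row of C and of A
    int col = blockIdx.x * TILE + threadIdx.x;   // column of C, i.e. row of B (B is used transposed)

    float Pvalue = 0.0f;
    for (int m = 0; m < n / TILE; ++m) {         // walk tiles along the k dimension
        A_tile[threadIdx.y][threadIdx.x] = dev_a[row * n + m * TILE + threadIdx.x];
        B_tile[threadIdx.y][threadIdx.x] =
            dev_b[(blockIdx.x * TILE + threadIdx.y) * n + m * TILE + threadIdx.x];
        __syncthreads();
        for (int k = 0; k < TILE; ++k)
            Pvalue += A_tile[threadIdx.y][k] * B_tile[threadIdx.x][k];
        __syncthreads();
    }
    dev_c[row * n + col] = Pvalue;
}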
2b5d176c3b9a6ccb0ed95889abee7de34820199b.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> const int threadsPerBlock_x = 16; //naive __global__ void MatrixMulKernel1(float *dev_a, float *dev_b, float *dev_c, int n) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; float temp = 0; int k; for (k = 0; k < n; k++) { temp += dev_a[y * n + k] * dev_b[k * n + x]; } dev_c[y * n + x] = temp; } //tiling __global__ void MatrixMulKernel2(float *dev_a, float *dev_b, float *dev_c, int n) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; float Pvalue = 0; for (int i = 0; i < gridDim.x; i++) { __shared__ float row_a[threadsPerBlock_x][threadsPerBlock_x]; __shared__ float col_b[threadsPerBlock_x][threadsPerBlock_x]; row_a[ty][tx] = *(dev_a + (ty + by * blockDim.y) * n + tx + i * blockDim.x); col_b[ty][tx] = *(dev_b + (ty + i * blockDim.y) * n + tx + bx * blockDim.x); __syncthreads(); for (int j = 0; j < blockDim.x; j++) Pvalue += row_a[ty][j] * col_b[j][tx]; __syncthreads(); } dev_c[(ty + by * blockDim.y) * n + tx + bx * blockDim.x] = Pvalue; } //matrix transpose with tiling __global__ void MatrixMulKernel3(float *dev_a, float *dev_b, float *dev_c, int n) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; __shared__ float A_tile[threadsPerBlock_x][threadsPerBlock_x]; __shared__ float B_tile[threadsPerBlock_x][threadsPerBlock_x]; float Pvalue = 0; for (int i = 0; i < gridDim.x; i++) { int i = ty + by * blockDim.y; int j = tx + i * blockDim.x; A_tile[ty][tx] = *(dev_a + i * n + j); B_tile[tx][ty] = *(dev_b + i * n + j); __syncthreads(); for (int j = 0; j < blockDim.x; j++) Pvalue += A_tile[ty][j] * B_tile[tx][j]; __syncthreads(); } dev_c[(ty + by * blockDim.y) * n + tx + bx * blockDim.x] = Pvalue; } //transpose without tiling __global__ void MatrixMulKernel31(float *dev_a, float *dev_b, float *dev_c, int n) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; float temp = 0; int k; for (k = 0; k < n; k++) { temp += dev_a[y * n + k] * dev_b[x * n + k]; } dev_c[y * n + x] = temp; } //loop unrolling __global__ void MatrixMulKernel4(float* Md, float* Nd, float* Pd, int Width) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; float Pvalue = 0; for (int m = 0; m < gridDim.x; ++m) { __shared__ float Mds[threadsPerBlock_x][threadsPerBlock_x]; __shared__ float Nds[threadsPerBlock_x][threadsPerBlock_x]; Mds[ty][tx] = *(Md + (by*blockDim.y + ty) * Width + m*blockDim.x + tx); Nds[ty][tx] = *(Nd + (m*blockDim.y + ty) * Width + bx*blockDim.x + tx); __syncthreads(); Pvalue += Mds[ty][0] * Nds[0][tx] + Mds[ty][1] * Nds[1][tx] + Mds[ty][2] * Nds[2][tx] + Mds[ty][3] * Nds[3][tx] + Mds[ty][4] * Nds[4][tx] + Mds[ty][5] * Nds[5][tx] + Mds[ty][6] * Nds[6][tx] + Mds[ty][7] * Nds[7][tx] + Mds[ty][8] * Nds[8][tx] + Mds[ty][9] * Nds[9][tx] + Mds[ty][10] * Nds[10][tx] + Mds[ty][11] * Nds[11][tx] + Mds[ty][12] * Nds[12][tx] + Mds[ty][13] * Nds[13][tx] + Mds[ty][14] * Nds[14][tx] + Mds[ty][15] * Nds[15][tx]; __syncthreads(); } Pd[(by*blockDim.y+ty)*Width+bx*blockDim.x+tx] = Pvalue; } int main(int argc, char *argv[]) { long int N = 0; if (argc == 1) { N = 1024; } else { N = atoi(argv[1]); } long int blocksPerGrid_x = N / threadsPerBlock_x; float *a, *b, *c; float *d_a, *d_b, *d_c; FILE *file1, *file2, *file3; a = (float *)malloc(sizeof(float) * N * N); b = (float *)malloc(sizeof(float) * N * N); c = (float *)malloc(sizeof(float) * N * N); 
srand((unsigned)time(NULL)); if ((file1 = fopen("/home/tangjia/C++Parallel/MatrixMul/A_Matri.txt", "wt")) == NULL) { printf("Here is a mistake\n"); return 0; } file2 = fopen("/home/tangjia/C++Parallel/MatrixMul/B_Matri.txt", "wt"); for (int i = 0; i < N*N; i++) { a[i] = rand()/(float)RAND_MAX; b[i] = rand()/(float)RAND_MAX; // printf("%f %f\n",a[i], b[i]); } for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { fprintf(file1, "%-8f ", a[i*N+j]); fprintf(file2, "%-8f ", b[i*N+j]); } fprintf(file1, "\n"); fprintf(file2, "\n"); } cudaMalloc((void **)&d_a, N * N * sizeof(float)); cudaMalloc((void **)&d_b, N * N * sizeof(float)); cudaMalloc((void **)&d_c, N * N * sizeof(float)); cudaMemcpy(d_a, a, N*N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, N*N*sizeof(float), cudaMemcpyHostToDevice); dim3 dimBlock(threadsPerBlock_x, threadsPerBlock_x); dim3 dimGrid(blocksPerGrid_x, blocksPerGrid_x); struct timeval start, end; float time_use, speed; gettimeofday(&start, NULL); cudaThreadSynchronize(); MatrixMulKernel3<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, N); cudaThreadSynchronize(); gettimeofday(&end, NULL); time_use = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec); speed = N*N*N/time_use/1000; printf("N: %ld, Time consumed: %.2f us, Speed: %.2f Gflops\n",N, time_use, speed); cudaMemcpy(c, d_c, N*N*sizeof(float), cudaMemcpyDeviceToHost); printf("stage 5\n"); file3 = fopen("/home/tangjia/C++Parallel/MatrixMul/C_Matri.txt", "wt"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { fprintf(file3, "%-8.2f", c[i*N+j]); } fprintf(file3, "\n"); } cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); free(a); free(b); free(c); return 1; }
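The timing harness in main() above brackets the launch with gettimeofday() and cudaThreadSynchronize(), which is deprecated in favor of cudaDeviceSynchronize(). A sketch of the same measurement with CUDA events, written as a drop-in for the timing block in main() and reusing its d_a, d_b, d_c, dimGrid, dimBlock and N (error checks elided):

// Event-based timing of one kernel launch (illustrative fragment).
cudaEvent_t t0, t1;
cudaEventCreate(&t0);
cudaEventCreate(&t1);

cudaEventRecord(t0);
MatrixMulKernel3<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, N);
cudaEventRecord(t1);
cudaEventSynchronize(t1);            // blocks until the kernel and the t1 record are done

float ms = 0.0f;
cudaEventElapsedTime(&ms, t0, t1);   // elapsed time in milliseconds
printf("N: %ld, kernel time: %.3f ms\n", N, ms);

cudaEventDestroy(t0);
cudaEventDestroy(t1);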
aa48415c5a4a6cdba7ed4a2c2fce8ae590f02f4f.hip
// !!! This is a file automatically generated by hipify!!! #include "scan.hpp" #include "checkCudaErrors.hpp" #include "cudaMemory.hpp" #include "functions.hpp" #include <hip/hip_runtime.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> // -------------------- GPU Parallel Reduce Add (thrust) -------------------- void thrustGPUscan( const unsigned int* const d_in, const unsigned int length, unsigned int* const d_out ) { thrust::inclusive_scan(thrust::device, d_in, d_in + length, d_out, thrust::plus<unsigned int>()); } // -------------------- GPU Parallel Reduce Add -------------------- template <typename T> __global__ void kernelIncrementEach( T* const d_out, const T* const d_increment, const unsigned int length ) { unsigned int absIdx = (blockIdx.x + 1)*blockDim.x + threadIdx.x; // first 1024 elements don't need to do any more work if (absIdx >= length) return; d_out[absIdx] += d_increment[blockIdx.x]; } template <typename T> __global__ void kernelHillisSteelScanAdd( T* const d_in, const unsigned int length, T* const d_out, T* const d_increment ) { extern __shared__ T sh_data[]; // double buffer allows for read-modify-write op with only one __syncthreads() unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x; unsigned int idx = threadIdx.x; unsigned int bDim = blockDim.x; unsigned int buffIn = bDim, buffOut = 0; if (idx == 0) { sh_data[idx] = d_in[absIdx]; } else if (absIdx >= length) { return; } else { sh_data[buffOut + idx] = d_in[absIdx - 1] + d_in[absIdx]; } __syncthreads(); for (unsigned int i = 2; i < blockDim.x; i <<= 1) { buffOut = bDim - buffOut; buffIn = bDim - buffIn; if (idx < i) { sh_data[buffOut + idx] = sh_data[buffIn + idx]; d_out[absIdx] = sh_data[buffOut + idx]; return; } else { sh_data[buffOut + idx] = sh_data[buffIn + idx] + sh_data[buffIn + idx - i]; } __syncthreads(); } d_out[absIdx] = sh_data[buffOut + idx]; if (idx == blockDim.x - 1) d_increment[blockIdx.x] = sh_data[buffOut + idx]; } template <typename T> void pGPUscan( T* const d_in, const unsigned int length, T* const d_out, const unsigned int block_dim ) { dim3 blockDim(block_dim, 1, 1); unsigned int n = ui_ceilDiv(length, blockDim.x); unsigned int steps = double_ceilDiv( std::log2(n), std::log2(blockDim.x) ) + 1; T* d_inters[steps+1]; T* d_outs[steps+1]; const unsigned int* gridXs[steps+1]; d_inters[0] = d_in; d_outs[0] = d_out; gridXs[0] = &length; for (unsigned int i = 1; i <= steps; i++) { unsigned int* gridX = new unsigned int; *gridX = ui_ceilDiv(*gridXs[i-1], blockDim.x); gridXs[i] = gridX; dim3 gridDim(*gridXs[i], 1, 1); T* d_increment; T* d_outNext; // for the next run, the last will be unused allocCudaMem((void**) &d_increment, gridDim.x*sizeof(unsigned int)); // gpuMemFree((void**) &d_increment); allocCudaMem((void**) &d_outNext, gridDim.x*sizeof(unsigned int)); // gpuMemFree((void**) &d_outNext); d_inters[i] = d_increment; d_outs[i] = d_outNext; hipLaunchKernelGGL(( kernelHillisSteelScanAdd), dim3(gridDim), dim3(blockDim), blockDim.x*2*sizeof(T), 0, d_inters[i-1], *gridXs[i-1], d_outs[i-1], d_inters[i]); } for (unsigned int i = steps-1; i >= 1; i--) { dim3 gridDim(*gridXs[i]-1, 1, 1); // first 1024 elements don't need an update, so lunch 1 block less hipLaunchKernelGGL(( kernelIncrementEach), dim3(gridDim), dim3(blockDim), 0, 0, d_outs[i-1], d_outs[i], *gridXs[i-1]); } hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); for (unsigned int i = 1; i <= steps; i++) { delete gridXs[i]; gpuMemFree((void**) &d_inters[i]); gpuMemFree((void**) 
&d_outs[i]); } } void parallelGPUscan( unsigned int* const d_in, const unsigned int length, unsigned int* const d_out ) { pGPUscan(d_in, length, d_out, 128); }
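kernelHillisSteelScanAdd above is a block-wide Hillis-Steele inclusive scan over a double-buffered shared array, combined with kernelIncrementEach to stitch blocks together. For reference, a minimal single-block version of the same scan, without the multi-level increment pass (illustrative sketch; assumes the whole input fits in one thread block):

template <typename T>
__global__ void hillisSteeleScanSingleBlock(const T* in, T* out, unsigned int n)
{
    extern __shared__ unsigned char smemRaw[];     // 2 * blockDim.x * sizeof(T) bytes
    T* buf = reinterpret_cast<T*>(smemRaw);
    unsigned int tid  = threadIdx.x;
    unsigned int pout = 0, pin = 1;

    buf[pout * blockDim.x + tid] = (tid < n) ? in[tid] : T(0);
    __syncthreads();

    for (unsigned int offset = 1; offset < blockDim.x; offset <<= 1) {
        pout = 1 - pout;                           // swap the read and write halves
        pin  = 1 - pin;
        if (tid >= offset)
            buf[pout * blockDim.x + tid] =
                buf[pin * blockDim.x + tid] + buf[pin * blockDim.x + tid - offset];
        else
            buf[pout * blockDim.x + tid] = buf[pin * blockDim.x + tid];
        __syncthreads();
    }
    if (tid < n)
        out[tid] = buf[pout * blockDim.x + tid];   // inclusive prefix sums
}
// Launched e.g. as hillisSteeleScanSingleBlock<<<1, 128, 2 * 128 * sizeof(unsigned int)>>>(d_in, d_out, n)
// (or with the hipLaunchKernelGGL equivalent in the hipified build).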
aa48415c5a4a6cdba7ed4a2c2fce8ae590f02f4f.cu
#include "scan.hpp" #include "checkCudaErrors.hpp" #include "cudaMemory.hpp" #include "functions.hpp" #include <cuda.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> // -------------------- GPU Parallel Reduce Add (thrust) -------------------- void thrustGPUscan( const unsigned int* const d_in, const unsigned int length, unsigned int* const d_out ) { thrust::inclusive_scan(thrust::device, d_in, d_in + length, d_out, thrust::plus<unsigned int>()); } // -------------------- GPU Parallel Reduce Add -------------------- template <typename T> __global__ void kernelIncrementEach( T* const d_out, const T* const d_increment, const unsigned int length ) { unsigned int absIdx = (blockIdx.x + 1)*blockDim.x + threadIdx.x; // first 1024 elements don't need to do any more work if (absIdx >= length) return; d_out[absIdx] += d_increment[blockIdx.x]; } template <typename T> __global__ void kernelHillisSteelScanAdd( T* const d_in, const unsigned int length, T* const d_out, T* const d_increment ) { extern __shared__ T sh_data[]; // double buffer allows for read-modify-write op with only one __syncthreads() unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x; unsigned int idx = threadIdx.x; unsigned int bDim = blockDim.x; unsigned int buffIn = bDim, buffOut = 0; if (idx == 0) { sh_data[idx] = d_in[absIdx]; } else if (absIdx >= length) { return; } else { sh_data[buffOut + idx] = d_in[absIdx - 1] + d_in[absIdx]; } __syncthreads(); for (unsigned int i = 2; i < blockDim.x; i <<= 1) { buffOut = bDim - buffOut; buffIn = bDim - buffIn; if (idx < i) { sh_data[buffOut + idx] = sh_data[buffIn + idx]; d_out[absIdx] = sh_data[buffOut + idx]; return; } else { sh_data[buffOut + idx] = sh_data[buffIn + idx] + sh_data[buffIn + idx - i]; } __syncthreads(); } d_out[absIdx] = sh_data[buffOut + idx]; if (idx == blockDim.x - 1) d_increment[blockIdx.x] = sh_data[buffOut + idx]; } template <typename T> void pGPUscan( T* const d_in, const unsigned int length, T* const d_out, const unsigned int block_dim ) { dim3 blockDim(block_dim, 1, 1); unsigned int n = ui_ceilDiv(length, blockDim.x); unsigned int steps = double_ceilDiv( std::log2(n), std::log2(blockDim.x) ) + 1; T* d_inters[steps+1]; T* d_outs[steps+1]; const unsigned int* gridXs[steps+1]; d_inters[0] = d_in; d_outs[0] = d_out; gridXs[0] = &length; for (unsigned int i = 1; i <= steps; i++) { unsigned int* gridX = new unsigned int; *gridX = ui_ceilDiv(*gridXs[i-1], blockDim.x); gridXs[i] = gridX; dim3 gridDim(*gridXs[i], 1, 1); T* d_increment; T* d_outNext; // for the next run, the last will be unused allocCudaMem((void**) &d_increment, gridDim.x*sizeof(unsigned int)); // gpuMemFree((void**) &d_increment); allocCudaMem((void**) &d_outNext, gridDim.x*sizeof(unsigned int)); // gpuMemFree((void**) &d_outNext); d_inters[i] = d_increment; d_outs[i] = d_outNext; kernelHillisSteelScanAdd<<<gridDim, blockDim, blockDim.x*2*sizeof(T)>>>(d_inters[i-1], *gridXs[i-1], d_outs[i-1], d_inters[i]); } for (unsigned int i = steps-1; i >= 1; i--) { dim3 gridDim(*gridXs[i]-1, 1, 1); // first 1024 elements don't need an update, so lunch 1 block less kernelIncrementEach<<<gridDim, blockDim>>>(d_outs[i-1], d_outs[i], *gridXs[i-1]); } cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); for (unsigned int i = 1; i <= steps; i++) { delete gridXs[i]; gpuMemFree((void**) &d_inters[i]); gpuMemFree((void**) &d_outs[i]); } } void parallelGPUscan( unsigned int* const d_in, const unsigned int length, unsigned int* const d_out ) { pGPUscan(d_in, 
length, d_out, 128); }
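A host-side usage sketch for parallelGPUscan, validating the result against std::partial_sum (illustrative; error checking elided, and the declaration is assumed to come from the scan.hpp header included above):

#include <cuda_runtime.h>
#include <cassert>
#include <numeric>
#include <vector>

// Hypothetical driver: scan an array of ones on the GPU and compare with the CPU result.
void checkParallelGPUscan(unsigned int n)
{
    std::vector<unsigned int> h_in(n, 1u), h_ref(n), h_out(n);
    std::partial_sum(h_in.begin(), h_in.end(), h_ref.begin());   // inclusive reference scan

    unsigned int *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in,  n * sizeof(unsigned int));
    cudaMalloc(&d_out, n * sizeof(unsigned int));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(unsigned int), cudaMemcpyHostToDevice);

    parallelGPUscan(d_in, n, d_out);

    cudaMemcpy(h_out.data(), d_out, n * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    assert(h_out == h_ref);                                      // expects 1, 2, 3, ..., n

    cudaFree(d_in);
    cudaFree(d_out);
}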
377c6aff99921b0db3d20c1b76b4a80606cff13e.hip
// !!! This is a file automatically generated by hipify!!! #include "THHGeneral.h" #include "THHTensorCopy.h" #include "THHApply.cuh" #include "THHNumerics.cuh" inline int curGPU() { int curDev; THCudaCheck(hipGetDevice(&curDev)); return curDev; } // Copy operator for the pointwise apply kernel template <typename TypeDst, typename TypeSrc> struct CopyOp { __device__ __forceinline__ void operator()(TypeDst* dst, TypeSrc* src) { #if __CUDA_ARCH__ >= 350 *dst = ScalarConvert<TypeSrc, TypeDst>::to(__ldg(src)); #else *dst = ScalarConvert<TypeSrc, TypeDst>::to(*src); #endif } }; // Copy for the same type to the same type template <typename TensorTypeDst, typename TensorTypeSrc> void THC_copyTensor(THCState* state, TensorTypeDst* dst, TensorTypeSrc* src) { ptrdiff_t totalElements = TensorUtils<TensorTypeDst>::getNumElements(state, dst); THArgCheck(totalElements == TensorUtils<TensorTypeSrc>::getNumElements(state, src), 2, "sizes do not match"); if (TensorUtils<TensorTypeDst>::getDims(state, dst) == 0) { // Zero-dim tensor; copy nothing return; } // We can memcpy the memory if: // -both tensors are contiguous; or, // -there is only one element to copy; or, // -FIXME: if both tensors have matching size and stride arrays, and no // holes within (in other words, there is some permutation that can be applied // to the size/strides such that the resulting tensor is // contiguous). // -AND: both tensors have the same type. bool sameType = isSameType<TensorTypeSrc, TensorTypeDst>(); bool srcContig = TensorUtils<TensorTypeSrc>::isContiguous(state, src); bool dstContig = TensorUtils<TensorTypeDst>::isContiguous(state, dst); bool memcpyEligible = ((srcContig && dstContig) || (totalElements == 1)) && sameType; int srcDev = TensorUtils<TensorTypeSrc>::getDevice(state, src); int dstDev = TensorUtils<TensorTypeDst>::getDevice(state, dst); int oldDev = curGPU(); // Try to enable p2p access. This also handles the case srcDev == dstDev. bool p2pEnabled = THCState_getPeerToPeerAccess(state, srcDev, dstDev); // We always perform the copy on the source device, using the // current stream on the source device. // If the copy is on the default stream, then we fully synchronize // both src and dst's default streams for completion of the // copy. We have to explicitly do this for non-contig copies. // This mimics the behavior of cross-device hipMemcpyAsync on // the default stream. // If the copy is not on the default stream, then it is up to the // user to add needed synchronization on the dst device, since the // stream on the dst device that wishes to synchronize may not be // the same index as the one on the src device. hipStream_t copyStream = THCState_getCurrentStreamOnDevice(state, srcDev); if (srcDev != dstDev && copyStream == NULL) { // This is a cross-device copy on the default stream. We perform a // two-way barrier between both devices' default streams before // the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are // handled, so that no one is operating on the dst memory when // we perform the copy. 
// src waits on dst barrier (src already waits on src) hipEvent_t dstReady; THCudaCheck(hipSetDevice(dstDev)); THCudaCheck(hipEventCreateWithFlags(&dstReady, hipEventDisableTiming)); THCudaCheck(hipEventRecord(dstReady, NULL)); THCudaCheck(hipSetDevice(srcDev)); THCudaCheck(hipStreamWaitEvent(NULL, dstReady, 0)); THCudaCheck(hipEventDestroy(dstReady)); } else if (srcDev != oldDev) { THCudaCheck(hipSetDevice(srcDev)); } // We are now on srcDev if (memcpyEligible) { // Perform the copy THCudaCheck(hipMemcpyAsync( TensorUtils<TensorTypeDst>::getData(state, dst), TensorUtils<TensorTypeSrc>::getData(state, src), totalElements * sizeof(typename TensorUtils<TensorTypeDst>::DataType), hipMemcpyDeviceToDevice, copyStream)); } else { // Non-contiguous copy or a type-conversion copy // We avoid creating temporary memory copies if possible. // If both src and dst are on the same device, or if they are on // different devices and p2p access is enabled, perform the copy // by a pointwise copy kernel. // Otherwise, we'll have to make contiguous (which will in fact // invoke copy() again), and then perform the copy. // FIXME: might want to consider only running the pointwise kernel // if both src and dst innermost dimensions are contiguous. If // they are not, then taking the hit of the memory allocation/free // might be worth it to avoid non-coalesced reads or writes. if (p2pEnabled) { bool succ = THC_pointwiseApply2( state, dst, src, CopyOp<typename TensorUtils<TensorTypeDst>::DataType, typename TensorUtils<TensorTypeSrc>::DataType>()); THArgCheck(succ, 2, CUTORCH_DIM_WARNING); } else { // GPUs can't access each other directly, but the tensors // involved are non-contiguous and/or are different types. // Make sure the src is contiguous and in the same type as dst THCudaCheck(hipSetDevice(srcDev)); TensorTypeDst* srcContig = NULL; if (sameType) { srcContig = (TensorTypeDst*) // this is actually the same type as src TensorUtils<TensorTypeSrc>::newContiguous(state, src); } else { // Types are different // Copy into the new format, contiguous, on the source device srcContig = TensorUtils<TensorTypeDst>::newTensor(state); TensorUtils<TensorTypeDst>::resizeAs(state, srcContig, dst); bool succ = THC_pointwiseApply2( state, srcContig, src, CopyOp<typename TensorUtils<TensorTypeDst>::DataType, typename TensorUtils<TensorTypeSrc>::DataType>()); THArgCheck(succ, 2, CUTORCH_DIM_WARNING); } // Make sure the dst is contiguous THCudaCheck(hipSetDevice(dstDev)); TensorTypeDst* dstContig = TensorUtils<TensorTypeDst>::newContiguous(state, dst); // Now, we are ready for a cross-device memcpy of contiguous // data, of the same layout and type THCudaCheck(hipSetDevice(srcDev)); THCudaCheck(hipMemcpyAsync( TensorUtils<TensorTypeDst>::getData(state, dstContig), TensorUtils<TensorTypeDst>::getData(state, srcContig), totalElements * sizeof(typename TensorUtils<TensorTypeDst>::DataType), hipMemcpyDeviceToDevice, copyStream)); // We are done with the src TensorUtils<TensorTypeDst>::free(state, srcContig); if (dst != dstContig) { TensorUtils<TensorTypeDst>::freeCopyTo(state, dstContig, dst); } else { TensorUtils<TensorTypeDst>::free(state, dstContig); } // We're still on srcDev at this point } } if (srcDev != dstDev && copyStream == NULL) { // dst waits on src barrier (dst already waits on dst). We cannot // operate on dst's copy until the copy is complete. 
// Still on srcDev, record default stream event hipEvent_t srcReady; THCudaCheck(hipEventCreateWithFlags(&srcReady, hipEventDisableTiming)); THCudaCheck(hipEventRecord(srcReady, NULL)); THCudaCheck(hipSetDevice(dstDev)); THCudaCheck(hipStreamWaitEvent(NULL, srcReady, 0)); THCudaCheck(hipEventDestroy(srcReady)); // We are now on dstDev (right above). Restore prior device from dst if (dstDev != oldDev) { THCudaCheck(hipSetDevice(oldDev)); } } else { // We are still on srcDev. Restore prior device from src if (srcDev != oldDev) { THCudaCheck(hipSetDevice(oldDev)); } } THCudaCheck(hipGetLastError()); } #include "generic/THCTensorCopy.cu" #include "THHGenerateAllTypes.h"
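The cross-device handshake above, making the source device's default stream wait for whatever is already queued on the destination device's default stream, is an event record followed by a stream wait. A stripped-down sketch with plain CUDA runtime calls (the hipified calls above map one-to-one; error handling elided):

#include <cuda_runtime.h>

// One direction of the two-way barrier: block further work on srcDev's default
// stream until everything already queued on dstDev's default stream has finished.
void waitForPeerDefaultStream(int srcDev, int dstDev)
{
    cudaEvent_t dstReady;

    cudaSetDevice(dstDev);
    cudaEventCreateWithFlags(&dstReady, cudaEventDisableTiming);
    cudaEventRecord(dstReady, 0);            // marks the end of work queued so far on dstDev

    cudaSetDevice(srcDev);
    cudaStreamWaitEvent(0, dstReady, 0);     // srcDev's default stream waits for that marker

    cudaEventDestroy(dstReady);
}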
377c6aff99921b0db3d20c1b76b4a80606cff13e.cu
#include "THCGeneral.h" #include "THCTensorCopy.h" #include "THCApply.cuh" #include "THCNumerics.cuh" inline int curGPU() { int curDev; THCudaCheck(cudaGetDevice(&curDev)); return curDev; } // Copy operator for the pointwise apply kernel template <typename TypeDst, typename TypeSrc> struct CopyOp { __device__ __forceinline__ void operator()(TypeDst* dst, TypeSrc* src) { #if __CUDA_ARCH__ >= 350 *dst = ScalarConvert<TypeSrc, TypeDst>::to(__ldg(src)); #else *dst = ScalarConvert<TypeSrc, TypeDst>::to(*src); #endif } }; // Copy for the same type to the same type template <typename TensorTypeDst, typename TensorTypeSrc> void THC_copyTensor(THCState* state, TensorTypeDst* dst, TensorTypeSrc* src) { ptrdiff_t totalElements = TensorUtils<TensorTypeDst>::getNumElements(state, dst); THArgCheck(totalElements == TensorUtils<TensorTypeSrc>::getNumElements(state, src), 2, "sizes do not match"); if (TensorUtils<TensorTypeDst>::getDims(state, dst) == 0) { // Zero-dim tensor; copy nothing return; } // We can memcpy the memory if: // -both tensors are contiguous; or, // -there is only one element to copy; or, // -FIXME: if both tensors have matching size and stride arrays, and no // holes within (in other words, there is some permutation that can be applied // to the size/strides such that the resulting tensor is // contiguous). // -AND: both tensors have the same type. bool sameType = isSameType<TensorTypeSrc, TensorTypeDst>(); bool srcContig = TensorUtils<TensorTypeSrc>::isContiguous(state, src); bool dstContig = TensorUtils<TensorTypeDst>::isContiguous(state, dst); bool memcpyEligible = ((srcContig && dstContig) || (totalElements == 1)) && sameType; int srcDev = TensorUtils<TensorTypeSrc>::getDevice(state, src); int dstDev = TensorUtils<TensorTypeDst>::getDevice(state, dst); int oldDev = curGPU(); // Try to enable p2p access. This also handles the case srcDev == dstDev. bool p2pEnabled = THCState_getPeerToPeerAccess(state, srcDev, dstDev); // We always perform the copy on the source device, using the // current stream on the source device. // If the copy is on the default stream, then we fully synchronize // both src and dst's default streams for completion of the // copy. We have to explicitly do this for non-contig copies. // This mimics the behavior of cross-device cudaMemcpyAsync on // the default stream. // If the copy is not on the default stream, then it is up to the // user to add needed synchronization on the dst device, since the // stream on the dst device that wishes to synchronize may not be // the same index as the one on the src device. cudaStream_t copyStream = THCState_getCurrentStreamOnDevice(state, srcDev); if (srcDev != dstDev && copyStream == NULL) { // This is a cross-device copy on the default stream. We perform a // two-way barrier between both devices' default streams before // the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are // handled, so that no one is operating on the dst memory when // we perform the copy. 
// src waits on dst barrier (src already waits on src) cudaEvent_t dstReady; THCudaCheck(cudaSetDevice(dstDev)); THCudaCheck(cudaEventCreateWithFlags(&dstReady, cudaEventDisableTiming)); THCudaCheck(cudaEventRecord(dstReady, NULL)); THCudaCheck(cudaSetDevice(srcDev)); THCudaCheck(cudaStreamWaitEvent(NULL, dstReady, 0)); THCudaCheck(cudaEventDestroy(dstReady)); } else if (srcDev != oldDev) { THCudaCheck(cudaSetDevice(srcDev)); } // We are now on srcDev if (memcpyEligible) { // Perform the copy THCudaCheck(cudaMemcpyAsync( TensorUtils<TensorTypeDst>::getData(state, dst), TensorUtils<TensorTypeSrc>::getData(state, src), totalElements * sizeof(typename TensorUtils<TensorTypeDst>::DataType), cudaMemcpyDeviceToDevice, copyStream)); } else { // Non-contiguous copy or a type-conversion copy // We avoid creating temporary memory copies if possible. // If both src and dst are on the same device, or if they are on // different devices and p2p access is enabled, perform the copy // by a pointwise copy kernel. // Otherwise, we'll have to make contiguous (which will in fact // invoke copy() again), and then perform the copy. // FIXME: might want to consider only running the pointwise kernel // if both src and dst innermost dimensions are contiguous. If // they are not, then taking the hit of the memory allocation/free // might be worth it to avoid non-coalesced reads or writes. if (p2pEnabled) { bool succ = THC_pointwiseApply2( state, dst, src, CopyOp<typename TensorUtils<TensorTypeDst>::DataType, typename TensorUtils<TensorTypeSrc>::DataType>()); THArgCheck(succ, 2, CUTORCH_DIM_WARNING); } else { // GPUs can't access each other directly, but the tensors // involved are non-contiguous and/or are different types. // Make sure the src is contiguous and in the same type as dst THCudaCheck(cudaSetDevice(srcDev)); TensorTypeDst* srcContig = NULL; if (sameType) { srcContig = (TensorTypeDst*) // this is actually the same type as src TensorUtils<TensorTypeSrc>::newContiguous(state, src); } else { // Types are different // Copy into the new format, contiguous, on the source device srcContig = TensorUtils<TensorTypeDst>::newTensor(state); TensorUtils<TensorTypeDst>::resizeAs(state, srcContig, dst); bool succ = THC_pointwiseApply2( state, srcContig, src, CopyOp<typename TensorUtils<TensorTypeDst>::DataType, typename TensorUtils<TensorTypeSrc>::DataType>()); THArgCheck(succ, 2, CUTORCH_DIM_WARNING); } // Make sure the dst is contiguous THCudaCheck(cudaSetDevice(dstDev)); TensorTypeDst* dstContig = TensorUtils<TensorTypeDst>::newContiguous(state, dst); // Now, we are ready for a cross-device memcpy of contiguous // data, of the same layout and type THCudaCheck(cudaSetDevice(srcDev)); THCudaCheck(cudaMemcpyAsync( TensorUtils<TensorTypeDst>::getData(state, dstContig), TensorUtils<TensorTypeDst>::getData(state, srcContig), totalElements * sizeof(typename TensorUtils<TensorTypeDst>::DataType), cudaMemcpyDeviceToDevice, copyStream)); // We are done with the src TensorUtils<TensorTypeDst>::free(state, srcContig); if (dst != dstContig) { TensorUtils<TensorTypeDst>::freeCopyTo(state, dstContig, dst); } else { TensorUtils<TensorTypeDst>::free(state, dstContig); } // We're still on srcDev at this point } } if (srcDev != dstDev && copyStream == NULL) { // dst waits on src barrier (dst already waits on dst). We cannot // operate on dst's copy until the copy is complete. 
// Still on srcDev, record default stream event cudaEvent_t srcReady; THCudaCheck(cudaEventCreateWithFlags(&srcReady, cudaEventDisableTiming)); THCudaCheck(cudaEventRecord(srcReady, NULL)); THCudaCheck(cudaSetDevice(dstDev)); THCudaCheck(cudaStreamWaitEvent(NULL, srcReady, 0)); THCudaCheck(cudaEventDestroy(srcReady)); // We are now on dstDev (right above). Restore prior device from dst if (dstDev != oldDev) { THCudaCheck(cudaSetDevice(oldDev)); } } else { // We are still on srcDev. Restore prior device from src if (srcDev != oldDev) { THCudaCheck(cudaSetDevice(oldDev)); } } THCudaCheck(cudaGetLastError()); } #include "generic/THCTensorCopy.cu" #include "THCGenerateAllTypes.h"
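THCState_getPeerToPeerAccess above decides whether the pointwise copy kernel may dereference memory that lives on the other GPU. A plausible reduction of that decision to the raw runtime API (illustrative sketch only; the real cutorch helper caches results and applies its own policy):

#include <cuda_runtime.h>

// Returns true when srcDev can directly read memory resident on dstDev
// (or when both are the same device), enabling peer access on first use.
bool canUseDirectPeerCopy(int srcDev, int dstDev)
{
    if (srcDev == dstDev)
        return true;

    int canAccess = 0;
    cudaDeviceCanAccessPeer(&canAccess, srcDev, dstDev);
    if (!canAccess)
        return false;

    cudaSetDevice(srcDev);
    cudaError_t err = cudaDeviceEnablePeerAccess(dstDev, 0);
    return err == cudaSuccess || err == cudaErrorPeerAccessAlreadyEnabled;
}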
a945b2fb9d129ce3b86b4ea305d3ef782d3c24e0.hip
// !!! This is a file automatically generated by hipify!!! #include "THHReduceApplyUtils.cuh" #include <assert.h> #include <stdlib.h> // Maximum size per grid dimension that we assume (compute capability >= 2.0) #define MAX_GRID_SIZE 65535LL void THCCheckTensorDims(THCState* state, THCudaTensor* tensor, int arg) { int64_t dims = THCudaTensor__nDimension(state, tensor); THArgCheck(dims <= MAX_CUTORCH_DIMS, arg, CUTORCH_DIM_WARNING); } bool THC_getGridFromTiles(ptrdiff_t gridTiles, dim3& grid) { if (gridTiles > MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE) { return false; } int64_t gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; int64_t gridY = 1; int64_t gridZ = 1; if (gridTiles > MAX_GRID_SIZE) { gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE); gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; if (gridTiles > MAX_GRID_SIZE) { gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE); gridZ = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; } } grid = dim3(gridX, gridY, gridZ); return true; }
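THC_getGridFromTiles spreads up to 65535^3 tiles over a 3D grid by repeated ceiling division, so a kernel launched with that grid has to rebuild its flat tile index and guard against the rounded-up tail. A small sketch of that consumer side (names introduced here are illustrative):

// Flat tile index for a grid produced by THC_getGridFromTiles (x varies fastest).
__device__ __forceinline__ ptrdiff_t linearBlockId()
{
    return ((ptrdiff_t)blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x;
}

__global__ void exampleTileKernel(ptrdiff_t totalTiles)
{
    ptrdiff_t tile = linearBlockId();
    if (tile >= totalTiles)      // the grid is rounded up, so trailing blocks do nothing
        return;
    // ... process this tile ...
}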
a945b2fb9d129ce3b86b4ea305d3ef782d3c24e0.cu
#include "THCReduceApplyUtils.cuh" #include <assert.h> #include <stdlib.h> // Maximum size per grid dimension that we assume (compute capability >= 2.0) #define MAX_GRID_SIZE 65535LL void THCCheckTensorDims(THCState* state, THCudaTensor* tensor, int arg) { int64_t dims = THCudaTensor__nDimension(state, tensor); THArgCheck(dims <= MAX_CUTORCH_DIMS, arg, CUTORCH_DIM_WARNING); } bool THC_getGridFromTiles(ptrdiff_t gridTiles, dim3& grid) { if (gridTiles > MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE) { return false; } int64_t gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; int64_t gridY = 1; int64_t gridZ = 1; if (gridTiles > MAX_GRID_SIZE) { gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE); gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; if (gridTiles > MAX_GRID_SIZE) { gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE); gridZ = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; } } grid = dim3(gridX, gridY, gridZ); return true; }
f825bc419b9469cb769c330293762030dce44d64.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime_api.h> #include <stdio.h> #include <iostream> #include <vector> #include <algorithm> #include <numeric> #define AxCheckError(err) CheckError(err,__FUNCTION__, __LINE__) #define AxCheckErrorMsg(err, msg) CheckErrorMsg(err, msg, __FUNCTION__, __LINE__) void GenerateTestData(int const N, float* const input, float* const filtered, float* const ref); void CompareData(int const N, float const* const a, float const* const b); void CheckError(hipError_t const err, char const* const fun, const int line); void CheckErrorMsg(hipError_t const err, char const* const msg, char const* const fun, int const line); #define BLOCK_SIZE 512 float const FILTER_COEFFS[21] = {0.005f,0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f, 0.07f, 0.25f, 0.75f, 1.0f, 0.75f, 0.25f, 0.07f, 0.06f, 0.05f, 0.04f, 0.03f, 0.02f, 0.01f, 0.005f}; // Armazenado na Constant Memory __device__ __constant__ float FilterCoeffs[21] = {0.005f,0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f, 0.07f, 0.25f, 0.75f, 1.0f, 0.75f, 0.25f, 0.07f, 0.06f, 0.05f, 0.04f, 0.03f, 0.02f, 0.01f, 0.005f}; // Usa apenas a Global Memory __global__ void GlobalFilter(float* const input, float* const filtered, int const N) { int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (10 < gIdx && gIdx < N - 10) { float sum; sum = input[gIdx - 10] * FilterCoeffs[ 0] + input[gIdx - 9] * FilterCoeffs[ 1] + input[gIdx - 8] * FilterCoeffs[ 2] + input[gIdx - 7] * FilterCoeffs[ 3] + input[gIdx - 6] * FilterCoeffs[ 4] + input[gIdx - 5] * FilterCoeffs[ 5] + input[gIdx - 4] * FilterCoeffs[ 6] + input[gIdx - 3] * FilterCoeffs[ 7] + input[gIdx - 2] * FilterCoeffs[ 8] + input[gIdx - 1] * FilterCoeffs[ 9] + input[gIdx ] * FilterCoeffs[10] + input[gIdx + 1] * FilterCoeffs[11] + input[gIdx + 2] * FilterCoeffs[12] + input[gIdx + 3] * FilterCoeffs[13] + input[gIdx + 4] * FilterCoeffs[14] + input[gIdx + 5] * FilterCoeffs[15] + input[gIdx + 6] * FilterCoeffs[16] + input[gIdx + 7] * FilterCoeffs[17] + input[gIdx + 8] * FilterCoeffs[18] + input[gIdx + 9] * FilterCoeffs[19] + input[gIdx + 10] * FilterCoeffs[20]; filtered[gIdx] = sum; } } // Usa a Shared Memory __global__ void SharedFilter(float* const input, float* const filtered, int const N) { __shared__ float inputS[BLOCK_SIZE+20]; int sIdx = threadIdx.x; long long gIdx = blockIdx.x * blockDim.x + threadIdx.x; // Dez valores extras no ?ndice int sIdxShift = sIdx + 10; // Todas as threads fazem a leitura de um elemento na Global Memory e armazenam na Shared Memory. 
if (gIdx < N) { inputS[sIdxShift] = input[gIdx]; } // As primeiras 10 threads no bloco armazenam os 10 valores extras nos 10 primeiros elementos da Shared Memory if(sIdx < 10 && blockIdx.x != 0) { inputS[sIdx] = input[gIdx - 10]; } // As ?ltimas 10 threads armazenam os 10 valores extras na Shared Memory if(sIdxShift >= blockDim.x && blockIdx.x < gridDim.x - 1) { inputS[sIdxShift + 10] = input[gIdx + 10]; } __syncthreads(); float sum; sum = inputS[sIdxShift - 10] * FilterCoeffs[ 0] + inputS[sIdxShift - 9] * FilterCoeffs[ 1] + inputS[sIdxShift - 8] * FilterCoeffs[ 2] + inputS[sIdxShift - 7] * FilterCoeffs[ 3] + inputS[sIdxShift - 6] * FilterCoeffs[ 4] + inputS[sIdxShift - 5] * FilterCoeffs[ 5] + inputS[sIdxShift - 4] * FilterCoeffs[ 6] + inputS[sIdxShift - 3] * FilterCoeffs[ 7] + inputS[sIdxShift - 2] * FilterCoeffs[ 8] + inputS[sIdxShift - 1] * FilterCoeffs[ 9] + inputS[sIdxShift ] * FilterCoeffs[10] + inputS[sIdxShift + 1] * FilterCoeffs[11] + inputS[sIdxShift + 2] * FilterCoeffs[12] + inputS[sIdxShift + 3] * FilterCoeffs[13] + inputS[sIdxShift + 4] * FilterCoeffs[14] + inputS[sIdxShift + 5] * FilterCoeffs[15] + inputS[sIdxShift + 6] * FilterCoeffs[16] + inputS[sIdxShift + 7] * FilterCoeffs[17] + inputS[sIdxShift + 8] * FilterCoeffs[18] + inputS[sIdxShift + 9] * FilterCoeffs[19] + inputS[sIdxShift + 10] * FilterCoeffs[20]; filtered[gIdx] = sum; } int main() { float *inputH, *filteredH, *refH; float *inputD, *filteredD; hipError_t e = hipSuccess; dim3 gridSize, gridSize2; dim3 blockSize; int const N = 16*1024*1024; int const N_BYTES = N * sizeof(float); inputH = (float*)malloc(N_BYTES); filteredH = (float*)malloc(N_BYTES); refH = (float*)malloc(N_BYTES); GenerateTestData(N, inputH, filteredH, refH); e = hipMalloc((void**)&inputD, N_BYTES); AxCheckError(e); e = hipMalloc((void**)&filteredD, N_BYTES); AxCheckError(e); e = hipMemcpy(inputD, inputH, N_BYTES, hipMemcpyHostToDevice); AxCheckError(e); gridSize.x = ((N + BLOCK_SIZE - 1) / BLOCK_SIZE); blockSize.x = BLOCK_SIZE; int const TRIALS = 5; std::vector<float> sharedTimes; std::vector<float> globalTimes; hipEvent_t start, stop; e = hipEventCreate(&start); AxCheckError(e); e = hipEventCreate(&stop); AxCheckError(e); e = hipProfilerStart(); for(int i = 0; i < TRIALS; i++) { e = hipEventRecord(start, 0); hipLaunchKernelGGL(( SharedFilter), dim3(gridSize), dim3(blockSize), 0, 0, inputD, filteredD, N); e = hipEventRecord(stop, 0); AxCheckError(hipDeviceSynchronize()); AxCheckError(hipGetLastError()); float elapsed; e = hipEventElapsedTime(&elapsed, start, stop); sharedTimes.push_back(elapsed); e = hipEventRecord(start, 0); hipLaunchKernelGGL(( GlobalFilter), dim3(gridSize), dim3(blockSize), 0, 0, inputD, filteredD, N); e = hipEventRecord(stop, 0); AxCheckError(hipDeviceSynchronize()); AxCheckError(hipGetLastError()); e = hipEventElapsedTime(&elapsed, start, stop); globalTimes.push_back(elapsed); } e = hipProfilerStop(); float averageTime = std::accumulate(globalTimes.begin(), globalTimes.end(), 0.0f)/globalTimes.size(); std::cout << "Global Memory time (ms): " << averageTime << std::endl; averageTime = std::accumulate(sharedTimes.begin(), sharedTimes.end(), 0.0f)/sharedTimes.size(); std::cout << "Shared Memory time (ms): " << averageTime << std::endl; /* Executando o kernel */ hipLaunchKernelGGL(( SharedFilter), dim3(gridSize), dim3(blockSize), 0, 0, inputD, filteredD, N); AxCheckError(hipDeviceSynchronize()); AxCheckError(hipGetLastError()); /* N?o geramos zeros para os 10 primeiros / ?ltimos 10 elementos no kernel. 
Na verdade, geramos valores usando ??????? Shared Memory n?o inicializada como entradas, logo elas est?o incorretas. Portanto, n?o os copiamos e confiamos ??????? no fato de que o filtro H foi previamente ajustado para zero. */ e = hipMemcpy(filteredH + 10, filteredD + 10, N_BYTES - 20 * sizeof(float), hipMemcpyDeviceToHost); AxCheckError(e); std::cout << "Validando o output do SharedFilter..." << std::endl; CompareData(N, filteredH, refH); /* Executando o kernel */ hipLaunchKernelGGL(( GlobalFilter), dim3(gridSize), dim3(blockSize), 0, 0, inputD, filteredD, N); AxCheckError(hipDeviceSynchronize()); AxCheckError(hipGetLastError()); /* N?s n?o geramos sa?da para os 10 primeiros / ?ltimos 10 elementos no kernel. Portanto, n?o os copiamos e confiamos ???? no fato de que o filtroH foi previamente ajustado para zero. */ e = hipMemcpy(filteredH + 10, filteredD + 10, N_BYTES - 20 * sizeof(float), hipMemcpyDeviceToHost); AxCheckError(e); std::cout << "Validando o output do GlobalFilter..." << std::endl; CompareData(N, filteredH, refH); hipFree(inputD); hipFree(filteredD); free(inputH); free(filteredH); free(refH); AxCheckError(hipDeviceReset()); getchar(); return 0; } void GenerateTestData(int const N, float* const input, float* const filtered, float* const ref) { int i; for(i = 0; i < N; i++) { //input[i] = ((float)rand())/RAND_MAX; input[i] = i; filtered[i] = 0.0f; } memset(ref, 0, N*sizeof(float) ); /* N?o podemos calcular um filtro de 21 pontos nas bordas da nossa matriz. ??????? Se todos os 21 pontos n?o estiverem dispon?veis, o resultado esperado ? zero! */ for(i = 10; i < N-10; i++) { ref[i] = (input[i-10]*FILTER_COEFFS[ 0] + input[i- 9]*FILTER_COEFFS[ 1] + input[i- 8]*FILTER_COEFFS[ 2] + input[i- 7]*FILTER_COEFFS[ 3] + input[i- 6]*FILTER_COEFFS[ 4] + input[i- 5]*FILTER_COEFFS[ 5] + input[i- 4]*FILTER_COEFFS[ 6] + input[i- 3]*FILTER_COEFFS[ 7] + input[i- 2]*FILTER_COEFFS[ 8] + input[i- 1]*FILTER_COEFFS[ 9] + input[i ]*FILTER_COEFFS[10] + input[i+ 1]*FILTER_COEFFS[11] + input[i+ 2]*FILTER_COEFFS[12] + input[i+ 3]*FILTER_COEFFS[13] + input[i+ 4]*FILTER_COEFFS[14] + input[i+ 5]*FILTER_COEFFS[15] + input[i+ 6]*FILTER_COEFFS[16] + input[i+ 7]*FILTER_COEFFS[17] + input[i+ 8]*FILTER_COEFFS[18] + input[i+ 9]*FILTER_COEFFS[19] + input[i+10]*FILTER_COEFFS[20]); } } int UlpDifference(float a, float b) { int iA, iB; iA = *((int*)(&a)); iB = *((int*)(&b)); return abs(iA - iB); } void CompareData(int const N, float const* const a, float const* const b) { int i; int different = 0; for(i = 0; i < N; i++) { different = (UlpDifference(a[i],b[i]) > 5); if(different) { std::cout << "Mismatch: " << a[i] << " " << b[i] << std::endl; break; } } if(different) { printf("Arrays do not match @%d.\n", i); } else { printf("Arrays match.\n"); } } void CheckError(hipError_t const err, char const* const fun, const int line) { if (err) { printf("CUDA Error Code[%d]: %s %s():%d\n",err,hipGetErrorString(err),fun,line); exit(1); } } void CheckErrorMsg(hipError_t const err, char const* const msg, char const* const fun, int const line) { if (err) { printf("CUDA Error Code[%d]: %s %s() %d\n%s\n",err,hipGetErrorString(err),fun,line,msg); exit(1); } }
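Index bookkeeping in SharedFilter above, worked through for BLOCK_SIZE = 512: inputS holds 512 + 20 = 532 floats, slot sIdxShift = sIdx + 10 is each thread's own sample, slots 0 through 9 form the left halo filled by the first 10 threads of every block except block 0, and slots 522 through 531 form the right halo filled by the last 10 threads of every block except the last. Boundary blocks skip those halo loads, so the first and last 10 outputs are computed from uninitialized shared memory, and main() therefore copies back only elements 10 through N - 11 and relies on the pre-zeroed host buffer at the borders. Note also that GlobalFilter's guard (10 < gIdx) skips element 10 entirely; in this program the earlier SharedFilter launch happens to have filled that slot, which masks the off-by-one when the GlobalFilter output is validated.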
f825bc419b9469cb769c330293762030dce44d64.cu
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <iostream>
#include <vector>
#include <algorithm>
#include <numeric>

#define AxCheckError(err) CheckError(err,__FUNCTION__, __LINE__)
#define AxCheckErrorMsg(err, msg) CheckErrorMsg(err, msg, __FUNCTION__, __LINE__)

void GenerateTestData(int const N, float* const input, float* const filtered, float* const ref);
void CompareData(int const N, float const* const a, float const* const b);
void CheckError(cudaError_t const err, char const* const fun, const int line);
void CheckErrorMsg(cudaError_t const err, char const* const msg, char const* const fun, int const line);

#define BLOCK_SIZE 512

float const FILTER_COEFFS[21] = {0.005f, 0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f, 0.07f, 0.25f, 0.75f,
                                 1.0f, 0.75f, 0.25f, 0.07f, 0.06f, 0.05f, 0.04f, 0.03f, 0.02f, 0.01f, 0.005f};

// Stored in constant memory
__device__ __constant__ float FilterCoeffs[21] = {0.005f, 0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f, 0.07f, 0.25f, 0.75f,
                                                  1.0f, 0.75f, 0.25f, 0.07f, 0.06f, 0.05f, 0.04f, 0.03f, 0.02f, 0.01f, 0.005f};

// Uses global memory only
__global__ void GlobalFilter(float* const input, float* const filtered, int const N)
{
    int gIdx = blockIdx.x * blockDim.x + threadIdx.x;

    if (gIdx >= 10 && gIdx < N - 10)
    {
        float sum;
        sum = input[gIdx - 10] * FilterCoeffs[ 0] + input[gIdx -  9] * FilterCoeffs[ 1] +
              input[gIdx -  8] * FilterCoeffs[ 2] + input[gIdx -  7] * FilterCoeffs[ 3] +
              input[gIdx -  6] * FilterCoeffs[ 4] + input[gIdx -  5] * FilterCoeffs[ 5] +
              input[gIdx -  4] * FilterCoeffs[ 6] + input[gIdx -  3] * FilterCoeffs[ 7] +
              input[gIdx -  2] * FilterCoeffs[ 8] + input[gIdx -  1] * FilterCoeffs[ 9] +
              input[gIdx     ] * FilterCoeffs[10] +
              input[gIdx +  1] * FilterCoeffs[11] + input[gIdx +  2] * FilterCoeffs[12] +
              input[gIdx +  3] * FilterCoeffs[13] + input[gIdx +  4] * FilterCoeffs[14] +
              input[gIdx +  5] * FilterCoeffs[15] + input[gIdx +  6] * FilterCoeffs[16] +
              input[gIdx +  7] * FilterCoeffs[17] + input[gIdx +  8] * FilterCoeffs[18] +
              input[gIdx +  9] * FilterCoeffs[19] + input[gIdx + 10] * FilterCoeffs[20];

        filtered[gIdx] = sum;
    }
}

// Uses shared memory
__global__ void SharedFilter(float* const input, float* const filtered, int const N)
{
    __shared__ float inputS[BLOCK_SIZE+20];

    int sIdx = threadIdx.x;
    long long gIdx = blockIdx.x * blockDim.x + threadIdx.x;
    // Index shifted by the ten extra (halo) values
    int sIdxShift = sIdx + 10;

    // Every thread reads one element from global memory and stores it in shared memory.
    if (gIdx < N) { inputS[sIdxShift] = input[gIdx]; }

    // The first 10 threads in the block also store the 10 extra (halo) values into the first
    // 10 elements of shared memory.
    if (sIdx < 10 && blockIdx.x != 0) { inputS[sIdx] = input[gIdx - 10]; }

    // The last 10 threads store the 10 extra (halo) values at the end of shared memory.
    if (sIdxShift >= blockDim.x && blockIdx.x < gridDim.x - 1) { inputS[sIdxShift + 10] = input[gIdx + 10]; }

    __syncthreads();

    float sum;
    sum = inputS[sIdxShift - 10] * FilterCoeffs[ 0] + inputS[sIdxShift -  9] * FilterCoeffs[ 1] +
          inputS[sIdxShift -  8] * FilterCoeffs[ 2] + inputS[sIdxShift -  7] * FilterCoeffs[ 3] +
          inputS[sIdxShift -  6] * FilterCoeffs[ 4] + inputS[sIdxShift -  5] * FilterCoeffs[ 5] +
          inputS[sIdxShift -  4] * FilterCoeffs[ 6] + inputS[sIdxShift -  3] * FilterCoeffs[ 7] +
          inputS[sIdxShift -  2] * FilterCoeffs[ 8] + inputS[sIdxShift -  1] * FilterCoeffs[ 9] +
          inputS[sIdxShift     ] * FilterCoeffs[10] +
          inputS[sIdxShift +  1] * FilterCoeffs[11] + inputS[sIdxShift +  2] * FilterCoeffs[12] +
          inputS[sIdxShift +  3] * FilterCoeffs[13] + inputS[sIdxShift +  4] * FilterCoeffs[14] +
          inputS[sIdxShift +  5] * FilterCoeffs[15] + inputS[sIdxShift +  6] * FilterCoeffs[16] +
          inputS[sIdxShift +  7] * FilterCoeffs[17] + inputS[sIdxShift +  8] * FilterCoeffs[18] +
          inputS[sIdxShift +  9] * FilterCoeffs[19] + inputS[sIdxShift + 10] * FilterCoeffs[20];

    filtered[gIdx] = sum;
}

int main()
{
    float *inputH, *filteredH, *refH;
    float *inputD, *filteredD;
    cudaError_t e = cudaSuccess;
    dim3 gridSize, gridSize2;
    dim3 blockSize;

    int const N = 16*1024*1024;
    int const N_BYTES = N * sizeof(float);

    inputH    = (float*)malloc(N_BYTES);
    filteredH = (float*)malloc(N_BYTES);
    refH      = (float*)malloc(N_BYTES);
    GenerateTestData(N, inputH, filteredH, refH);

    e = cudaMalloc((void**)&inputD, N_BYTES);    AxCheckError(e);
    e = cudaMalloc((void**)&filteredD, N_BYTES); AxCheckError(e);

    e = cudaMemcpy(inputD, inputH, N_BYTES, cudaMemcpyHostToDevice); AxCheckError(e);

    gridSize.x  = ((N + BLOCK_SIZE - 1) / BLOCK_SIZE);
    blockSize.x = BLOCK_SIZE;

    int const TRIALS = 5;
    std::vector<float> sharedTimes;
    std::vector<float> globalTimes;

    cudaEvent_t start, stop;
    e = cudaEventCreate(&start); AxCheckError(e);
    e = cudaEventCreate(&stop);  AxCheckError(e);

    e = cudaProfilerStart();
    for(int i = 0; i < TRIALS; i++)
    {
        e = cudaEventRecord(start, 0);
        SharedFilter<<<gridSize, blockSize>>>(inputD, filteredD, N);
        e = cudaEventRecord(stop, 0);
        AxCheckError(cudaDeviceSynchronize());
        AxCheckError(cudaGetLastError());

        float elapsed;
        e = cudaEventElapsedTime(&elapsed, start, stop);
        sharedTimes.push_back(elapsed);

        e = cudaEventRecord(start, 0);
        GlobalFilter<<<gridSize, blockSize>>>(inputD, filteredD, N);
        e = cudaEventRecord(stop, 0);
        AxCheckError(cudaDeviceSynchronize());
        AxCheckError(cudaGetLastError());

        e = cudaEventElapsedTime(&elapsed, start, stop);
        globalTimes.push_back(elapsed);
    }
    e = cudaProfilerStop();

    float averageTime = std::accumulate(globalTimes.begin(), globalTimes.end(), 0.0f)/globalTimes.size();
    std::cout << "Global Memory time (ms): " << averageTime << std::endl;

    averageTime = std::accumulate(sharedTimes.begin(), sharedTimes.end(), 0.0f)/sharedTimes.size();
    std::cout << "Shared Memory time (ms): " << averageTime << std::endl;

    /* Run the kernel */
    SharedFilter<<<gridSize, blockSize>>>(inputD, filteredD, N);
    AxCheckError(cudaDeviceSynchronize());
    AxCheckError(cudaGetLastError());

    /* The kernel does not produce zeros for the first 10 / last 10 elements. It actually produces
       values there that use uninitialized shared memory as inputs, so they are incorrect. We therefore
       do not copy them and rely on filteredH having been zeroed beforehand. */
    e = cudaMemcpy(filteredH + 10, filteredD + 10, N_BYTES - 20 * sizeof(float), cudaMemcpyDeviceToHost);
    AxCheckError(e);

    std::cout << "Validating SharedFilter output..." << std::endl;
    CompareData(N, filteredH, refH);

    /* Run the kernel */
    GlobalFilter<<<gridSize, blockSize>>>(inputD, filteredD, N);
    AxCheckError(cudaDeviceSynchronize());
    AxCheckError(cudaGetLastError());

    /* We do not produce output for the first 10 / last 10 elements in the kernel. We therefore do
       not copy them and rely on filteredH having been zeroed beforehand. */
    e = cudaMemcpy(filteredH + 10, filteredD + 10, N_BYTES - 20 * sizeof(float), cudaMemcpyDeviceToHost);
    AxCheckError(e);

    std::cout << "Validating GlobalFilter output..." << std::endl;
    CompareData(N, filteredH, refH);

    cudaFree(inputD);
    cudaFree(filteredD);
    free(inputH);
    free(filteredH);
    free(refH);

    AxCheckError(cudaDeviceReset());
    getchar();
    return 0;
}

void GenerateTestData(int const N, float* const input, float* const filtered, float* const ref)
{
    int i;
    for(i = 0; i < N; i++)
    {
        //input[i] = ((float)rand())/RAND_MAX;
        input[i] = i;
        filtered[i] = 0.0f;
    }
    memset(ref, 0, N*sizeof(float));

    /* We cannot compute a 21-point filter at the edges of our array. If all 21 points are not
       available, the expected result is zero! */
    for(i = 10; i < N-10; i++)
    {
        ref[i] = (input[i-10]*FILTER_COEFFS[ 0] + input[i- 9]*FILTER_COEFFS[ 1] +
                  input[i- 8]*FILTER_COEFFS[ 2] + input[i- 7]*FILTER_COEFFS[ 3] +
                  input[i- 6]*FILTER_COEFFS[ 4] + input[i- 5]*FILTER_COEFFS[ 5] +
                  input[i- 4]*FILTER_COEFFS[ 6] + input[i- 3]*FILTER_COEFFS[ 7] +
                  input[i- 2]*FILTER_COEFFS[ 8] + input[i- 1]*FILTER_COEFFS[ 9] +
                  input[i   ]*FILTER_COEFFS[10] +
                  input[i+ 1]*FILTER_COEFFS[11] + input[i+ 2]*FILTER_COEFFS[12] +
                  input[i+ 3]*FILTER_COEFFS[13] + input[i+ 4]*FILTER_COEFFS[14] +
                  input[i+ 5]*FILTER_COEFFS[15] + input[i+ 6]*FILTER_COEFFS[16] +
                  input[i+ 7]*FILTER_COEFFS[17] + input[i+ 8]*FILTER_COEFFS[18] +
                  input[i+ 9]*FILTER_COEFFS[19] + input[i+10]*FILTER_COEFFS[20]);
    }
}

int UlpDifference(float a, float b)
{
    int iA, iB;
    iA = *((int*)(&a));
    iB = *((int*)(&b));
    return abs(iA - iB);
}

void CompareData(int const N, float const* const a, float const* const b)
{
    int i;
    int different = 0;
    for(i = 0; i < N; i++)
    {
        different = (UlpDifference(a[i],b[i]) > 5);
        if(different)
        {
            std::cout << "Mismatch: " << a[i] << " " << b[i] << std::endl;
            break;
        }
    }

    if(different) { printf("Arrays do not match @%d.\n", i); }
    else          { printf("Arrays match.\n"); }
}

void CheckError(cudaError_t const err, char const* const fun, const int line)
{
    if (err) { printf("CUDA Error Code[%d]: %s %s():%d\n",err,cudaGetErrorString(err),fun,line); exit(1); }
}

void CheckErrorMsg(cudaError_t const err, char const* const msg, char const* const fun, int const line)
{
    if (err) { printf("CUDA Error Code[%d]: %s %s() %d\n%s\n",err,cudaGetErrorString(err),fun,line,msg); exit(1); }
}
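/* A small host-only sketch (not part of the original file) of what the 5-ULP tolerance used in
 * CompareData above means in practice. UlpDemo is an illustrative name. Note that the integer
 * reinterpretation trick in UlpDifference is only meaningful when both values are finite and have
 * the same sign; values straddling zero would produce a huge (and overflow-prone) count. */
#include <cmath>    // std::nextafter
#include <cstdio>

int UlpDemo()
{
    float a = 1.0f;
    float b = std::nextafter(a, 2.0f);                        // next representable float above 1.0f, i.e. 1 ULP away
    printf("1 ULP at 1.0f  = %g\n", (double)(b - a));         // ~1.19209e-07 (2^-23)
    printf("5 ULPs at 1.0f = %g\n", 5.0 * (double)(b - a));   // the largest mismatch CompareData tolerates near 1.0f
    return 0;
}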
9edc7f985e5d744a2284ba2b270e8d30f72ef30c.hip
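/* Orientation notes for the NMS implementation below (a summary of the code that follows; the
 * code itself is authoritative):
 *
 *  - threadsPerBlock = sizeof(unsigned long long) * 8 = 64, so one 64-bit mask word covers a
 *    chunk of 64 "column" boxes.
 *  - nms_kernel runs on a col_blocks x col_blocks grid, with col_blocks = DIVUP(n_boxes, 64).
 *    The block at (blockIdx.x = col, blockIdx.y = row) caches its 64 column boxes in shared
 *    memory; each thread owns one row box i and sets bit j of dev_mask[i * col_blocks + col]
 *    when box (col*64 + j) overlaps box i by more than nms_overlap_thresh.
 *  - _nms expects the boxes sorted by descending score (the Cython wrapper in this file prepares
 *    sorted_dets via argsort) and walks them in order on the host, keeping a box only if no
 *    previously kept box has set its suppression bit, then ORs the kept box's mask row into remv.
 *
 *  Worked size example (assuming 1000 input boxes): col_blocks = DIVUP(1000, 64) = 16, so the
 *  mask occupies 1000 * 16 = 16,000 64-bit words, i.e. 125 KiB. */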
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Deep Feature Flow // Copyright (c) 2017 Microsoft // Licensed under The MIT License // Written by Yuwen Xiong // ------------------------------------------------------------------ // Based on: // Faster R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License // https://github.com/shaoqingren/faster_rcnn // ------------------------------------------------------------------ //#include "gpu_nms.hpp" #include <vector> #include <iostream> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ std::cout << hipGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _set_device(int device_id) { int current_device; CUDA_CHECK(hipGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to hipSetDevice must come before any calls to Get, which // may perform initialization using the GPU. 
CUDA_CHECK(hipSetDevice(device_id)); } void _nms(long* keep_out, int* num_out, const float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh, int device_id) { _set_device(device_id); float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(hipMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(hipMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; CUDA_CHECK(hipFree(boxes_dev)); CUDA_CHECK(hipFree(mask_dev)); } /* Generated by Cython 0.24 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) #error Cython requires Python 2.6+ or Python 3.2+. 
#else #define CYTHON_ABI "0_24" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000 #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if 
PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifndef __cplusplus #error "Cython files generated with the C++ option must be compiled with a C++ compiler." #endif #ifndef CYTHON_INLINE #define CYTHON_INLINE inline #endif template<typename T> void __Pyx_call_destructor(T& x) { x.~T(); } template<typename T> class __Pyx_FakeReference { public: __Pyx_FakeReference() : ptr(NULL) { } __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { } T *operator->() { return ptr; } operator T&() { return *ptr; } private: T *ptr; }; #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__nms__gpu_nms #define __PYX_HAVE_API__nms__gpu_nms #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "gpu_nms.hpp" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif typedef struct 
{PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 
|| (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; /* None.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "nms\\gpu_nms.pyx", "__init__.pxd", "type.pxd", }; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":725 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":726 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":727 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":728 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":732 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":733 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":734 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":735 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":739 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":740 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":749 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":750 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":751 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":754 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":755 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":757 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":758 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":760 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":761 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":762 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":764 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":765 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":766 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * 
cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* ArgTypeTest.proto */ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /* BufferFormatCheck.proto */ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, 
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); // PROTO /* PyObjectGetAttrStr.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* BufferIndexError.proto */ static void __Pyx_RaiseBufferIndexError(int axis); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) /* SliceObject.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** py_start, PyObject** py_stop, PyObject** py_slice, int has_cstart, int has_cstop, int wraparound); /* BufferFallbackError.proto */ static void __Pyx_RaiseBufferFallbackError(void); /* PyThreadStateGet.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET(); #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #endif /* PyErrFetchRestore.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, 
value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* None.proto */ static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* None.proto */ static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) 
((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* None.proto */ static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntFromPy.proto */ static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static 
CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'nms.gpu_nms' */ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t = { "float32_t", NULL, sizeof(__pyx_t_5numpy_float32_t), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t = { "int32_t", NULL, sizeof(__pyx_t_5numpy_int32_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int32_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int32_t), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t = { "intp_t", NULL, sizeof(__pyx_t_5numpy_intp_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_intp_t) ? 
'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_intp_t), 0 }; #define __Pyx_MODULE_NAME "nms.gpu_nms" int __pyx_module_is_main_nms__gpu_nms = 0; /* Implementation of 'nms.gpu_nms' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static const char __pyx_k_np[] = "np"; static const char __pyx_k_dets[] = "dets"; static const char __pyx_k_keep[] = "keep"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_int32[] = "int32"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_order[] = "order"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_scores[] = "scores"; static const char __pyx_k_thresh[] = "thresh"; static const char __pyx_k_argsort[] = "argsort"; static const char __pyx_k_gpu_nms[] = "gpu_nms"; static const char __pyx_k_num_out[] = "num_out"; static const char __pyx_k_boxes_dim[] = "boxes_dim"; static const char __pyx_k_boxes_num[] = "boxes_num"; static const char __pyx_k_device_id[] = "device_id"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_nms_gpu_nms[] = "nms.gpu_nms"; static const char __pyx_k_sorted_dets[] = "sorted_dets"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_D_v_zix_caffe_caffe_win_20160523[] = "D:\\v-zix\\caffe\\caffe-win-20160523\\models\\py-faster-rcnn-windows\\lib\\nms\\gpu_nms.pyx"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_argsort; static PyObject *__pyx_n_s_boxes_dim; static PyObject *__pyx_n_s_boxes_num; static PyObject *__pyx_n_s_dets; static PyObject *__pyx_n_s_device_id; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_gpu_nms; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_int32; static PyObject *__pyx_n_s_keep; static PyObject *__pyx_n_s_main; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_nms_gpu_nms; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_num_out; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_order; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_scores; static PyObject *__pyx_n_s_sorted_dets; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_thresh; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_zeros; static PyObject 
*__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_int_4; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_slice_; static PyObject *__pyx_slice__3; static PyObject *__pyx_slice__4; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_codeobj__12; /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ /* Python wrapper */ static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_3nms_7gpu_nms_1gpu_nms = {"gpu_nms", (PyCFunction)__pyx_pw_3nms_7gpu_nms_1gpu_nms, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_dets = 0; PyObject *__pyx_v_thresh = 0; __pyx_t_5numpy_int32_t __pyx_v_device_id; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gpu_nms (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dets,&__pyx_n_s_thresh,&__pyx_n_s_device_id,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dets)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_thresh)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, 1); __PYX_ERR(0, 16, __pyx_L3_error) } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_device_id); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gpu_nms") < 0)) __PYX_ERR(0, 16, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_dets = ((PyArrayObject *)values[0]); __pyx_v_thresh = ((PyObject*)values[1]); if (values[2]) { __pyx_v_device_id = __Pyx_PyInt_As_npy_int32(values[2]); if (unlikely((__pyx_v_device_id == (npy_int32)-1) && PyErr_Occurred())) __PYX_ERR(0, 17, __pyx_L3_error) } else { __pyx_v_device_id = ((__pyx_t_5numpy_int32_t)0); } } goto __pyx_L4_argument_unpacking_done; 
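/* Argument unpacking for gpu_nms() is complete at this point; the labels
 * below handle the bad-argument-count and generic error paths, after which
 * the unpacked (dets, thresh, device_id) values are type-checked and
 * forwarded to the actual implementation, __pyx_pf_3nms_7gpu_nms_gpu_nms. */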
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dets), __pyx_ptype_5numpy_ndarray, 1, "dets", 0))) __PYX_ERR(0, 16, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_thresh), (&PyFloat_Type), 1, "thresh", 1))) __PYX_ERR(0, 16, __pyx_L1_error) __pyx_r = __pyx_pf_3nms_7gpu_nms_gpu_nms(__pyx_self, __pyx_v_dets, __pyx_v_thresh, __pyx_v_device_id); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id) { int __pyx_v_boxes_num; int __pyx_v_boxes_dim; int __pyx_v_num_out; PyArrayObject *__pyx_v_keep = 0; PyArrayObject *__pyx_v_scores = 0; PyArrayObject *__pyx_v_order = 0; PyArrayObject *__pyx_v_sorted_dets = 0; __Pyx_LocalBuf_ND __pyx_pybuffernd_dets; __Pyx_Buffer __pyx_pybuffer_dets; __Pyx_LocalBuf_ND __pyx_pybuffernd_keep; __Pyx_Buffer __pyx_pybuffer_keep; __Pyx_LocalBuf_ND __pyx_pybuffernd_order; __Pyx_Buffer __pyx_pybuffer_order; __Pyx_LocalBuf_ND __pyx_pybuffernd_scores; __Pyx_Buffer __pyx_pybuffer_scores; __Pyx_LocalBuf_ND __pyx_pybuffernd_sorted_dets; __Pyx_Buffer __pyx_pybuffer_sorted_dets; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; PyArrayObject *__pyx_t_7 = NULL; PyArrayObject *__pyx_t_8 = NULL; PyArrayObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; float __pyx_t_14; PyObject *__pyx_t_15 = NULL; PyObject *__pyx_t_16 = NULL; PyObject *__pyx_t_17 = NULL; __Pyx_RefNannySetupContext("gpu_nms", 0); __pyx_pybuffer_keep.pybuffer.buf = NULL; __pyx_pybuffer_keep.refcount = 0; __pyx_pybuffernd_keep.data = NULL; __pyx_pybuffernd_keep.rcbuffer = &__pyx_pybuffer_keep; __pyx_pybuffer_scores.pybuffer.buf = NULL; __pyx_pybuffer_scores.refcount = 0; __pyx_pybuffernd_scores.data = NULL; __pyx_pybuffernd_scores.rcbuffer = &__pyx_pybuffer_scores; __pyx_pybuffer_order.pybuffer.buf = NULL; __pyx_pybuffer_order.refcount = 0; __pyx_pybuffernd_order.data = NULL; __pyx_pybuffernd_order.rcbuffer = &__pyx_pybuffer_order; __pyx_pybuffer_sorted_dets.pybuffer.buf = NULL; __pyx_pybuffer_sorted_dets.refcount = 0; __pyx_pybuffernd_sorted_dets.data = NULL; __pyx_pybuffernd_sorted_dets.rcbuffer = &__pyx_pybuffer_sorted_dets; __pyx_pybuffer_dets.pybuffer.buf = NULL; __pyx_pybuffer_dets.refcount = 0; __pyx_pybuffernd_dets.data = NULL; __pyx_pybuffernd_dets.rcbuffer = &__pyx_pybuffer_dets; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dets.rcbuffer->pybuffer, (PyObject*)__pyx_v_dets, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 16, __pyx_L1_error) } __pyx_pybuffernd_dets.diminfo[0].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dets.diminfo[0].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[0]; 
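/* With the buffer view on `dets` acquired, the code below mirrors the first
 * lines of the embedded gpu_nms.pyx source: it reads
 *     boxes_num = dets.shape[0]
 *     boxes_dim = dets.shape[1]
 * and allocates the output index array
 *     keep = np.zeros(boxes_num, dtype=np.int32)
 * by looking up np.zeros on the imported numpy module object. */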
__pyx_pybuffernd_dets.diminfo[1].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_dets.diminfo[1].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[1]; /* "nms/gpu_nms.pyx":18 * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] # <<<<<<<<<<<<<< * cdef int boxes_dim = dets.shape[1] * cdef int num_out */ __pyx_v_boxes_num = (__pyx_v_dets->dimensions[0]); /* "nms/gpu_nms.pyx":19 * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] * cdef int boxes_dim = dets.shape[1] # <<<<<<<<<<<<<< * cdef int num_out * cdef np.ndarray[np.int32_t, ndim=1] \ */ __pyx_v_boxes_dim = (__pyx_v_dets->dimensions[1]); /* "nms/gpu_nms.pyx":22 * cdef int num_out * cdef np.ndarray[np.int32_t, ndim=1] \ * keep = np.zeros(boxes_num, dtype=np.int32) # <<<<<<<<<<<<<< * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_boxes_num); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_int32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 22, __pyx_L1_error) __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_keep = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 21, __pyx_L1_error) } else {__pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0]; } } __pyx_t_6 = 0; __pyx_v_keep = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":24 * keep = np.zeros(boxes_num, dtype=np.int32) * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] # <<<<<<<<<<<<<< * #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn * # order = scores.argsort()[::-1] */ 
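/* The next block evaluates `scores = dets[:, 4]` from the .pyx source:
 * __pyx_tuple__2 is the pre-built index object for that subscript
 * (presumably (slice(None, None, None), 4), constructed during module
 * initialization outside this excerpt), and the result is validated as a
 * one-dimensional float32 buffer before `order = scores.argsort()[::-1]`
 * is computed from it. */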
__pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 24, __pyx_L1_error) __pyx_t_7 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scores.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_scores = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_scores.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 23, __pyx_L1_error) } else {__pyx_pybuffernd_scores.diminfo[0].strides = __pyx_pybuffernd_scores.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_scores.diminfo[0].shape = __pyx_pybuffernd_scores.rcbuffer->pybuffer.shape[0]; } } __pyx_t_7 = 0; __pyx_v_scores = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":28 * # order = scores.argsort()[::-1] * cdef np.ndarray[np.intp_t, ndim=1] \ * order = scores.argsort()[::-1] # <<<<<<<<<<<<<< * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_scores), __pyx_n_s_argsort); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } if (__pyx_t_3) { __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetItem(__pyx_t_5, __pyx_slice__3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 28, __pyx_L1_error) __pyx_t_8 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_order.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_order = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_order.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 27, __pyx_L1_error) } else {__pyx_pybuffernd_order.diminfo[0].strides = __pyx_pybuffernd_order.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_order.diminfo[0].shape = __pyx_pybuffernd_order.rcbuffer->pybuffer.shape[0]; } } __pyx_t_8 = 0; __pyx_v_order = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nms/gpu_nms.pyx":30 * order = scores.argsort()[::-1] * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] # <<<<<<<<<<<<<< * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] */ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_order)); 
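/* Building the (order, :) index tuple for `sorted_dets = dets[order, :]`:
 * `order` and the cached full-slice object __pyx_slice__4 are packed into a
 * fresh 2-tuple, the fancy-indexing result (a freshly materialized copy) is
 * validated as a 2-D float32 buffer, and &sorted_dets[0, 0] is then passed
 * to the external routine declared in the .pyx as
 *     void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
 * i.e. (keep buffer, num_out, sorted boxes, boxes_num, boxes_dim, thresh,
 * device_id), in the order used by the call below. */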
__Pyx_GIVEREF(((PyObject *)__pyx_v_order)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_order)); __Pyx_INCREF(__pyx_slice__4); __Pyx_GIVEREF(__pyx_slice__4); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_slice__4); __pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 30, __pyx_L1_error) __pyx_t_9 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer, (PyObject*)__pyx_t_9, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { __pyx_v_sorted_dets = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 29, __pyx_L1_error) } else {__pyx_pybuffernd_sorted_dets.diminfo[0].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_sorted_dets.diminfo[0].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_sorted_dets.diminfo[1].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_sorted_dets.diminfo[1].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[1]; } } __pyx_t_9 = 0; __pyx_v_sorted_dets = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":31 * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) # <<<<<<<<<<<<<< * keep = keep[:num_out] * return list(order[keep]) */ __pyx_t_10 = 0; __pyx_t_11 = -1; if (__pyx_t_10 < 0) { __pyx_t_10 += __pyx_pybuffernd_keep.diminfo[0].shape; if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_keep.diminfo[0].shape)) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 31, __pyx_L1_error) } __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_11 = -1; if (__pyx_t_12 < 0) { __pyx_t_12 += __pyx_pybuffernd_sorted_dets.diminfo[0].shape; if (unlikely(__pyx_t_12 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_sorted_dets.diminfo[0].shape)) __pyx_t_11 = 0; if (__pyx_t_13 < 0) { __pyx_t_13 += __pyx_pybuffernd_sorted_dets.diminfo[1].shape; if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 1; } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_sorted_dets.diminfo[1].shape)) __pyx_t_11 = 1; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 31, __pyx_L1_error) } __pyx_t_14 = __pyx_PyFloat_AsFloat(__pyx_v_thresh); if (unlikely((__pyx_t_14 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 31, __pyx_L1_error) _nms((&(*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int32_t *, __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_keep.diminfo[0].strides))), (&__pyx_v_num_out), (&(*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_sorted_dets.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_sorted_dets.diminfo[1].strides))), __pyx_v_boxes_num, __pyx_v_boxes_dim, __pyx_t_14, __pyx_v_device_id); /* "nms/gpu_nms.pyx":32 * sorted_dets = dets[order, :] * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = 
keep[:num_out] # <<<<<<<<<<<<<< * return list(order[keep]) */ __pyx_t_5 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_v_keep), 0, __pyx_v_num_out, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 32, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 32, __pyx_L1_error) __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __pyx_t_11 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_11 < 0)) { PyErr_Fetch(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_v_keep, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_17); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_15, __pyx_t_16, __pyx_t_17); } } __pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 32, __pyx_L1_error) } __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_keep, ((PyArrayObject *)__pyx_t_5)); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":33 * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] * return list(order[keep]) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_order), ((PyObject *)__pyx_v_keep)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PySequence_List(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); 
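/* Normal-path exit: every buffer view acquired above (dets, keep, order,
 * scores, sorted_dets) is released before the `list(order[keep])` result is
 * handed back to the caller; the error path above performs the same releases
 * with the pending exception temporarily stashed via ErrFetch/ErrRestore. */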
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_keep); __Pyx_XDECREF((PyObject *)__pyx_v_scores); __Pyx_XDECREF((PyObject *)__pyx_v_order); __Pyx_XDECREF((PyObject *)__pyx_v_sorted_dets); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":203 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":206 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":207 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":209 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":212 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # 
<<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ goto __pyx_L4; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":214 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ /*else*/ { __pyx_v_copy_shape = 0; } __pyx_L4:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":217 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L6_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 218, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L9_bool_binop_done; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":221 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L9_bool_binop_done:; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 222, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":224 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":225 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (__pyx_v_copy_shape != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":229 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":230 * # This is allocated as one block, strides first. 
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":231 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":233 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ goto __pyx_L11; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":236 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L11:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":237 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":238 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":239 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":242 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef int offset */ __pyx_v_f = NULL; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":243 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":246 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":250 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ goto __pyx_L14; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":253 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ /*else*/ { __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L14:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":256 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L20_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_L20_next_or:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":258 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder 
== '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L19_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 259, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":260 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":261 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = ((char *)"B"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":262 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = ((char *)"h"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":263 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = ((char *)"H"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = ((char *)"i"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = ((char *)"I"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: 
__pyx_v_f = ((char *)"l"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = ((char *)"L"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = ((char *)"q"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = ((char *)"Q"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = ((char *)"f"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = ((char *)"d"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = ((char *)"g"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = ((char *)"Zf"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = ((char *)"Zd"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":275 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = ((char *)"Zg"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = ((char *)"O"); break; default: /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":278 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = 
PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 278, __pyx_L1_error) break; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":279 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":280 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":282 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)malloc(0xFF)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":283 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":284 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 285, __pyx_L1_error) __pyx_v_f = __pyx_t_7; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":288 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. 
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":292 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":294 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # 
<<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":771 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 771, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":774 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, 
<void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":777 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":780 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject 
*__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":783 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":790 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":791 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 794, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = 
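/* _util_dtypestring walks descr.names and, for each leaf field, appends one
 * struct-module format code to the caller-supplied buffer f, emitting 'x'
 * pad bytes to honour field offsets, rejecting non-native byte orders, and
 * recursing into nested structured children; the returned pointer is the new
 * write position used by __getbuffer__.  A rough sketch of the Cython source
 * this was generated from (leaf fields only; nested dtypes recurse instead):
 *
 *     for childname in descr.names:
 *         child, new_offset = descr.fields[childname]
 *         while offset[0] < new_offset:          # pad up to the field offset
 *             f[0] = c'x'; f += 1; offset[0] += 1
 *         offset[0] += child.itemsize
 *         f[0] = <single format char for child.type_num>; f += 1
 */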
PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 794, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 795, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 796, __pyx_L1_error) } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 796, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - 
offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 799, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 803, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 823, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(1, 826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
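/* The if/elif chain around this point is a straight table lookup from NumPy
 * type numbers to single-character buffer-format codes: NPY_BYTE->'b',
 * NPY_UBYTE->'B', NPY_SHORT->'h', NPY_USHORT->'H', NPY_INT->'i',
 * NPY_UINT->'I', NPY_LONG->'l', NPY_ULONG->'L', NPY_LONGLONG->'q',
 * NPY_ULONGLONG->'Q', NPY_FLOAT->'f', NPY_DOUBLE->'d', NPY_LONGDOUBLE->'g',
 * NPY_OBJECT->'O'; the complex types emit a two-character "Z"+code sequence
 * and advance f by one extra byte before the shared "f += 1" at the end. */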
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: 
f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 844, __pyx_L1_error) } __pyx_L15:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) __PYX_ERR(1, 849, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":969 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ goto __pyx_L3; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":971 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ /*else*/ { Py_INCREF(__pyx_v_base); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":972 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":973 * Py_INCREF(base) # important to do this before decref below! 
* baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":978 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":980 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "gpu_nms", 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_k_D_v_zix_caffe_caffe_win_20160523, sizeof(__pyx_k_D_v_zix_caffe_caffe_win_20160523), 0, 0, 1, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 
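/* __pyx_string_tab interns every Python-level name and message this module
 * touches: the module and source-file names, the "numpy" import and its
 * attributes (zeros, int32, argsort, dtype), the gpu_nms() argument and
 * local-variable names (dets, thresh, device_id, keep, scores, order,
 * sorted_dets, ...), and the buffer-protocol error strings.
 * __Pyx_InitStrings(__pyx_string_tab) in __Pyx_InitGlobals() below turns
 * each entry into a cached PyObject during module initialisation. */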
0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_argsort, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1}, {&__pyx_n_s_boxes_dim, __pyx_k_boxes_dim, sizeof(__pyx_k_boxes_dim), 0, 0, 1, 1}, {&__pyx_n_s_boxes_num, __pyx_k_boxes_num, sizeof(__pyx_k_boxes_num), 0, 0, 1, 1}, {&__pyx_n_s_dets, __pyx_k_dets, sizeof(__pyx_k_dets), 0, 0, 1, 1}, {&__pyx_n_s_device_id, __pyx_k_device_id, sizeof(__pyx_k_device_id), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_gpu_nms, __pyx_k_gpu_nms, sizeof(__pyx_k_gpu_nms), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_int32, __pyx_k_int32, sizeof(__pyx_k_int32), 0, 0, 1, 1}, {&__pyx_n_s_keep, __pyx_k_keep, sizeof(__pyx_k_keep), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_nms_gpu_nms, __pyx_k_nms_gpu_nms, sizeof(__pyx_k_nms_gpu_nms), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_num_out, __pyx_k_num_out, sizeof(__pyx_k_num_out), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_order, __pyx_k_order, sizeof(__pyx_k_order), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_scores, __pyx_k_scores, sizeof(__pyx_k_scores), 0, 0, 1, 1}, {&__pyx_n_s_sorted_dets, __pyx_k_sorted_dets, sizeof(__pyx_k_sorted_dets), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_thresh, __pyx_k_thresh, sizeof(__pyx_k_thresh), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 218, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 231, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 799, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "nms/gpu_nms.pyx":24 * keep = np.zeros(boxes_num, dtype=np.int32) * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] # <<<<<<<<<<<<<< * #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn * # order = scores.argsort()[::-1] */ __pyx_slice_ = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice_); __Pyx_GIVEREF(__pyx_slice_); __pyx_tuple__2 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_4); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "nms/gpu_nms.pyx":28 * # order = scores.argsort()[::-1] * cdef np.ndarray[np.intp_t, ndim=1] \ * order = scores.argsort()[::-1] # <<<<<<<<<<<<<< * cdef 
np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] */ __pyx_slice__3 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); /* "nms/gpu_nms.pyx":30 * order = scores.argsort()[::-1] * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] # <<<<<<<<<<<<<< * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] */ __pyx_slice__4 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__4)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__4); __Pyx_GIVEREF(__pyx_slice__4); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * 
* # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ __pyx_tuple__11 = PyTuple_Pack(10, __pyx_n_s_dets, __pyx_n_s_thresh, __pyx_n_s_device_id, __pyx_n_s_boxes_num, __pyx_n_s_boxes_dim, __pyx_n_s_num_out, __pyx_n_s_keep, __pyx_n_s_scores, __pyx_n_s_order, __pyx_n_s_sorted_dets); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(3, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_n_s_gpu_nms, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initgpu_nms(void); /*proto*/ PyMODINIT_FUNC initgpu_nms(void) #else PyMODINIT_FUNC PyInit_gpu_nms(void); /*proto*/ PyMODINIT_FUNC PyInit_gpu_nms(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_gpu_nms(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("gpu_nms", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_nms__gpu_nms) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "nms.gpu_nms")) { if (unlikely(PyDict_SetItemString(modules, "nms.gpu_nms", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 155, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 172, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 181, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 861, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "nms/gpu_nms.pyx":8 * # -------------------------------------------------------- * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); 
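/* Module initialisation executes the module-level statements of
 * nms/gpu_nms.pyx in order: the "import numpy as np" just above, the
 * assert sizeof(int) == sizeof(np.int32_t) below (presumably so the int32
 * index buffers passed to the external _nms() match a plain C int), and the
 * registration of the gpu_nms() wrapper created via PyCFunction_NewEx in
 * the module dictionary under the name "gpu_nms". */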
__pyx_t_1 = 0; /* "nms/gpu_nms.pyx":11 * cimport numpy as np * * assert sizeof(int) == sizeof(np.int32_t) # <<<<<<<<<<<<<< * * cdef extern from "gpu_nms.hpp": */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((sizeof(int)) == (sizeof(__pyx_t_5numpy_int32_t))) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 11, __pyx_L1_error) } } #endif /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3nms_7gpu_nms_1gpu_nms, NULL, __pyx_n_s_nms_gpu_nms); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gpu_nms, __pyx_t_1) < 0) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nms/gpu_nms.pyx":1 * # -------------------------------------------------------- # <<<<<<<<<<<<<< * # Faster R-CNN * # Copyright (c) 2015 Microsoft */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nms.gpu_nms"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* ArgTypeTest */ static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } /* BufferFormatCheck */ static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? 
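/* The __Pyx_BufFmt_* helpers validate the PEP 3118 format string that numpy
 * reports through the buffer protocol against the dtypes declared in the
 * .pyx file (float32 for dets/scores/sorted_dets, int32 for keep, intp for
 * order), mapping each format character to a human-readable name plus its
 * size, alignment and padding so that a mismatch raises the descriptive
 * "Buffer dtype mismatch" ValueError built in __Pyx_BufFmt_RaiseExpected. */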
"'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
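/* The format string hit its '\0' terminator while fields of the expected dtype are still
   unconsumed, so report a buffer dtype mismatch. */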
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match 
size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* 
__Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* BufferIndexError */ static void __Pyx_RaiseBufferIndexError(int axis) { PyErr_Format(PyExc_IndexError, "Out of bounds on buffer access (axis %d)", axis); } /* SliceObject */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { #if CYTHON_COMPILING_IN_CPYTHON PyMappingMethods* mp; #if PY_MAJOR_VERSION < 3 PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; if (likely(ms && ms->sq_slice)) { if (!has_cstart) { if (_py_start && (*_py_start != Py_None)) { cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstart = 0; } if (!has_cstop) { if (_py_stop && (*_py_stop != Py_None)) { cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstop = PY_SSIZE_T_MAX; } if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { Py_ssize_t l = ms->sq_length(obj); if (likely(l >= 0)) { if (cstop < 0) { cstop += l; if (cstop < 0) cstop = 0; } if (cstart < 0) { cstart += l; if (cstart < 0) cstart = 0; } } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) goto bad; PyErr_Clear(); } } return ms->sq_slice(obj, cstart, cstop); } #endif mp = Py_TYPE(obj)->tp_as_mapping; if (likely(mp && mp->mp_subscript)) #endif { PyObject* result; PyObject *py_slice, *py_start, *py_stop; if (_py_slice) { py_slice = *_py_slice; } else { PyObject* owned_start = NULL; PyObject* owned_stop = NULL; if (_py_start) { py_start = *_py_start; } else { if (has_cstart) { owned_start = py_start = PyInt_FromSsize_t(cstart); if (unlikely(!py_start)) goto bad; } else py_start = Py_None; } if (_py_stop) { py_stop = *_py_stop; } else { if (has_cstop) { owned_stop = py_stop = PyInt_FromSsize_t(cstop); if (unlikely(!py_stop)) { Py_XDECREF(owned_start); goto bad; } } else py_stop = Py_None; } py_slice = PySlice_New(py_start, py_stop, Py_None); Py_XDECREF(owned_start); Py_XDECREF(owned_stop); if (unlikely(!py_slice)) goto bad; } #if CYTHON_COMPILING_IN_CPYTHON result = mp->mp_subscript(obj, py_slice); #else result = PyObject_GetItem(obj, py_slice); #endif if (!_py_slice) { Py_DECREF(py_slice); } return result; } PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name); bad: return NULL; } /* BufferFallbackError */ static void __Pyx_RaiseBufferFallbackError(void) { PyErr_SetString(PyExc_ValueError, "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); } /* PyErrFetchRestore */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, 
*tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) 
goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* 
code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? 
c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } Py_DECREF(obj); view->obj = NULL; } #endif /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == 
b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { 
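/* Component-wise double-precision complex subtraction. Like the float variants above, these
   helpers are only compiled as fallbacks when CYTHON_CCOMPLEX is not set, i.e. when no native
   C/C++ complex type is available. */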
__pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) { const npy_int32 neg_one = (npy_int32) -1, const_zero = (npy_int32) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(npy_int32) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(npy_int32, long, PyInt_AS_LONG(x)) } 
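/* npy_int32 is at least as wide as long in this branch, so PyInt_AS_LONG cannot overflow;
   only a negative value needs rejecting when the target type is unsigned. */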
else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (npy_int32) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (npy_int32) 0; case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, digits[0]) case 2: if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 2 * PyLong_SHIFT) { return (npy_int32) (((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; case 3: if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 3 * PyLong_SHIFT) { return (npy_int32) (((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; case 4: if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 4 * PyLong_SHIFT) { return (npy_int32) (((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (npy_int32) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(npy_int32) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(npy_int32) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (npy_int32) 0; case -1: __PYX_VERIFY_RETURN_INT(npy_int32, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, +digits[0]) case -2: if (8 * sizeof(npy_int32) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 2: if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) { return (npy_int32) ((((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case -3: if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) 
{ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 3: if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { return (npy_int32) ((((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case -4: if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 4: if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) { return (npy_int32) ((((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; } #endif if (sizeof(npy_int32) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, long, PyLong_AsLong(x)) } else if (sizeof(npy_int32) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else npy_int32 val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (npy_int32) -1; } } else { npy_int32 val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (npy_int32) -1; val = __Pyx_PyInt_As_npy_int32(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to npy_int32"); return (npy_int32) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to npy_int32"); return (npy_int32) -1; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = 
(int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { 
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { 
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 
* PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: 
PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. 
Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else if (__Pyx_PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif #endif } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return __Pyx_NewRef(x); m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, 
Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
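/* Illustrative note (not part of the generated module): the digit-shifting
 * fast paths above decode CPython's little-endian base-2**PyLong_SHIFT digit
 * representation directly. For example, with PyLong_SHIFT == 30 (the usual
 * value on 64-bit builds), a positive PyLong holding 5000000000 is stored as
 * digits[0] = 705032704 and digits[1] = 4, and the two-digit branch rebuilds
 * it as ((unsigned long)digits[1] << 30) | (unsigned long)digits[0]
 * == 4 * 1073741824 + 705032704 == 5000000000. */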
9edc7f985e5d744a2284ba2b270e8d30f72ef30c.cu
// ------------------------------------------------------------------
// Deep Feature Flow
// Copyright (c) 2017 Microsoft
// Licensed under The MIT License
// Written by Yuwen Xiong
// ------------------------------------------------------------------
// Based on:
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License
// https://github.com/shaoqingren/faster_rcnn
// ------------------------------------------------------------------

//#include "gpu_nms.hpp"
#include <vector>
#include <iostream>

#define CUDA_CHECK(condition) \
  /* Code block avoids redefinition of cudaError_t error */ \
  do { \
    cudaError_t error = condition; \
    if (error != cudaSuccess) { \
      std::cout << cudaGetErrorString(error) << std::endl; \
    } \
  } while (0)

#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))

int const threadsPerBlock = sizeof(unsigned long long) * 8;

__device__ inline float devIoU(float const * const a, float const * const b) {
  float left = max(a[0], b[0]), right = min(a[2], b[2]);
  float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}

__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, unsigned long long *dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size =
        min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
        min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  __shared__ float block_boxes[threadsPerBlock * 5];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 5 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
    block_boxes[threadIdx.x * 5 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
    block_boxes[threadIdx.x * 5 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
    block_boxes[threadIdx.x * 5 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
    block_boxes[threadIdx.x * 5 + 4] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 5;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

void _set_device(int device_id) {
  int current_device;
  CUDA_CHECK(cudaGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // The call to cudaSetDevice must come before any calls to Get, which
  // may perform initialization using the GPU.
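  // Note: the CUDA_CHECK macro above only logs the error string to std::cout;
  // it does not abort, so a failing cudaGetDevice still falls through to the
  // cudaSetDevice below. The early return above simply avoids a redundant
  // cudaSetDevice call when the requested device is already current.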
  CUDA_CHECK(cudaSetDevice(device_id));
}

void _nms(long* keep_out, int* num_out, const float* boxes_host, int boxes_num,
          int boxes_dim, float nms_overlap_thresh, int device_id) {
  _set_device(device_id);

  float* boxes_dev = NULL;
  unsigned long long* mask_dev = NULL;

  const int col_blocks = DIVUP(boxes_num, threadsPerBlock);

  CUDA_CHECK(cudaMalloc(&boxes_dev,
                        boxes_num * boxes_dim * sizeof(float)));
  CUDA_CHECK(cudaMemcpy(boxes_dev,
                        boxes_host,
                        boxes_num * boxes_dim * sizeof(float),
                        cudaMemcpyHostToDevice));

  CUDA_CHECK(cudaMalloc(&mask_dev,
                        boxes_num * col_blocks * sizeof(unsigned long long)));

  dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
              DIVUP(boxes_num, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  nms_kernel<<<blocks, threads>>>(boxes_num,
                                  nms_overlap_thresh,
                                  boxes_dev,
                                  mask_dev);

  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
  CUDA_CHECK(cudaMemcpy(&mask_host[0],
                        mask_dev,
                        sizeof(unsigned long long) * boxes_num * col_blocks,
                        cudaMemcpyDeviceToHost));

  std::vector<unsigned long long> remv(col_blocks);
  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);

  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;

    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_out[num_to_keep++] = i;
      unsigned long long *p = &mask_host[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  *num_out = num_to_keep;

  CUDA_CHECK(cudaFree(boxes_dev));
  CUDA_CHECK(cudaFree(mask_dev));
}

/* Generated by Cython 0.24 */

#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
    #error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
    #error Cython requires Python 2.6+ or Python 3.2+.
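/* Illustrative only (not part of the generated module): a minimal host-side
 * sketch of how the _nms entry point above is typically driven. The helper
 * name `nms_usage_sketch` and the box values are made up for illustration;
 * boxes are assumed pre-sorted by descending score, as the Python wrapper in
 * this module does via argsort before calling in. The block is kept compiled
 * out with `#if 0` so it cannot interfere with the surrounding generated code. */
#if 0
static void nms_usage_sketch() {
  // Three boxes in row-major [x1, y1, x2, y2, score] layout, already sorted
  // by score; boxes 0 and 1 overlap heavily, box 2 is far away.
  const float boxes[3 * 5] = {
      0.f,  0.f,  10.f, 10.f, 0.9f,
      1.f,  1.f,  11.f, 11.f, 0.8f,
      50.f, 50.f, 60.f, 60.f, 0.7f};
  std::vector<long> keep(3);  // indices of surviving boxes, written by _nms
  int num_out = 0;            // number of valid entries in `keep`

  _nms(&keep[0], &num_out, boxes, /*boxes_num=*/3, /*boxes_dim=*/5,
       /*nms_overlap_thresh=*/0.7f, /*device_id=*/0);
  keep.resize(num_out);  // boxes 0 and 2 survive: IoU(0,1) is about 0.704,
                         // just above the 0.7 threshold, so box 1 is suppressed
}
#endif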
#else #define CYTHON_ABI "0_24" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000 #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if 
PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifndef __cplusplus #error "Cython files generated with the C++ option must be compiled with a C++ compiler." #endif #ifndef CYTHON_INLINE #define CYTHON_INLINE inline #endif template<typename T> void __Pyx_call_destructor(T& x) { x.~T(); } template<typename T> class __Pyx_FakeReference { public: __Pyx_FakeReference() : ptr(NULL) { } __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { } T *operator->() { return ptr; } operator T&() { return *ptr; } private: T *ptr; }; #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__nms__gpu_nms #define __PYX_HAVE_API__nms__gpu_nms #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "gpu_nms.hpp" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif typedef struct 
{PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 
|| (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; /* None.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "nms\\gpu_nms.pyx", "__init__.pxd", "type.pxd", }; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":725 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":726 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":727 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":728 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":732 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":733 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":734 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":735 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":739 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":740 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":749 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":750 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":751 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":754 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":755 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":757 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":758 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":760 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":761 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":762 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":764 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":765 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":766 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * 
cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* ArgTypeTest.proto */ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /* BufferFormatCheck.proto */ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, 
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); // PROTO /* PyObjectGetAttrStr.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* BufferIndexError.proto */ static void __Pyx_RaiseBufferIndexError(int axis); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) /* SliceObject.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** py_start, PyObject** py_stop, PyObject** py_slice, int has_cstart, int has_cstop, int wraparound); /* BufferFallbackError.proto */ static void __Pyx_RaiseBufferFallbackError(void); /* PyThreadStateGet.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET(); #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #endif /* PyErrFetchRestore.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, 
value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* None.proto */ static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* None.proto */ static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) 
((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* None.proto */ static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntFromPy.proto */ static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static 
CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'nms.gpu_nms' */ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t = { "float32_t", NULL, sizeof(__pyx_t_5numpy_float32_t), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t = { "int32_t", NULL, sizeof(__pyx_t_5numpy_int32_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int32_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int32_t), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t = { "intp_t", NULL, sizeof(__pyx_t_5numpy_intp_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_intp_t) ? 
'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_intp_t), 0 }; #define __Pyx_MODULE_NAME "nms.gpu_nms" int __pyx_module_is_main_nms__gpu_nms = 0; /* Implementation of 'nms.gpu_nms' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static const char __pyx_k_np[] = "np"; static const char __pyx_k_dets[] = "dets"; static const char __pyx_k_keep[] = "keep"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_int32[] = "int32"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_order[] = "order"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_scores[] = "scores"; static const char __pyx_k_thresh[] = "thresh"; static const char __pyx_k_argsort[] = "argsort"; static const char __pyx_k_gpu_nms[] = "gpu_nms"; static const char __pyx_k_num_out[] = "num_out"; static const char __pyx_k_boxes_dim[] = "boxes_dim"; static const char __pyx_k_boxes_num[] = "boxes_num"; static const char __pyx_k_device_id[] = "device_id"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_nms_gpu_nms[] = "nms.gpu_nms"; static const char __pyx_k_sorted_dets[] = "sorted_dets"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_D_v_zix_caffe_caffe_win_20160523[] = "D:\\v-zix\\caffe\\caffe-win-20160523\\models\\py-faster-rcnn-windows\\lib\\nms\\gpu_nms.pyx"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_argsort; static PyObject *__pyx_n_s_boxes_dim; static PyObject *__pyx_n_s_boxes_num; static PyObject *__pyx_n_s_dets; static PyObject *__pyx_n_s_device_id; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_gpu_nms; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_int32; static PyObject *__pyx_n_s_keep; static PyObject *__pyx_n_s_main; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_nms_gpu_nms; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_num_out; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_order; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_scores; static PyObject *__pyx_n_s_sorted_dets; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_thresh; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_zeros; static PyObject 
*__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_int_4; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_slice_; static PyObject *__pyx_slice__3; static PyObject *__pyx_slice__4; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_codeobj__12; /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ /* Python wrapper */ static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_3nms_7gpu_nms_1gpu_nms = {"gpu_nms", (PyCFunction)__pyx_pw_3nms_7gpu_nms_1gpu_nms, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_dets = 0; PyObject *__pyx_v_thresh = 0; __pyx_t_5numpy_int32_t __pyx_v_device_id; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gpu_nms (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dets,&__pyx_n_s_thresh,&__pyx_n_s_device_id,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dets)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_thresh)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, 1); __PYX_ERR(0, 16, __pyx_L3_error) } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_device_id); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gpu_nms") < 0)) __PYX_ERR(0, 16, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_dets = ((PyArrayObject *)values[0]); __pyx_v_thresh = ((PyObject*)values[1]); if (values[2]) { __pyx_v_device_id = __Pyx_PyInt_As_npy_int32(values[2]); if (unlikely((__pyx_v_device_id == (npy_int32)-1) && PyErr_Occurred())) __PYX_ERR(0, 17, __pyx_L3_error) } else { __pyx_v_device_id = ((__pyx_t_5numpy_int32_t)0); } } goto __pyx_L4_argument_unpacking_done; 
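/* Reader aid (comment only, nothing here is compiled): the Cython source that
 * the argument-unpacking wrapper above and the __pyx_pf_3nms_7gpu_nms_gpu_nms
 * implementation below were generated from, reassembled from the inline
 * "nms/gpu_nms.pyx":NN comments scattered through this file.  Line breaks and
 * indentation are approximate; the first line is the tail of the cdef extern
 * block that declares the CUDA-backed _nms entry point.
 *
 *     void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
 *
 *     def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh,
 *                 np.int32_t device_id=0):
 *         cdef int boxes_num = dets.shape[0]
 *         cdef int boxes_dim = dets.shape[1]
 *         cdef int num_out
 *         cdef np.ndarray[np.int32_t, ndim=1] keep = np.zeros(boxes_num, dtype=np.int32)
 *         cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4]
 *         cdef np.ndarray[np.intp_t, ndim=1] order = scores.argsort()[::-1]
 *         cdef np.ndarray[np.float32_t, ndim=2] sorted_dets = dets[order, :]
 *         _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
 *         keep = keep[:num_out]
 *         return list(order[keep])
 */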
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dets), __pyx_ptype_5numpy_ndarray, 1, "dets", 0))) __PYX_ERR(0, 16, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_thresh), (&PyFloat_Type), 1, "thresh", 1))) __PYX_ERR(0, 16, __pyx_L1_error) __pyx_r = __pyx_pf_3nms_7gpu_nms_gpu_nms(__pyx_self, __pyx_v_dets, __pyx_v_thresh, __pyx_v_device_id); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id) { int __pyx_v_boxes_num; int __pyx_v_boxes_dim; int __pyx_v_num_out; PyArrayObject *__pyx_v_keep = 0; PyArrayObject *__pyx_v_scores = 0; PyArrayObject *__pyx_v_order = 0; PyArrayObject *__pyx_v_sorted_dets = 0; __Pyx_LocalBuf_ND __pyx_pybuffernd_dets; __Pyx_Buffer __pyx_pybuffer_dets; __Pyx_LocalBuf_ND __pyx_pybuffernd_keep; __Pyx_Buffer __pyx_pybuffer_keep; __Pyx_LocalBuf_ND __pyx_pybuffernd_order; __Pyx_Buffer __pyx_pybuffer_order; __Pyx_LocalBuf_ND __pyx_pybuffernd_scores; __Pyx_Buffer __pyx_pybuffer_scores; __Pyx_LocalBuf_ND __pyx_pybuffernd_sorted_dets; __Pyx_Buffer __pyx_pybuffer_sorted_dets; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; PyArrayObject *__pyx_t_7 = NULL; PyArrayObject *__pyx_t_8 = NULL; PyArrayObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; float __pyx_t_14; PyObject *__pyx_t_15 = NULL; PyObject *__pyx_t_16 = NULL; PyObject *__pyx_t_17 = NULL; __Pyx_RefNannySetupContext("gpu_nms", 0); __pyx_pybuffer_keep.pybuffer.buf = NULL; __pyx_pybuffer_keep.refcount = 0; __pyx_pybuffernd_keep.data = NULL; __pyx_pybuffernd_keep.rcbuffer = &__pyx_pybuffer_keep; __pyx_pybuffer_scores.pybuffer.buf = NULL; __pyx_pybuffer_scores.refcount = 0; __pyx_pybuffernd_scores.data = NULL; __pyx_pybuffernd_scores.rcbuffer = &__pyx_pybuffer_scores; __pyx_pybuffer_order.pybuffer.buf = NULL; __pyx_pybuffer_order.refcount = 0; __pyx_pybuffernd_order.data = NULL; __pyx_pybuffernd_order.rcbuffer = &__pyx_pybuffer_order; __pyx_pybuffer_sorted_dets.pybuffer.buf = NULL; __pyx_pybuffer_sorted_dets.refcount = 0; __pyx_pybuffernd_sorted_dets.data = NULL; __pyx_pybuffernd_sorted_dets.rcbuffer = &__pyx_pybuffer_sorted_dets; __pyx_pybuffer_dets.pybuffer.buf = NULL; __pyx_pybuffer_dets.refcount = 0; __pyx_pybuffernd_dets.data = NULL; __pyx_pybuffernd_dets.rcbuffer = &__pyx_pybuffer_dets; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dets.rcbuffer->pybuffer, (PyObject*)__pyx_v_dets, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 16, __pyx_L1_error) } __pyx_pybuffernd_dets.diminfo[0].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dets.diminfo[0].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[0]; 
__pyx_pybuffernd_dets.diminfo[1].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_dets.diminfo[1].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[1]; /* "nms/gpu_nms.pyx":18 * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] # <<<<<<<<<<<<<< * cdef int boxes_dim = dets.shape[1] * cdef int num_out */ __pyx_v_boxes_num = (__pyx_v_dets->dimensions[0]); /* "nms/gpu_nms.pyx":19 * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] * cdef int boxes_dim = dets.shape[1] # <<<<<<<<<<<<<< * cdef int num_out * cdef np.ndarray[np.int32_t, ndim=1] \ */ __pyx_v_boxes_dim = (__pyx_v_dets->dimensions[1]); /* "nms/gpu_nms.pyx":22 * cdef int num_out * cdef np.ndarray[np.int32_t, ndim=1] \ * keep = np.zeros(boxes_num, dtype=np.int32) # <<<<<<<<<<<<<< * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_boxes_num); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_int32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 22, __pyx_L1_error) __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_keep = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 21, __pyx_L1_error) } else {__pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0]; } } __pyx_t_6 = 0; __pyx_v_keep = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":24 * keep = np.zeros(boxes_num, dtype=np.int32) * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] # <<<<<<<<<<<<<< * #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn * # order = scores.argsort()[::-1] */ 
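#if 0  /* Illustrative sketch only -- excluded from the build. */
/* A minimal host-side sketch of the contract that the generated code around
 * this point implements when it finally calls _nms(): boxes are float32,
 * row-major, already sorted by descending score (scores.argsort()[::-1]),
 * with the score in column 4; keep receives int32 indices and num_out the
 * count of survivors.  The prototype is transcribed from the cdef extern line
 * quoted in the "nms/gpu_nms.pyx":16 comment; the parameter names, const
 * qualifiers, the assumption that columns 0..3 are [x1, y1, x2, y2], and the
 * toy values are illustrative additions, not taken from this file. */
#include <stdint.h>

void _nms(int32_t* keep_out, int* num_out, const float* boxes_host,
          int boxes_num, int boxes_dim, float nms_overlap_thresh,
          int device_id);

static void example_nms_call(void)
{
    /* Two overlapping boxes, already sorted by descending score. */
    const float sorted_dets[2 * 5] = {
        0.f, 0.f, 10.f, 10.f, 0.9f,   /* x1, y1, x2, y2, score (col 4) */
        1.f, 1.f, 11.f, 11.f, 0.8f,
    };
    int32_t keep[2];
    int num_out = 0;

    _nms(keep, &num_out, sorted_dets, /*boxes_num=*/2, /*boxes_dim=*/5,
         /*thresh=*/0.7f, /*device_id=*/0);

    /* keep[0..num_out-1] now holds indices into sorted_dets of the boxes that
     * survive suppression; the Cython wrapper then truncates keep to num_out
     * and returns list(order[keep]) to map back to the caller's ordering. */
}
#endif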
__pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 24, __pyx_L1_error) __pyx_t_7 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scores.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_scores = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_scores.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 23, __pyx_L1_error) } else {__pyx_pybuffernd_scores.diminfo[0].strides = __pyx_pybuffernd_scores.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_scores.diminfo[0].shape = __pyx_pybuffernd_scores.rcbuffer->pybuffer.shape[0]; } } __pyx_t_7 = 0; __pyx_v_scores = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":28 * # order = scores.argsort()[::-1] * cdef np.ndarray[np.intp_t, ndim=1] \ * order = scores.argsort()[::-1] # <<<<<<<<<<<<<< * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_scores), __pyx_n_s_argsort); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } if (__pyx_t_3) { __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetItem(__pyx_t_5, __pyx_slice__3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 28, __pyx_L1_error) __pyx_t_8 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_order.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_order = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_order.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 27, __pyx_L1_error) } else {__pyx_pybuffernd_order.diminfo[0].strides = __pyx_pybuffernd_order.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_order.diminfo[0].shape = __pyx_pybuffernd_order.rcbuffer->pybuffer.shape[0]; } } __pyx_t_8 = 0; __pyx_v_order = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nms/gpu_nms.pyx":30 * order = scores.argsort()[::-1] * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] # <<<<<<<<<<<<<< * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] */ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_order)); 
__Pyx_GIVEREF(((PyObject *)__pyx_v_order)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_order)); __Pyx_INCREF(__pyx_slice__4); __Pyx_GIVEREF(__pyx_slice__4); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_slice__4); __pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 30, __pyx_L1_error) __pyx_t_9 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer, (PyObject*)__pyx_t_9, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { __pyx_v_sorted_dets = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 29, __pyx_L1_error) } else {__pyx_pybuffernd_sorted_dets.diminfo[0].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_sorted_dets.diminfo[0].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_sorted_dets.diminfo[1].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_sorted_dets.diminfo[1].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[1]; } } __pyx_t_9 = 0; __pyx_v_sorted_dets = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":31 * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) # <<<<<<<<<<<<<< * keep = keep[:num_out] * return list(order[keep]) */ __pyx_t_10 = 0; __pyx_t_11 = -1; if (__pyx_t_10 < 0) { __pyx_t_10 += __pyx_pybuffernd_keep.diminfo[0].shape; if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_keep.diminfo[0].shape)) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 31, __pyx_L1_error) } __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_11 = -1; if (__pyx_t_12 < 0) { __pyx_t_12 += __pyx_pybuffernd_sorted_dets.diminfo[0].shape; if (unlikely(__pyx_t_12 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_sorted_dets.diminfo[0].shape)) __pyx_t_11 = 0; if (__pyx_t_13 < 0) { __pyx_t_13 += __pyx_pybuffernd_sorted_dets.diminfo[1].shape; if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 1; } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_sorted_dets.diminfo[1].shape)) __pyx_t_11 = 1; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 31, __pyx_L1_error) } __pyx_t_14 = __pyx_PyFloat_AsFloat(__pyx_v_thresh); if (unlikely((__pyx_t_14 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 31, __pyx_L1_error) _nms((&(*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int32_t *, __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_keep.diminfo[0].strides))), (&__pyx_v_num_out), (&(*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_sorted_dets.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_sorted_dets.diminfo[1].strides))), __pyx_v_boxes_num, __pyx_v_boxes_dim, __pyx_t_14, __pyx_v_device_id); /* "nms/gpu_nms.pyx":32 * sorted_dets = dets[order, :] * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = 
keep[:num_out] # <<<<<<<<<<<<<< * return list(order[keep]) */ __pyx_t_5 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_v_keep), 0, __pyx_v_num_out, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 32, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 32, __pyx_L1_error) __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __pyx_t_11 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_11 < 0)) { PyErr_Fetch(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_v_keep, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_17); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_15, __pyx_t_16, __pyx_t_17); } } __pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 32, __pyx_L1_error) } __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_keep, ((PyArrayObject *)__pyx_t_5)); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":33 * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] * return list(order[keep]) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_order), ((PyObject *)__pyx_v_keep)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PySequence_List(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); 
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_keep); __Pyx_XDECREF((PyObject *)__pyx_v_scores); __Pyx_XDECREF((PyObject *)__pyx_v_order); __Pyx_XDECREF((PyObject *)__pyx_v_sorted_dets); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":203 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":206 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":207 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":209 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":212 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # 
<<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ goto __pyx_L4; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":214 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ /*else*/ { __pyx_v_copy_shape = 0; } __pyx_L4:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":217 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L6_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 218, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L9_bool_binop_done; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":221 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L9_bool_binop_done:; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 222, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":224 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":225 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (__pyx_v_copy_shape != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":229 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":230 * # This is allocated as one block, strides first. 
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":231 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":233 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ goto __pyx_L11; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":236 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L11:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":237 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":238 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":239 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":242 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef int offset */ __pyx_v_f = NULL; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":243 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":246 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":250 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ goto __pyx_L14; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":253 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ /*else*/ { __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L14:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":256 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L20_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_L20_next_or:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":258 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder 
== '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L19_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 259, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":260 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":261 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = ((char *)"B"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":262 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = ((char *)"h"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":263 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = ((char *)"H"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = ((char *)"i"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = ((char *)"I"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: 
__pyx_v_f = ((char *)"l"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = ((char *)"L"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = ((char *)"q"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = ((char *)"Q"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = ((char *)"f"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = ((char *)"d"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = ((char *)"g"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = ((char *)"Zf"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = ((char *)"Zd"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":275 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = ((char *)"Zg"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = ((char *)"O"); break; default: /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":278 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = 
PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 278, __pyx_L1_error) break; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":279 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":280 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":282 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)malloc(0xFF)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":283 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":284 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 285, __pyx_L1_error) __pyx_v_f = __pyx_t_7; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":288 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. 
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":292 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":294 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # 
<<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":771 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 771, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":774 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, 
<void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":777 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":780 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject 
*__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":783 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":790 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":791 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 794, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = 
PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 794, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 795, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 796, __pyx_L1_error) } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 796, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - 
offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 799, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 803, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 823, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(1, 826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
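/* Descriptive note on the dispatch around this point (a summary of the
   generated branches, nothing new is assumed): each elif branch boxes one
   NPY_* enumerator, compares it against the child dtype's type_num with
   PyObject_RichCompare(..., Py_EQ), and on a match stores the matching
   single-character buffer-format code into f[0] -- e.g. NPY_BYTE -> 'b'/98,
   NPY_INT -> 'i'/105, NPY_FLOAT -> 'f'/102 -- with the complex types written
   as a 'Z' prefix pair (Zf, Zd, Zg) before jumping to the shared label that
   advances f by one character. */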
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: 
f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 844, __pyx_L1_error) } __pyx_L15:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) __PYX_ERR(1, 849, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":969 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ goto __pyx_L3; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":971 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ /*else*/ { Py_INCREF(__pyx_v_base); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":972 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":973 * Py_INCREF(base) # important to do this before decref below! 
* baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":978 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":980 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "gpu_nms", 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_k_D_v_zix_caffe_caffe_win_20160523, sizeof(__pyx_k_D_v_zix_caffe_caffe_win_20160523), 0, 0, 1, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 
0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_argsort, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1}, {&__pyx_n_s_boxes_dim, __pyx_k_boxes_dim, sizeof(__pyx_k_boxes_dim), 0, 0, 1, 1}, {&__pyx_n_s_boxes_num, __pyx_k_boxes_num, sizeof(__pyx_k_boxes_num), 0, 0, 1, 1}, {&__pyx_n_s_dets, __pyx_k_dets, sizeof(__pyx_k_dets), 0, 0, 1, 1}, {&__pyx_n_s_device_id, __pyx_k_device_id, sizeof(__pyx_k_device_id), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_gpu_nms, __pyx_k_gpu_nms, sizeof(__pyx_k_gpu_nms), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_int32, __pyx_k_int32, sizeof(__pyx_k_int32), 0, 0, 1, 1}, {&__pyx_n_s_keep, __pyx_k_keep, sizeof(__pyx_k_keep), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_nms_gpu_nms, __pyx_k_nms_gpu_nms, sizeof(__pyx_k_nms_gpu_nms), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_num_out, __pyx_k_num_out, sizeof(__pyx_k_num_out), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_order, __pyx_k_order, sizeof(__pyx_k_order), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_scores, __pyx_k_scores, sizeof(__pyx_k_scores), 0, 0, 1, 1}, {&__pyx_n_s_sorted_dets, __pyx_k_sorted_dets, sizeof(__pyx_k_sorted_dets), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_thresh, __pyx_k_thresh, sizeof(__pyx_k_thresh), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 218, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 231, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 799, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "nms/gpu_nms.pyx":24 * keep = np.zeros(boxes_num, dtype=np.int32) * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] # <<<<<<<<<<<<<< * #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn * # order = scores.argsort()[::-1] */ __pyx_slice_ = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice_); __Pyx_GIVEREF(__pyx_slice_); __pyx_tuple__2 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_4); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "nms/gpu_nms.pyx":28 * # order = scores.argsort()[::-1] * cdef np.ndarray[np.intp_t, ndim=1] \ * order = scores.argsort()[::-1] # <<<<<<<<<<<<<< * cdef 
np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] */ __pyx_slice__3 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); /* "nms/gpu_nms.pyx":30 * order = scores.argsort()[::-1] * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] # <<<<<<<<<<<<<< * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] */ __pyx_slice__4 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__4)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__4); __Pyx_GIVEREF(__pyx_slice__4); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * 
* # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ __pyx_tuple__11 = PyTuple_Pack(10, __pyx_n_s_dets, __pyx_n_s_thresh, __pyx_n_s_device_id, __pyx_n_s_boxes_num, __pyx_n_s_boxes_dim, __pyx_n_s_num_out, __pyx_n_s_keep, __pyx_n_s_scores, __pyx_n_s_order, __pyx_n_s_sorted_dets); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(3, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_n_s_gpu_nms, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initgpu_nms(void); /*proto*/ PyMODINIT_FUNC initgpu_nms(void) #else PyMODINIT_FUNC PyInit_gpu_nms(void); /*proto*/ PyMODINIT_FUNC PyInit_gpu_nms(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_gpu_nms(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("gpu_nms", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_nms__gpu_nms) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "nms.gpu_nms")) { if (unlikely(PyDict_SetItemString(modules, "nms.gpu_nms", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 155, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 172, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 181, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 861, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "nms/gpu_nms.pyx":8 * # -------------------------------------------------------- * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); 
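/* Module initialisation continues below: after the generated "import numpy
   as np" just above, the init function verifies sizeof(int) ==
   sizeof(np.int32_t), wraps the Cython-level gpu_nms() entry point in a
   PyCFunction, publishes it in the module dict under the name "gpu_nms",
   and finally installs the module's __test__ dictionary. */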
__pyx_t_1 = 0; /* "nms/gpu_nms.pyx":11 * cimport numpy as np * * assert sizeof(int) == sizeof(np.int32_t) # <<<<<<<<<<<<<< * * cdef extern from "gpu_nms.hpp": */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((sizeof(int)) == (sizeof(__pyx_t_5numpy_int32_t))) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 11, __pyx_L1_error) } } #endif /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3nms_7gpu_nms_1gpu_nms, NULL, __pyx_n_s_nms_gpu_nms); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gpu_nms, __pyx_t_1) < 0) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nms/gpu_nms.pyx":1 * # -------------------------------------------------------- # <<<<<<<<<<<<<< * # Faster R-CNN * # Copyright (c) 2015 Microsoft */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nms.gpu_nms"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* ArgTypeTest */ static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } /* BufferFormatCheck */ static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? 
"'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
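/* The format string ended while fields of an enclosing struct were still expected, so report the buffer dtype mismatch. */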
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match 
size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* 
__Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* BufferIndexError */ static void __Pyx_RaiseBufferIndexError(int axis) { PyErr_Format(PyExc_IndexError, "Out of bounds on buffer access (axis %d)", axis); } /* SliceObject */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { #if CYTHON_COMPILING_IN_CPYTHON PyMappingMethods* mp; #if PY_MAJOR_VERSION < 3 PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; if (likely(ms && ms->sq_slice)) { if (!has_cstart) { if (_py_start && (*_py_start != Py_None)) { cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstart = 0; } if (!has_cstop) { if (_py_stop && (*_py_stop != Py_None)) { cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstop = PY_SSIZE_T_MAX; } if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { Py_ssize_t l = ms->sq_length(obj); if (likely(l >= 0)) { if (cstop < 0) { cstop += l; if (cstop < 0) cstop = 0; } if (cstart < 0) { cstart += l; if (cstart < 0) cstart = 0; } } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) goto bad; PyErr_Clear(); } } return ms->sq_slice(obj, cstart, cstop); } #endif mp = Py_TYPE(obj)->tp_as_mapping; if (likely(mp && mp->mp_subscript)) #endif { PyObject* result; PyObject *py_slice, *py_start, *py_stop; if (_py_slice) { py_slice = *_py_slice; } else { PyObject* owned_start = NULL; PyObject* owned_stop = NULL; if (_py_start) { py_start = *_py_start; } else { if (has_cstart) { owned_start = py_start = PyInt_FromSsize_t(cstart); if (unlikely(!py_start)) goto bad; } else py_start = Py_None; } if (_py_stop) { py_stop = *_py_stop; } else { if (has_cstop) { owned_stop = py_stop = PyInt_FromSsize_t(cstop); if (unlikely(!py_stop)) { Py_XDECREF(owned_start); goto bad; } } else py_stop = Py_None; } py_slice = PySlice_New(py_start, py_stop, Py_None); Py_XDECREF(owned_start); Py_XDECREF(owned_stop); if (unlikely(!py_slice)) goto bad; } #if CYTHON_COMPILING_IN_CPYTHON result = mp->mp_subscript(obj, py_slice); #else result = PyObject_GetItem(obj, py_slice); #endif if (!_py_slice) { Py_DECREF(py_slice); } return result; } PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name); bad: return NULL; } /* BufferFallbackError */ static void __Pyx_RaiseBufferFallbackError(void) { PyErr_SetString(PyExc_ValueError, "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); } /* PyErrFetchRestore */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, 
*tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) 
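/* Calling the exception class supplied as the cause failed to produce an instance. */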
goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* 
code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? 
c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } Py_DECREF(obj); view->obj = NULL; } #endif /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == 
b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { 
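/* Software double-precision complex helpers, used when native C complex support (CYTHON_CCOMPLEX) is unavailable: component-wise subtraction. */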
__pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) { const npy_int32 neg_one = (npy_int32) -1, const_zero = (npy_int32) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(npy_int32) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(npy_int32, long, PyInt_AS_LONG(x)) } 
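/* npy_int32 is at least as wide as long here, so the PyInt value cannot overflow; only negative values need rejecting for unsigned targets. */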
else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (npy_int32) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (npy_int32) 0; case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, digits[0]) case 2: if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 2 * PyLong_SHIFT) { return (npy_int32) (((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; case 3: if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 3 * PyLong_SHIFT) { return (npy_int32) (((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; case 4: if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 4 * PyLong_SHIFT) { return (npy_int32) (((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (npy_int32) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(npy_int32) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(npy_int32) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (npy_int32) 0; case -1: __PYX_VERIFY_RETURN_INT(npy_int32, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, +digits[0]) case -2: if (8 * sizeof(npy_int32) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 2: if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) { return (npy_int32) ((((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case -3: if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) 
{ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 3: if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { return (npy_int32) ((((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case -4: if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 4: if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) { return (npy_int32) ((((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; } #endif if (sizeof(npy_int32) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, long, PyLong_AsLong(x)) } else if (sizeof(npy_int32) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else npy_int32 val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (npy_int32) -1; } } else { npy_int32 val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (npy_int32) -1; val = __Pyx_PyInt_As_npy_int32(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to npy_int32"); return (npy_int32) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to npy_int32"); return (npy_int32) -1; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = 
(int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { 
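/* Reassemble the magnitude from three PyLong digits of PyLong_SHIFT bits each, then negate it for this negative (Py_SIZE == -3) case. */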
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { 
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 
* PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: 
PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. 
Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else if (__Pyx_PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif #endif } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return __Pyx_NewRef(x); m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, 
Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
eb733ff8554233fb76e42d6fb934771564301bc3.hip
// !!! This is a file automatically generated by hipify!!! //---------------------------------------------------------------------------// // Copyright (c) 2013-2014 Kyle Lutz <[email protected]> // // Distributed under the Boost Software License, Version 1.0 // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // // See http://boostorg.github.com/compute for more information. //---------------------------------------------------------------------------// #include <iostream> #include <iterator> #include <algorithm> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/host_vector.h> #include <thrust/transform.h> #include "perf.hpp" struct saxpy_functor : public thrust::binary_function<float,float,float> { const float a; saxpy_functor(float _a) : a(_a) {} __host__ __device__ float operator()(const float& x, const float& y) const { return a * x + y; } }; int main(int argc, char *argv[]) { perf_parse_args(argc, argv); std::cout << "size: " << PERF_N << std::endl; thrust::host_vector<int> host_x(PERF_N); thrust::host_vector<int> host_y(PERF_N); std::generate(host_x.begin(), host_x.end(), rand); std::generate(host_y.begin(), host_y.end(), rand); // transfer data to the device thrust::device_vector<int> device_x = host_x; thrust::device_vector<int> device_y = host_y; perf_timer t; for(size_t trial = 0; trial < PERF_TRIALS; trial++){ t.start(); thrust::transform(device_x.begin(), device_x.end(), device_y.begin(), device_y.begin(), saxpy_functor(2.5f)); hipDeviceSynchronize(); t.stop(); } std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl; // transfer data back to host thrust::copy(device_x.begin(), device_x.end(), host_x.begin()); thrust::copy(device_y.begin(), device_y.end(), host_y.begin()); return 0; }
eb733ff8554233fb76e42d6fb934771564301bc3.cu
//---------------------------------------------------------------------------// // Copyright (c) 2013-2014 Kyle Lutz <[email protected]> // // Distributed under the Boost Software License, Version 1.0 // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // // See http://boostorg.github.com/compute for more information. //---------------------------------------------------------------------------// #include <iostream> #include <iterator> #include <algorithm> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/host_vector.h> #include <thrust/transform.h> #include "perf.hpp" struct saxpy_functor : public thrust::binary_function<float,float,float> { const float a; saxpy_functor(float _a) : a(_a) {} __host__ __device__ float operator()(const float& x, const float& y) const { return a * x + y; } }; int main(int argc, char *argv[]) { perf_parse_args(argc, argv); std::cout << "size: " << PERF_N << std::endl; thrust::host_vector<int> host_x(PERF_N); thrust::host_vector<int> host_y(PERF_N); std::generate(host_x.begin(), host_x.end(), rand); std::generate(host_y.begin(), host_y.end(), rand); // transfer data to the device thrust::device_vector<int> device_x = host_x; thrust::device_vector<int> device_y = host_y; perf_timer t; for(size_t trial = 0; trial < PERF_TRIALS; trial++){ t.start(); thrust::transform(device_x.begin(), device_x.end(), device_y.begin(), device_y.begin(), saxpy_functor(2.5f)); cudaDeviceSynchronize(); t.stop(); } std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl; // transfer data back to host thrust::copy(device_x.begin(), device_x.end(), host_x.begin()); thrust::copy(device_y.begin(), device_y.end(), host_y.begin()); return 0; }
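The pair above differs only in the synchronization call (hipDeviceSynchronize vs. cudaDeviceSynchronize). A minimal, self-contained sketch of the same thrust::transform SAXPY pattern with a host-side correctness check is shown below; the vector size, float element type, and tolerance are illustrative assumptions and are not part of the original benchmark.

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform.h>
#include <cassert>
#include <cmath>

struct saxpy_functor
{
    const float a;
    saxpy_functor(float _a) : a(_a) {}
    __host__ __device__
    float operator()(const float& x, const float& y) const { return a * x + y; }
};

int main()
{
    const int n = 1024;                               // illustrative size
    thrust::host_vector<float> x(n), y(n);
    for(int i = 0; i < n; i++){ x[i] = float(i); y[i] = 2.0f * float(i); }

    thrust::device_vector<float> dx = x, dy = y;      // host -> device
    thrust::transform(dx.begin(), dx.end(), dy.begin(), dy.begin(), saxpy_functor(2.5f));

    thrust::host_vector<float> result = dy;           // device -> host
    for(int i = 0; i < n; i++)
        assert(std::fabs(result[i] - (2.5f * x[i] + y[i])) < 1e-4f);
    return 0;
}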
a5724c9a8bf3c2e09b5908067526e6ad5f946892.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <sys/time.h> #include <ctime> #include <fstream> #include <cmath> #include <cstdlib> #include <cstring> using namespace std; //Eratosthenes' sieve on odds __global__ static void sieve(char *primes, int n, int root) { int i = blockIdx.x * blockDim.x + threadIdx.x + 3; if (i < root && primes[i] == 0) { for (long j = i * i; j <= n; j += i) { primes[j] = 1; } } } //Eratosthenes' sieve on evens __global__ static void Evens(char* P, int n) { long i = blockIdx.x * blockDim.x + threadIdx.x + threadIdx.x + 4; if (i < n) { P[i] = 1; } } __global__ static void Init(char* P) { P[0] = 1; P[1] = 1; } __host__ void isPrime(char* P, int max) { int blockSize = 32; long root = sqrt(max); char* d_Primes = NULL; long sizePrimes = sizeof(char) * max; hipMalloc(&d_Primes, sizePrimes); hipMemset(d_Primes, 0, sizePrimes); dim3 dimBlock(blockSize); dim3 dimGrid((root + dimBlock.x) / dimBlock.x); dim3 dimGridEven((max + dimBlock.x) / dimBlock.x); hipLaunchKernelGGL(( Init), dim3(1),dim3(1), 0, 0, d_Primes); hipLaunchKernelGGL(( Evens), dim3(dimGridEven), dim3(dimBlock), 0, 0, d_Primes, max); hipLaunchKernelGGL(( sieve), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Primes, max, root); hipMemcpy(P, d_Primes, sizePrimes, hipMemcpyDeviceToHost); hipFree(d_Primes); } int main(){ struct timeval start, end; long mtime, seconds, useconds; char *primes; long long sum; long long num; cout << "enter number to sum primes to: " << endl; cin >> num; primes = (char*)malloc(num); memset(primes, 0, num); if (num < 2) { cout << "no primes to sum!" << endl; return 0; } else{ sum = 2; } gettimeofday(&start, NULL); isPrime(primes, num); for (long n = 3; n <= num - 1; n += 2) { if (primes[n] == 0){ //Indicates primality //cout << n << " is prime." << endl; sum += n; if(num >= 1 + n*n && num < (n+1)*(n + 1)) { sum -= n*n; } } } free(primes); gettimeofday(&end, NULL); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtime = ((seconds) * 1000 + useconds/1000.0); cout << "sum under " << num << " is " << sum << endl; cout << "time: " << mtime << " milliseconds\n" << endl; return 0; }
a5724c9a8bf3c2e09b5908067526e6ad5f946892.cu
#include <iostream> #include <sys/time.h> #include <ctime> #include <fstream> #include <cmath> #include <cstdlib> #include <cstring> using namespace std; //Eratosthenes' sieve on odds __global__ static void sieve(char *primes, int n, int root) { int i = blockIdx.x * blockDim.x + threadIdx.x + 3; if (i < root && primes[i] == 0) { for (long j = i * i; j <= n; j += i) { primes[j] = 1; } } } //Eratosthenes' sieve on evens __global__ static void Evens(char* P, int n) { long i = blockIdx.x * blockDim.x + threadIdx.x + threadIdx.x + 4; if (i < n) { P[i] = 1; } } __global__ static void Init(char* P) { P[0] = 1; P[1] = 1; } __host__ void isPrime(char* P, int max) { int blockSize = 32; long root = sqrt(max); char* d_Primes = NULL; long sizePrimes = sizeof(char) * max; cudaMalloc(&d_Primes, sizePrimes); cudaMemset(d_Primes, 0, sizePrimes); dim3 dimBlock(blockSize); dim3 dimGrid((root + dimBlock.x) / dimBlock.x); dim3 dimGridEven((max + dimBlock.x) / dimBlock.x); Init<<<1,1>>>(d_Primes); Evens<<<dimGridEven, dimBlock>>>(d_Primes, max); sieve<<<dimGrid, dimBlock>>>(d_Primes, max, root); cudaMemcpy(P, d_Primes, sizePrimes, cudaMemcpyDeviceToHost); cudaFree(d_Primes); } int main(){ struct timeval start, end; long mtime, seconds, useconds; char *primes; long long sum; long long num; cout << "enter number to sum primes to: " << endl; cin >> num; primes = (char*)malloc(num); memset(primes, 0, num); if (num < 2) { cout << "no primes to sum!" << endl; return 0; } else{ sum = 2; } gettimeofday(&start, NULL); isPrime(primes, num); for (long n = 3; n <= num - 1; n += 2) { if (primes[n] == 0){ //Indicates primality //cout << n << " is prime." << endl; sum += n; if(num >= 1 + n*n && num < (n+1)*(n + 1)) { sum -= n*n; } } } free(primes); gettimeofday(&end, NULL); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtime = ((seconds) * 1000 + useconds/1000.0); cout << "sum under " << num << " is " << sum << endl; cout << "time: " << mtime << " milliseconds\n" << endl; return 0; }
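None of the kernel launches or memory operations in the isPrime() wrapper above are checked for errors. A small error-checking sketch that could wrap those calls is shown below; the CUDA_CHECK macro name is an assumption for illustration, and the commented usage reuses the variable names from isPrime().

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical helper; the name CUDA_CHECK is illustrative.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err__ = (call);                                           \
        if (err__ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
                    cudaGetErrorString(err__), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// Usage sketch (names mirror isPrime() above):
//   CUDA_CHECK(cudaMalloc(&d_Primes, sizePrimes));
//   CUDA_CHECK(cudaMemset(d_Primes, 0, sizePrimes));
//   sieve<<<dimGrid, dimBlock>>>(d_Primes, max, root);
//   CUDA_CHECK(cudaGetLastError());        // launch errors
//   CUDA_CHECK(cudaDeviceSynchronize());   // asynchronous execution errors
//   CUDA_CHECK(cudaMemcpy(P, d_Primes, sizePrimes, cudaMemcpyDeviceToHost));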
10f31187a4dd9d75844051e4fbc3364404d5b05d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // sudo nvprof --unified-memory-profiling off ./ManagedMemoryVecAdd // Use this command for profiling without errors for unified memory profiling #include<iostream> __global__ void vecAdd(int *a, int *b, int *c, int N){ int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < N){ c[i] = a[i] + b[i]; } } __global__ void squareVec(int *a, int *b, int N){ int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < N){ b[i] = a[i]*a[i]; } } int main(){ int N = 20; size_t size = N * sizeof(int); int *a, *b, *c; hipMallocManaged(&a, size); // Unified memory; ALWAYS use cudaMemPrefetchAsync() with Unified memory to reduce overhead time hipMallocManaged(&b, size); hipMallocManaged(&c, size); for(auto i = 0; i < N; i ++){ a[i] = i; b[i] = 2*i; } int id = 0; hipGetDevice(&id); // Get the device ID hipMemPrefetchAsync(a, size, id); // Use the device ID to prefetch 'a' to the GPU memory hipMemPrefetchAsync(b, size, id); hipMemPrefetchAsync(c, size, id); int NumThreadsPerBlock = 256; int BlockSize = (N + NumThreadsPerBlock -1)/NumThreadsPerBlock; hipLaunchKernelGGL(( vecAdd), dim3(BlockSize), dim3(NumThreadsPerBlock), 0, 0, a, b, c, N); hipDeviceSynchronize(); // Synchronize all the threads before moving forward hipMemPrefetchAsync(a, size, hipCpuDeviceId); // Prefetch 'a' to the CPU memory; directly use built-in function hipCpuDeviceId hipMemPrefetchAsync(b, size, hipCpuDeviceId); hipMemPrefetchAsync(c, size, hipCpuDeviceId); std::cout << "Printing the vector" << std::endl; for(auto i = 0; i < N; i++){ std::cout << c[i] << std::endl; } hipFree(a); hipFree(b); int *c_squared; hipMallocManaged(&c_squared, size); int id2 = 0; hipGetDevice(&id2); hipMemPrefetchAsync(c, size, id); hipMemPrefetchAsync(c_squared, size, id2); hipLaunchKernelGGL(( squareVec), dim3(BlockSize), dim3(NumThreadsPerBlock), 0, 0, c,c_squared, N); hipDeviceSynchronize(); hipMemPrefetchAsync(c, size, hipCpuDeviceId); hipMemPrefetchAsync(c_squared, size, hipCpuDeviceId); std::cout << "Printing the vector squared" << std::endl; for(auto i = 0; i < N; i++){ std::cout << c_squared[i] << std::endl; } hipFree(c_squared); hipFree(c); return 0; }
10f31187a4dd9d75844051e4fbc3364404d5b05d.cu
// sudo nvprof --unified-memory-profiling off ./ManagedMemoryVecAdd // Use this command for profiling without errors for unified memory profiling #include<iostream> __global__ void vecAdd(int *a, int *b, int *c, int N){ int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < N){ c[i] = a[i] + b[i]; } } __global__ void squareVec(int *a, int *b, int N){ int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < N){ b[i] = a[i]*a[i]; } } int main(){ int N = 20; size_t size = N * sizeof(int); int *a, *b, *c; cudaMallocManaged(&a, size); // Unified memory; ALWAYS use cudaMemPrefetchAsync() with Unified memory to reduce overhead time cudaMallocManaged(&b, size); cudaMallocManaged(&c, size); for(auto i = 0; i < N; i ++){ a[i] = i; b[i] = 2*i; } int id = 0; cudaGetDevice(&id); // Get the device ID cudaMemPrefetchAsync(a, size, id); // Use the device ID to prefetch 'a' to the GPU memory cudaMemPrefetchAsync(b, size, id); cudaMemPrefetchAsync(c, size, id); int NumThreadsPerBlock = 256; int BlockSize = (N + NumThreadsPerBlock -1)/NumThreadsPerBlock; vecAdd<<<BlockSize, NumThreadsPerBlock>>>(a, b, c, N); cudaDeviceSynchronize(); // Synchronize all the threads before moving forward cudaMemPrefetchAsync(a, size, cudaCpuDeviceId); // Prefetch 'a' to the CPU memory; directly use built-in function cudaCpuDeviceId cudaMemPrefetchAsync(b, size, cudaCpuDeviceId); cudaMemPrefetchAsync(c, size, cudaCpuDeviceId); std::cout << "Printing the vector" << std::endl; for(auto i = 0; i < N; i++){ std::cout << c[i] << std::endl; } cudaFree(a); cudaFree(b); int *c_squared; cudaMallocManaged(&c_squared, size); int id2 = 0; cudaGetDevice(&id2); cudaMemPrefetchAsync(c, size, id); cudaMemPrefetchAsync(c_squared, size, id2); squareVec<<<BlockSize, NumThreadsPerBlock>>>(c,c_squared, N); cudaDeviceSynchronize(); cudaMemPrefetchAsync(c, size, cudaCpuDeviceId); cudaMemPrefetchAsync(c_squared, size, cudaCpuDeviceId); std::cout << "Printing the vector squared" << std::endl; for(auto i = 0; i < N; i++){ std::cout << c_squared[i] << std::endl; } cudaFree(c_squared); cudaFree(c); return 0; }
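A standalone sketch of the device-query-and-prefetch idiom used above; cudaGetDevice() returns an error code and writes the device ordinal through its pointer argument, which is why the id variable is filled in before the prefetch calls. The cudaMemAdvise hint at the end is an optional addition that is not used in the original file.

#include <cuda_runtime.h>

// `data` and `bytes` are placeholders for a cudaMallocManaged allocation and its size.
void prefetch_to_current_device(int* data, size_t bytes)
{
    int device = 0;
    cudaGetDevice(&device);                       // device ordinal written into `device`
    cudaMemPrefetchAsync(data, bytes, device);    // migrate managed pages to that GPU

    // Optional hint (not in the original code): mark the data read-mostly so the
    // CPU and GPU can each keep read-only copies without page thrashing.
    cudaMemAdvise(data, bytes, cudaMemAdviseSetReadMostly, device);
}

// After the kernels finish, the pages can be pulled back to the host with:
//   cudaMemPrefetchAsync(data, bytes, cudaCpuDeviceId);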
41b4d3b1fe2b1f0571eb363973cbea250a3ddc8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This example explains how to divide the host and * device code into separate files using vector addition */ #include "kernel.h" #define N 64 __global__ void addKernel(float *a,float *b) { int idx=threadIdx.x+blockIdx.x*blockDim.x; if(idx>=N) return; a[idx]+=b[idx]; } void vectorAdd() { //host memory float *h_arr1,*h_arr2,*h_res; size_t size=N*sizeof(float); //allocate host memory h_arr1=(float*)malloc(size); h_arr2=(float*)malloc(size); h_res=(float*)malloc(size); //populate the host arrays for(int i=0;i<N;i++) { h_arr1[i]=i+1; h_arr2[i]=i+2; } //device memory float *d_arr1,*d_arr2; //allocate device memory hipMalloc((void**)&d_arr1,size); hipMalloc((void**)&d_arr2,size); //copy host to device hipMemcpy(d_arr1,h_arr1,size,hipMemcpyHostToDevice); hipMemcpy(d_arr2,h_arr2,size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( addKernel), dim3(8),dim3(8), 0, 0, d_arr1,d_arr2); //copy result device to host hipMemcpy(h_res,d_arr1,size,hipMemcpyDeviceToHost); //print result array for(int i=10;i<20;i++) cout << h_arr1[i] << " "; cout << endl; for(int i=10;i<20;i++) cout << h_arr2[i] << " "; cout << endl; for(int i=10;i<20;i++) cout << h_res[i] << " "; cout << endl; //free host and device memory free(h_arr1); free(h_arr2); free(h_res); hipFree(d_arr1); hipFree(d_arr2); }
41b4d3b1fe2b1f0571eb363973cbea250a3ddc8b.cu
/* * This example explains how to divide the host and * device code into separate files using vector addition */ #include "kernel.h" #define N 64 __global__ void addKernel(float *a,float *b) { int idx=threadIdx.x+blockIdx.x*blockDim.x; if(idx>=N) return; a[idx]+=b[idx]; } void vectorAdd() { //host memory float *h_arr1,*h_arr2,*h_res; size_t size=N*sizeof(float); //allocate host memory h_arr1=(float*)malloc(size); h_arr2=(float*)malloc(size); h_res=(float*)malloc(size); //populate the host arrays for(int i=0;i<N;i++) { h_arr1[i]=i+1; h_arr2[i]=i+2; } //device memory float *d_arr1,*d_arr2; //allocate device memory cudaMalloc((void**)&d_arr1,size); cudaMalloc((void**)&d_arr2,size); //copy host to device cudaMemcpy(d_arr1,h_arr1,size,cudaMemcpyHostToDevice); cudaMemcpy(d_arr2,h_arr2,size,cudaMemcpyHostToDevice); addKernel<<<8,8>>>(d_arr1,d_arr2); //copy result device to host cudaMemcpy(h_res,d_arr1,size,cudaMemcpyDeviceToHost); //print result array for(int i=10;i<20;i++) cout << h_arr1[i] << " "; cout << endl; for(int i=10;i<20;i++) cout << h_arr2[i] << " "; cout << endl; for(int i=10;i<20;i++) cout << h_res[i] << " "; cout << endl; //free host and device memory free(h_arr1); free(h_arr2); free(h_res); cudaFree(d_arr1); cudaFree(d_arr2); }
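The pair above relies on a kernel.h that is not included in this dump. A minimal, purely hypothetical reconstruction that would be consistent with how the file uses cout, malloc and vectorAdd() is sketched below; the real header may differ.

// kernel.h -- hypothetical sketch for illustration only; the actual header is not shown above.
#ifndef KERNEL_H
#define KERNEL_H

#include <iostream>
#include <cstdlib>
using namespace std;

// Host-side entry point implemented in the .cu/.hip file above; a separate
// main.cpp would simply call vectorAdd().
void vectorAdd();

#endif // KERNEL_H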
e4cef96fe37ab2986e9875037f884879ca73b0ed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" __global__ void CorrKernel(const float* gpuIm1, const float* gpuIm2, float* R, int N1, int N2, int M); void checkCUDAError(const char *msg); void GPUCorr(float* R, const float* im1, const float* im2, int M, int N1, int N2) { // pointer to device memory. float* gpuR; float* gpuIm1; float* gpuIm2; printf("GPUCorr: N1(voxel num) = %d, N2 = %d, M (time series length)= %d\n.", N1, N2, M); /* create input and output array on GPU. */ hipMalloc((void**) &gpuR, sizeof(float)*N1*N2); checkCUDAError("Allocate device gpuR"); hipMalloc((void**) &gpuIm1, sizeof(float)*N1*M); checkCUDAError("Allocate device gpuIm1"); hipMalloc((void**) &gpuIm2, sizeof(float)*N2*M); checkCUDAError("Allocate device, gpuIm2."); /* host to device memory. */ hipMemcpy(gpuR, R, sizeof(float)*N1*N2, hipMemcpyHostToDevice); hipMemcpy(gpuIm1, im1, sizeof(float)*N1*M, hipMemcpyHostToDevice); hipMemcpy(gpuIm2, im2, sizeof(float)*N2*M, hipMemcpyHostToDevice); checkCUDAError("memory copy from Host to Device"); /* run the kernel function. */ int blockSize = 16; int gridDimx = N1/blockSize + (N1%blockSize == 0?0:1); int gridDimy = N2/blockSize + (N2%blockSize == 0?0:1); printf("GPUCorr, blockSize: %dx%d, gridDim = %dx%d\n", blockSize, blockSize, gridDimx, gridDimy); dim3 dimBlock(blockSize, blockSize); dim3 dimGrid(gridDimx, gridDimy); hipLaunchKernelGGL(( CorrKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, gpuIm1, gpuIm2, gpuR, N1, N2, M); checkCUDAError(" GPUCorr, main, call Kernel."); /* Send results back to cpu memeory */ hipMemcpy(R, gpuR, sizeof(float)*N1*N2, hipMemcpyDeviceToHost); checkCUDAError("GPUCorr, main, memcopy from Device to Host."); printf("R[1][1] = %f\n", *(R + N1*1+1)); /* clean up. */ hipFree(gpuR); hipFree(gpuIm1); hipFree(gpuIm2); } /* Kernel function */ __global__ void CorrKernel(const float* gpuIm1, const float* gpuIm2, float* R, int N1, int N2, int M) { int m = 0; float mean1 = 0; float mean2 = 0; float std1 = 0; float std2 = 0; int lidx1, lidx2; float r = 0; // temp variable for sample correlation. int idx1 = blockIdx.x * blockDim.x + threadIdx.x; int idx2 = blockIdx.y * blockDim.y + threadIdx.y; if ((idx1 >= N1) | (idx2 >= N2)){ return; } if (idx1 == idx2) { *(R+idx1*N2 + idx2) = 1; } // mean of first vector. for (m = 0; m < M; m++){ lidx1 = idx1*M + m; lidx2 = idx2*M + m; mean1 = mean1 + *(gpuIm1 + lidx1)/M; mean2 = mean2 + *(gpuIm2 + lidx2)/M; } /* Standard deviation. */ for (m = 0; m < M; m++){ lidx1 = idx1 * M + m; lidx2 = idx2 * M + m; std1 = std1 + pow((*(gpuIm1+lidx1) - mean1), 2)/(M-1); std2 = std2 + pow((*(gpuIm2+lidx2) - mean2), 2)/(M-1); } std1 = sqrt(std1); std2 = sqrt(std2); /* Sample Correlation. */ if (std1 == 0 | std2 == 0){ *(R + idx1*N2 + idx2) = 0; } else{ for (m = 0; m < M; m++){ lidx1 = idx1 * M + m; lidx2 = idx2 * M + m; r = r + (*(gpuIm1+lidx1) - mean1) * (*(gpuIm2 + lidx2)-mean2)/((M-1)*std1*std2); } *(R+idx1*N2 + idx2) = r; } } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); //exit(-1); } }
e4cef96fe37ab2986e9875037f884879ca73b0ed.cu
#include "common.h" __global__ void CorrKernel(const float* gpuIm1, const float* gpuIm2, float* R, int N1, int N2, int M); void checkCUDAError(const char *msg); void GPUCorr(float* R, const float* im1, const float* im2, int M, int N1, int N2) { // pointer to device memory. float* gpuR; float* gpuIm1; float* gpuIm2; printf("GPUCorr: N1(voxel num) = %d, N2 = %d, M (time series length)= %d\n.", N1, N2, M); /* create input and output array on GPU. */ cudaMalloc((void**) &gpuR, sizeof(float)*N1*N2); checkCUDAError("Allocate device gpuR"); cudaMalloc((void**) &gpuIm1, sizeof(float)*N1*M); checkCUDAError("Allocate device gpuIm1"); cudaMalloc((void**) &gpuIm2, sizeof(float)*N2*M); checkCUDAError("Allocate device, gpuIm2."); /* host to device memory. */ cudaMemcpy(gpuR, R, sizeof(float)*N1*N2, cudaMemcpyHostToDevice); cudaMemcpy(gpuIm1, im1, sizeof(float)*N1*M, cudaMemcpyHostToDevice); cudaMemcpy(gpuIm2, im2, sizeof(float)*N2*M, cudaMemcpyHostToDevice); checkCUDAError("memory copy from Host to Device"); /* run the kernel function. */ int blockSize = 16; int gridDimx = N1/blockSize + (N1%blockSize == 0?0:1); int gridDimy = N2/blockSize + (N2%blockSize == 0?0:1); printf("GPUCorr, blockSize: %dx%d, gridDim = %dx%d\n", blockSize, blockSize, gridDimx, gridDimy); dim3 dimBlock(blockSize, blockSize); dim3 dimGrid(gridDimx, gridDimy); CorrKernel<<<dimGrid, dimBlock>>>(gpuIm1, gpuIm2, gpuR, N1, N2, M); checkCUDAError(" GPUCorr, main, call Kernel."); /* Send results back to cpu memeory */ cudaMemcpy(R, gpuR, sizeof(float)*N1*N2, cudaMemcpyDeviceToHost); checkCUDAError("GPUCorr, main, memcopy from Device to Host."); printf("R[1][1] = %f\n", *(R + N1*1+1)); /* clean up. */ cudaFree(gpuR); cudaFree(gpuIm1); cudaFree(gpuIm2); } /* Kernel function */ __global__ void CorrKernel(const float* gpuIm1, const float* gpuIm2, float* R, int N1, int N2, int M) { int m = 0; float mean1 = 0; float mean2 = 0; float std1 = 0; float std2 = 0; int lidx1, lidx2; float r = 0; // temp variable for sample correlation. int idx1 = blockIdx.x * blockDim.x + threadIdx.x; int idx2 = blockIdx.y * blockDim.y + threadIdx.y; if ((idx1 >= N1) | (idx2 >= N2)){ return; } if (idx1 == idx2) { *(R+idx1*N2 + idx2) = 1; } // mean of first vector. for (m = 0; m < M; m++){ lidx1 = idx1*M + m; lidx2 = idx2*M + m; mean1 = mean1 + *(gpuIm1 + lidx1)/M; mean2 = mean2 + *(gpuIm2 + lidx2)/M; } /* Standard deviation. */ for (m = 0; m < M; m++){ lidx1 = idx1 * M + m; lidx2 = idx2 * M + m; std1 = std1 + pow((*(gpuIm1+lidx1) - mean1), 2)/(M-1); std2 = std2 + pow((*(gpuIm2+lidx2) - mean2), 2)/(M-1); } std1 = sqrt(std1); std2 = sqrt(std2); /* Sample Correlation. */ if (std1 == 0 | std2 == 0){ *(R + idx1*N2 + idx2) = 0; } else{ for (m = 0; m < M; m++){ lidx1 = idx1 * M + m; lidx2 = idx2 * M + m; r = r + (*(gpuIm1+lidx1) - mean1) * (*(gpuIm2 + lidx2)-mean2)/((M-1)*std1*std2); } *(R+idx1*N2 + idx2) = r; } } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); //exit(-1); } }
ebb959ed5c9e3ee136020b4353bea807574af758.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuimage/cuda/arithmetic.h" #include "cuimage/cuda/devptr.h" #include "cuimage/cuda/kernel.h" #include "cuimage/cuda/utils.h" #include "cuimage/operations/math_cu.h" #include <nvfunctional> namespace cuimage { template <typename T, typename Op> __global__ void g_Apply(DevPtr<T> input1, const DevPtr<T> input2, Op operation) { const dim3 pos = getPos(blockIdx, blockDim, threadIdx); if (pos.x >= input1.width || pos.y >= input1.height) return; operation.op()(input1(pos.x, pos.y), input2(pos.x, pos.y)); } template <typename T, typename Op> __global__ void g_Apply(DevPtr<T> input, Op operation) { const dim3 pos = getPos(blockIdx, blockDim, threadIdx); if (pos.x >= input.width || pos.y >= input.height) return; operation.op()(input(pos.x, pos.y)); } template <typename T, typename Op> void cu_Apply(DevPtr<T> input1, const DevPtr<T> input2, Op operation) { assert(input1.width == input2.width); assert(input1.height == input2.height); dim3 block = block2D(32); dim3 grid = grid2D(input1.width, input1.height, block); hipLaunchKernelGGL(( g_Apply), dim3(grid), dim3(block), 0, 0, input1, input2, operation); cudaCheckLastCall(); #ifdef DEBUG cudaSafeCall(hipDeviceSynchronize()); #endif } template <typename T, typename Op> void cu_Apply(DevPtr<T> input, Op operation) { dim3 block = block2D(32); dim3 grid = grid2D(input.width, input.height, block); hipLaunchKernelGGL(( g_Apply), dim3(grid), dim3(block), 0, 0, input, operation); cudaCheckLastCall(); #ifdef DEBUG cudaSafeCall(hipDeviceSynchronize()); #endif } template <typename T> struct AddTo { AddTo() {} nvstd::function<void(T&, const T&)> __device__ op() { return [*this] __device__(T & val1, const T& val2) { val1 += val2; }; } }; template <typename T> struct SubFrom { SubFrom() {} nvstd::function<void(T&, const T&)> __device__ op() { return [*this] __device__(T & val1, const T& val2) { val1 -= val2; }; } }; template <typename T> struct MulBy { MulBy() {} nvstd::function<void(T&, const T&)> __device__ op() { return [*this] __device__(T & val1, const T& val2) { val1 *= val2; }; } }; template <typename T> struct DivBy { DivBy() {} nvstd::function<void(T&, const T&)> __device__ op() { return [*this] __device__(T & val1, const T& val2) { val1 /= val2; }; } }; template <typename T> void cu_AddTo(DevPtr<T> image, const DevPtr<T>& other) { AddTo<T> add_op; cu_Apply(image, other, add_op); } template <typename T> void cu_MultiplyBy(DevPtr<T> image, const DevPtr<T>& other) { MulBy<T> mul_op; cu_Apply(image, other, mul_op); } template <typename T> void cu_SubtractFrom(DevPtr<T> image, const DevPtr<T>& other) { SubFrom<T> sub_op; cu_Apply(image, other, sub_op); } template <typename T> void cu_DivideBy(DevPtr<T> image, const DevPtr<T>& other) { DivBy<T> div_op; cu_Apply(image, other, div_op); } template <typename T> struct AddValTo { AddValTo(const T& val) : val_(val) { } nvstd::function<void(T&)> __device__ op() { return [*this] __device__(T & val) { val += val_; }; } private: const T val_; }; template <typename T> struct SubValFrom { SubValFrom(const T& val) : val_(val) { } nvstd::function<void(T&)> __device__ op() { return [*this] __device__(T & val) { val -= val_; }; } private: const T val_; }; template <typename T> struct MulByVal { MulByVal(const T& val) : val_(val) { } nvstd::function<void(T&)> __device__ op() { return [*this] __device__(T & val) { val *= val_; }; } private: const T val_; }; template <typename T> struct DivByVal { DivByVal(const T& val) : val_(val) { } 
nvstd::function<void(T&)> __device__ op() { return [*this] __device__(T & val) { val /= val_; }; } private: const T val_; }; template <typename T> void cu_AddTo(DevPtr<T> image, const T& value) { AddValTo<T> add_op(value); cu_Apply(image, add_op); } template <typename T> void cu_MultiplyBy(DevPtr<T> image, const T& value) { MulByVal<T> mul_op(value); cu_Apply(image, mul_op); } template <typename T> void cu_SubtractFrom(DevPtr<T> image, const T& value) { SubValFrom<T> sub_op(value); cu_Apply(image, sub_op); } template <typename T> void cu_DivideBy(DevPtr<T> image, const T& value) { DivByVal<T> div_op(value); cu_Apply(image, div_op); } /** * Explicit instantiations */ #define DECLARE_MATH_OPERATION(type, function) \ template void cu_Apply(DevPtr<type>, const DevPtr<type>, function<type>); #define DECLARE_MATH_FUNCTION(type, function) \ template void function(DevPtr<type>, const DevPtr<type>&); FOR_EACH_TYPE(DECLARE_MATH_OPERATION, AddTo) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_AddTo) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, SubFrom) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_SubtractFrom) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, MulBy) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_MultiplyBy) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, DivBy) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_DivideBy) #undef DECLARE_MATH_OPERATION #define DECLARE_MATH_OPERATION(type, function) \ template void cu_Apply(DevPtr<type>, function<type>); #undef DECLARE_MATH_FUNCTION #define DECLARE_MATH_FUNCTION(type, function) \ template void function(DevPtr<type>, const type&); FOR_EACH_TYPE(DECLARE_MATH_OPERATION, AddValTo) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_AddTo) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, SubValFrom) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_SubtractFrom) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, MulByVal) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_MultiplyBy) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, DivByVal) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_DivideBy) } // image
ebb959ed5c9e3ee136020b4353bea807574af758.cu
#include "cuimage/cuda/arithmetic.h" #include "cuimage/cuda/devptr.h" #include "cuimage/cuda/kernel.h" #include "cuimage/cuda/utils.h" #include "cuimage/operations/math_cu.h" #include <nvfunctional> namespace cuimage { template <typename T, typename Op> __global__ void g_Apply(DevPtr<T> input1, const DevPtr<T> input2, Op operation) { const dim3 pos = getPos(blockIdx, blockDim, threadIdx); if (pos.x >= input1.width || pos.y >= input1.height) return; operation.op()(input1(pos.x, pos.y), input2(pos.x, pos.y)); } template <typename T, typename Op> __global__ void g_Apply(DevPtr<T> input, Op operation) { const dim3 pos = getPos(blockIdx, blockDim, threadIdx); if (pos.x >= input.width || pos.y >= input.height) return; operation.op()(input(pos.x, pos.y)); } template <typename T, typename Op> void cu_Apply(DevPtr<T> input1, const DevPtr<T> input2, Op operation) { assert(input1.width == input2.width); assert(input1.height == input2.height); dim3 block = block2D(32); dim3 grid = grid2D(input1.width, input1.height, block); g_Apply<<<grid, block>>>(input1, input2, operation); cudaCheckLastCall(); #ifdef DEBUG cudaSafeCall(cudaDeviceSynchronize()); #endif } template <typename T, typename Op> void cu_Apply(DevPtr<T> input, Op operation) { dim3 block = block2D(32); dim3 grid = grid2D(input.width, input.height, block); g_Apply<<<grid, block>>>(input, operation); cudaCheckLastCall(); #ifdef DEBUG cudaSafeCall(cudaDeviceSynchronize()); #endif } template <typename T> struct AddTo { AddTo() {} nvstd::function<void(T&, const T&)> __device__ op() { return [*this] __device__(T & val1, const T& val2) { val1 += val2; }; } }; template <typename T> struct SubFrom { SubFrom() {} nvstd::function<void(T&, const T&)> __device__ op() { return [*this] __device__(T & val1, const T& val2) { val1 -= val2; }; } }; template <typename T> struct MulBy { MulBy() {} nvstd::function<void(T&, const T&)> __device__ op() { return [*this] __device__(T & val1, const T& val2) { val1 *= val2; }; } }; template <typename T> struct DivBy { DivBy() {} nvstd::function<void(T&, const T&)> __device__ op() { return [*this] __device__(T & val1, const T& val2) { val1 /= val2; }; } }; template <typename T> void cu_AddTo(DevPtr<T> image, const DevPtr<T>& other) { AddTo<T> add_op; cu_Apply(image, other, add_op); } template <typename T> void cu_MultiplyBy(DevPtr<T> image, const DevPtr<T>& other) { MulBy<T> mul_op; cu_Apply(image, other, mul_op); } template <typename T> void cu_SubtractFrom(DevPtr<T> image, const DevPtr<T>& other) { SubFrom<T> sub_op; cu_Apply(image, other, sub_op); } template <typename T> void cu_DivideBy(DevPtr<T> image, const DevPtr<T>& other) { DivBy<T> div_op; cu_Apply(image, other, div_op); } template <typename T> struct AddValTo { AddValTo(const T& val) : val_(val) { } nvstd::function<void(T&)> __device__ op() { return [*this] __device__(T & val) { val += val_; }; } private: const T val_; }; template <typename T> struct SubValFrom { SubValFrom(const T& val) : val_(val) { } nvstd::function<void(T&)> __device__ op() { return [*this] __device__(T & val) { val -= val_; }; } private: const T val_; }; template <typename T> struct MulByVal { MulByVal(const T& val) : val_(val) { } nvstd::function<void(T&)> __device__ op() { return [*this] __device__(T & val) { val *= val_; }; } private: const T val_; }; template <typename T> struct DivByVal { DivByVal(const T& val) : val_(val) { } nvstd::function<void(T&)> __device__ op() { return [*this] __device__(T & val) { val /= val_; }; } private: const T val_; }; template <typename T> void 
cu_AddTo(DevPtr<T> image, const T& value) { AddValTo<T> add_op(value); cu_Apply(image, add_op); } template <typename T> void cu_MultiplyBy(DevPtr<T> image, const T& value) { MulByVal<T> mul_op(value); cu_Apply(image, mul_op); } template <typename T> void cu_SubtractFrom(DevPtr<T> image, const T& value) { SubValFrom<T> sub_op(value); cu_Apply(image, sub_op); } template <typename T> void cu_DivideBy(DevPtr<T> image, const T& value) { DivByVal<T> div_op(value); cu_Apply(image, div_op); } /** * Explicit instantiations */ #define DECLARE_MATH_OPERATION(type, function) \ template void cu_Apply(DevPtr<type>, const DevPtr<type>, function<type>); #define DECLARE_MATH_FUNCTION(type, function) \ template void function(DevPtr<type>, const DevPtr<type>&); FOR_EACH_TYPE(DECLARE_MATH_OPERATION, AddTo) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_AddTo) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, SubFrom) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_SubtractFrom) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, MulBy) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_MultiplyBy) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, DivBy) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_DivideBy) #undef DECLARE_MATH_OPERATION #define DECLARE_MATH_OPERATION(type, function) \ template void cu_Apply(DevPtr<type>, function<type>); #undef DECLARE_MATH_FUNCTION #define DECLARE_MATH_FUNCTION(type, function) \ template void function(DevPtr<type>, const type&); FOR_EACH_TYPE(DECLARE_MATH_OPERATION, AddValTo) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_AddTo) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, SubValFrom) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_SubtractFrom) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, MulByVal) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_MultiplyBy) FOR_EACH_TYPE(DECLARE_MATH_OPERATION, DivByVal) FOR_EACH_TYPE(DECLARE_MATH_FUNCTION, cu_DivideBy) } // image
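The functor-plus-cu_Apply pattern above extends directly to other element-wise operations. The MulAddVal functor below is a hypothetical example in the same style (it is not part of the cuimage sources); it only uses the compound operators the existing functors already rely on, and it assumes the same nvstd::function / __device__ lambda machinery shown in the file.

// Hypothetical fused "scale then offset" functor, written like MulByVal / AddValTo above.
template <typename T>
struct MulAddVal
{
    MulAddVal(const T& scale, const T& offset)
        : scale_(scale)
        , offset_(offset)
    {
    }

    nvstd::function<void(T&)> __device__ op()
    {
        return [*this] __device__(T & val) {
            val *= scale_;   // same element-wise operators used by MulByVal
            val += offset_;  // and AddValTo
        };
    }

private:
    const T scale_;
    const T offset_;
};

// Usage would mirror the existing helpers, e.g.:
//   MulAddVal<float> op(2.0f, 0.5f);
//   cu_Apply(image, op);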
81fe945b9a405d0cc1660326da55d2ca4377de80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sph_header.h" #include "sph_param.h" #include "sph_math.h" #include "sph_kernel.cu" #include <cutil_math.h> #define EXP 2.718281f #define EXT2 1e-12f #define EXT 1e-6f __device__ float wavelet(float input) { float sigma=2.0; return 2.0f/(pow(PI, 0.25f)*pow(3.0f*sigma, 0.5f)) * ((input*input)/(sigma*sigma)-1) * pow(EXP, -(input*input)/(2*sigma*sigma)); } __device__ uint3 calc_grid_pos(float x, float y ,float z) { uint3 cell_pos; cell_pos.x=(uint)floor(x / dev_param.cell_size); cell_pos.y=(uint)floor(y / dev_param.cell_size); cell_pos.z=(uint)floor(z / dev_param.cell_size); return cell_pos; } __device__ uint calc_grid_hash(uint3 cell_pos) { return cell_pos.z*dev_param.row_cell*dev_param.col_cell + cell_pos.y*dev_param.row_cell + cell_pos.x; } void set_parameters(SysParam *host_param) { hipMemcpyToSymbol((char *)&dev_param, host_param, sizeof(SysParam)); } void alloc_array(void **dev_ptr, size_t size) { hipMalloc(dev_ptr, size); } void free_array(void *dev_ptr) { hipFree(dev_ptr); } void copy_array(void *ptr_a, void *ptr_b, size_t size, int type) { if(type == 1) { hipMemcpy(ptr_a, ptr_b, size, hipMemcpyHostToDevice); return; } if(type == 2) { hipMemcpy(ptr_a, ptr_b, size, hipMemcpyDeviceToHost); return; } if(type == 3) { hipMemcpy(ptr_a, ptr_b, size, hipMemcpyDeviceToDevice); return; } return; } void compute_grid_size(uint num_particle, uint block_size, uint &num_blocks, uint &num_threads) { num_threads=min(block_size, num_particle); num_blocks=iDivUp(num_particle, num_threads); } __global__ void calcHashD(uint *dev_hash, uint *dev_index, Particle *dev_mem, uint num_particle) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } uint3 grid_pos=calc_grid_pos(dev_mem[index].pos.x, dev_mem[index].pos.y, dev_mem[index].pos.z); uint hash=calc_grid_hash(grid_pos); dev_hash[index]=hash; dev_index[index]=index; } void calc_hash(uint* dev_hash, uint* dev_index, Particle *dev_mem, uint num_particle) { if(num_particle == 0) { return; } uint num_threads; uint num_blocks; compute_grid_size(num_particle, 256, num_blocks, num_threads); hipLaunchKernelGGL(( calcHashD), dim3(num_blocks), dim3(num_threads) , 0, 0, dev_hash, dev_index, dev_mem, num_particle); } void sort_particles(uint *dev_hash, uint *dev_index, uint num_particle) { if(num_particle == 0) { return; } thrust::sort_by_key(thrust::device_ptr<uint>(dev_hash), thrust::device_ptr<uint>(dev_hash + num_particle), thrust::device_ptr<uint>(dev_index)); } __global__ void find_start_end_kernel(uint *cell_start, uint *cell_end, uint *dev_hash, uint *dev_index, uint num_particle) { extern __shared__ uint shared_hash[]; uint index=blockIdx.x*blockDim.x+threadIdx.x; uint hash; if(index < num_particle) { hash=dev_hash[index]; shared_hash[threadIdx.x+1]=hash; if(index > 0 && threadIdx.x == 0) { shared_hash[0]=dev_hash[index-1]; } } __syncthreads(); if(index < num_particle) { if(index == 0 || hash != shared_hash[threadIdx.x]) { cell_start[hash]=index; if(index > 0) { cell_end[shared_hash[threadIdx.x]]=index; } } if (index == num_particle-1) { cell_end[hash]=index+1; } } } void find_start_end(uint *cell_start, uint *cell_end, uint *dev_hash, uint *dev_index, uint num_particle, uint num_cell) { if(num_particle == 0) { return; } uint num_thread; uint num_block; compute_grid_size(num_particle, 256, num_block, num_thread); hipMemset(cell_start, 0xffffffff, num_cell*sizeof(int)); hipMemset(cell_end, 0x0, num_cell*sizeof(int)); uint 
smemSize=sizeof(int)*(num_thread+1); hipLaunchKernelGGL(( find_start_end_kernel), dim3(num_block), dim3(num_thread), smemSize, 0, cell_start, cell_end, dev_hash, dev_index, num_particle); } __global__ void integrate_velocity_kernel(Particle* dev_mem, uint num_particle) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } Particle *p=&(dev_mem[index]); float radius; if(p->level == 1) { radius=dev_param.large_radius; } if(p->level == 2) { radius=dev_param.small_radius; } //external force if(dev_param.force_waiting_left == true) { if(p->pos.x < dev_param.world_width/3 && p->pos.y > dev_param.world_height/10) { p->acc.x=p->acc.x+20.0f; } } if(dev_param.force_waiting_right == true) { if(p->pos.x > dev_param.world_width/3*2 && p->pos.y > dev_param.world_height/10) { p->acc.x=p->acc.x-20.0f; } } p->vel=p->vel+p->acc*dev_param.time_step/p->dens; p->vel=p->vel+dev_param.gravity*dev_param.time_step/p->dens; p->pos=p->pos+p->vel*dev_param.time_step; if(dev_param.use_cylinder == true) { //handle collision with the sphere 1 float3 dist_vec=dev_mem[index].pos-dev_param.sphere1.pos; float distance=sqrt(dist_vec.x*dist_vec.x+dist_vec.y*dist_vec.y+dist_vec.z*dist_vec.z); if(distance < dev_param.sphere1.radius+radius) { float3 poxyz=dev_mem[index].pos-dev_param.sphere1.pos; float3 dxyz=poxyz/distance*(dev_param.sphere1.radius+radius); float3 normal=dxyz; normal=normalize(normal); dev_mem[index].pos=dev_param.sphere1.pos+dxyz; dev_mem[index].vel=dev_mem[index].vel-dev_param.sphere1.damping*(dot(dev_mem[index].vel, normal))*normal; } //handle collision with the sphere 2 dist_vec=dev_mem[index].pos-dev_param.sphere2.pos; distance=sqrt(dist_vec.x*dist_vec.x+dist_vec.y*dist_vec.y+dist_vec.z*dist_vec.z); if(distance < dev_param.sphere2.radius+radius) { float3 poxyz=dev_mem[index].pos-dev_param.sphere2.pos; float3 dxyz=poxyz/distance*(dev_param.sphere2.radius+radius); float3 normal=dxyz; normal=normalize(normal); dev_mem[index].pos=dev_param.sphere2.pos+dxyz; dev_mem[index].vel=dev_mem[index].vel-dev_param.sphere2.damping*(dot(dev_mem[index].vel, normal))*normal; } //handle collision with the sphere 3 dist_vec=dev_mem[index].pos-dev_param.sphere3.pos; distance=sqrt(dist_vec.x*dist_vec.x+dist_vec.y*dist_vec.y+dist_vec.z*dist_vec.z); if(distance < dev_param.sphere3.radius+radius) { float3 poxyz=dev_mem[index].pos-dev_param.sphere3.pos; float3 dxyz=poxyz/distance*(dev_param.sphere3.radius+radius); float3 normal=dxyz; normal=normalize(normal); dev_mem[index].pos=dev_param.sphere3.pos+dxyz; dev_mem[index].vel=dev_mem[index].vel-dev_param.sphere3.damping*(dot(dev_mem[index].vel, normal))*normal; } } if(p->pos.x >= dev_param.world_width-radius) { p->vel.x=p->vel.x*dev_param.wall_damping; p->pos.x=dev_param.world_width-radius; } if(p->pos.x < radius) { p->vel.x=p->vel.x*dev_param.wall_damping; p->pos.x=radius; } if(p->pos.y >= dev_param.world_height-radius) { p->vel.y=p->vel.y*dev_param.wall_damping; p->pos.y=dev_param.world_height-radius; } if(p->pos.y < radius) { p->vel.y=p->vel.y*dev_param.wall_damping; p->pos.y=radius; } if(p->pos.z >= dev_param.world_length-radius) { p->vel.z=p->vel.z*dev_param.wall_damping; p->pos.z=dev_param.world_length-radius; } if(p->pos.z < radius) { p->vel.z=p->vel.z*dev_param.wall_damping; p->pos.z=radius; } p->ev=(p->ev+p->vel)/2; } void integrate_velocity(Particle *dev_mem, uint num_particle) { if(num_particle == 0) { return; } uint num_thread; uint num_block; compute_grid_size(num_particle, 256, num_block, num_thread); hipLaunchKernelGGL(( 
integrate_velocity_kernel), dim3(num_block), dim3(num_thread) , 0, 0, dev_mem, num_particle); } __device__ float compute_cell_density(uint index, uint3 neighbor, Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell) { uint grid_hash=calc_grid_hash(neighbor); uint start_index=cell_start[grid_hash]; float total_cell_density=0.0f; float i_mass; float i_kernel_2; float i_kernel_6; float i_poly6_value; float j_mass; float3 rel_pos; float r2; Particle *p=&(dev_mem[index]); Particle *np; uint neighbor_index; if(p->level == 1) { i_mass=dev_param.large_mass; i_kernel_2=dev_param.large_kernel_2; i_kernel_6=dev_param.large_kernel_6; i_poly6_value=dev_param.large_poly6; } if(p->level == 2) { i_mass=dev_param.small_mass; i_kernel_2=dev_param.small_kernel_2; i_kernel_6=dev_param.small_kernel_6; i_poly6_value=dev_param.small_poly6; } if(start_index != 0xffffffff) { uint end_index=cell_end[grid_hash]; for(uint count_index=start_index; count_index<end_index; count_index++) { neighbor_index=dev_index[count_index]; np=&(dev_mem[neighbor_index]); if(neighbor_index != index) { rel_pos=np->pos-p->pos; r2=rel_pos.x*rel_pos.x+rel_pos.y*rel_pos.y+rel_pos.z*rel_pos.z; if(r2 < 0.0001f) { continue; } if(r2 < i_kernel_2) { if(np->level == 1) { j_mass=dev_param.large_mass; } if(np->level == 2) { j_mass=dev_param.small_mass; } total_cell_density=total_cell_density + j_mass * i_poly6_value * pow(i_kernel_2-r2, 3); } } if(neighbor_index == index) { total_cell_density=total_cell_density + i_mass * i_poly6_value * i_kernel_6; } } } return total_cell_density; } __global__ void compute_density_kernel(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } uint3 grid_pos=calc_grid_pos(dev_mem[index].pos.x, dev_mem[index].pos.y, dev_mem[index].pos.z); uint count; uint3 neighbor_pos[27]; uint neighbor_status[27]; for(count=0; count<27; count++) { neighbor_status[count]=1; } neighbor_pos[0].x=grid_pos.x; neighbor_pos[0].y=grid_pos.y; neighbor_pos[0].z=grid_pos.z; neighbor_pos[1].x=grid_pos.x-1; neighbor_pos[1].y=grid_pos.y-1; neighbor_pos[1].z=grid_pos.z; neighbor_pos[2].x=grid_pos.x-1; neighbor_pos[2].y=grid_pos.y; neighbor_pos[2].z=grid_pos.z; neighbor_pos[3].x=grid_pos.x; neighbor_pos[3].y=grid_pos.y-1; neighbor_pos[3].z=grid_pos.z; neighbor_pos[4].x=grid_pos.x+1; neighbor_pos[4].y=grid_pos.y+1; neighbor_pos[4].z=grid_pos.z; neighbor_pos[5].x=grid_pos.x+1; neighbor_pos[5].y=grid_pos.y; neighbor_pos[5].z=grid_pos.z; neighbor_pos[6].x=grid_pos.x; neighbor_pos[6].y=grid_pos.y+1; neighbor_pos[6].z=grid_pos.z; neighbor_pos[7].x=grid_pos.x+1; neighbor_pos[7].y=grid_pos.y-1; neighbor_pos[7].z=grid_pos.z; neighbor_pos[8].x=grid_pos.x-1; neighbor_pos[8].y=grid_pos.y+1; neighbor_pos[8].z=grid_pos.z; neighbor_pos[9].x=grid_pos.x; neighbor_pos[9].y=grid_pos.y; neighbor_pos[9].z=grid_pos.z-1; neighbor_pos[10].x=grid_pos.x-1; neighbor_pos[10].y=grid_pos.y-1; neighbor_pos[10].z=grid_pos.z-1; neighbor_pos[11].x=grid_pos.x-1; neighbor_pos[11].y=grid_pos.y; neighbor_pos[11].z=grid_pos.z-1; neighbor_pos[12].x=grid_pos.x; neighbor_pos[12].y=grid_pos.y-1; neighbor_pos[12].z=grid_pos.z-1; neighbor_pos[13].x=grid_pos.x+1; neighbor_pos[13].y=grid_pos.y+1; neighbor_pos[13].z=grid_pos.z-1; neighbor_pos[14].x=grid_pos.x+1; neighbor_pos[14].y=grid_pos.y; neighbor_pos[14].z=grid_pos.z-1; neighbor_pos[15].x=grid_pos.x; 
neighbor_pos[15].y=grid_pos.y+1; neighbor_pos[15].z=grid_pos.z-1; neighbor_pos[16].x=grid_pos.x+1; neighbor_pos[16].y=grid_pos.y-1; neighbor_pos[16].z=grid_pos.z-1; neighbor_pos[17].x=grid_pos.x-1; neighbor_pos[17].y=grid_pos.y+1; neighbor_pos[17].z=grid_pos.z-1; neighbor_pos[18].x=grid_pos.x; neighbor_pos[18].y=grid_pos.y; neighbor_pos[18].z=grid_pos.z+1; neighbor_pos[19].x=grid_pos.x-1; neighbor_pos[19].y=grid_pos.y-1; neighbor_pos[19].z=grid_pos.z+1; neighbor_pos[20].x=grid_pos.x-1; neighbor_pos[20].y=grid_pos.y; neighbor_pos[20].z=grid_pos.z+1; neighbor_pos[21].x=grid_pos.x; neighbor_pos[21].y=grid_pos.y-1; neighbor_pos[21].z=grid_pos.z+1; neighbor_pos[22].x=grid_pos.x+1; neighbor_pos[22].y=grid_pos.y+1; neighbor_pos[22].z=grid_pos.z+1; neighbor_pos[23].x=grid_pos.x+1; neighbor_pos[23].y=grid_pos.y; neighbor_pos[23].z=grid_pos.z+1; neighbor_pos[24].x=grid_pos.x; neighbor_pos[24].y=grid_pos.y+1; neighbor_pos[24].z=grid_pos.z+1; neighbor_pos[25].x=grid_pos.x+1; neighbor_pos[25].y=grid_pos.y-1; neighbor_pos[25].z=grid_pos.z+1; neighbor_pos[26].x=grid_pos.x-1; neighbor_pos[26].y=grid_pos.y+1; neighbor_pos[26].z=grid_pos.z+1; if(grid_pos.x == 0) { neighbor_status[1]=0; neighbor_status[2]=0; neighbor_status[8]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[17]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[26]=0; } if(grid_pos.x == dev_param.row_cell-1) { neighbor_status[4]=0; neighbor_status[5]=0; neighbor_status[7]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[16]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[25]=0; } if(grid_pos.y == 0) { neighbor_status[1]=0; neighbor_status[3]=0; neighbor_status[7]=0; neighbor_status[10]=0; neighbor_status[12]=0; neighbor_status[16]=0; neighbor_status[19]=0; neighbor_status[21]=0; neighbor_status[25]=0; } if(grid_pos.y == dev_param.col_cell-1) { neighbor_status[4]=0; neighbor_status[6]=0; neighbor_status[8]=0; neighbor_status[13]=0; neighbor_status[15]=0; neighbor_status[17]=0; neighbor_status[22]=0; neighbor_status[24]=0; neighbor_status[26]=0; } if(grid_pos.z == 0) { neighbor_status[9]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[12]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[15]=0; neighbor_status[16]=0; neighbor_status[17]=0; } if(grid_pos.z == dev_param.len_cell-1) { neighbor_status[18]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[21]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[24]=0; neighbor_status[25]=0; neighbor_status[26]=0; } float total_density=0; for(count=0; count<27; count++) { if(neighbor_status[count] == 0) { continue; } total_density=total_density+compute_cell_density(index, neighbor_pos[count], dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell); } dev_mem[index].dens=total_density; dev_mem[index].press=(pow(dev_mem[index].dens / dev_param.rest_density, 3) - 1) * dev_param.gas_constant; //dev_mem[index].press=(dev_mem[index].dens / dev_param.rest_density - 1) * dev_param.gas_constant; } __device__ float3 compute_cell_force(uint index, uint3 neighbor, Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, float4 *surface) { uint grid_hash=calc_grid_hash(neighbor); uint start_index=cell_start[grid_hash]; float3 total_cell_force=make_float3(0.0f); float i_kernel; float i_mass; float i_kernel_2; float i_kernel_6; float i_poly6_value; float i_spiky_value; float i_visco_value; float 
i_grad_poly6; float i_lapc_poly6; float j_kernel; float j_mass; float j_kernel_2; float j_kernel_6; float j_spiky_value; float j_visco_value; float i_press_kernel; float i_visco_kernel; float j_press_kernel; float j_visco_kernel; float i_kernel_r; float j_kernel_r; float iV; float jV; uint neighbor_index; Particle *p=&(dev_mem[index]); Particle *np; float3 rel_pos; float r2; float r; float temp_force; float3 rel_vel; if(p->level == 1) { i_kernel=dev_param.large_kernel; i_mass=dev_param.large_mass; i_kernel_2=dev_param.large_kernel_2; i_kernel_6=dev_param.large_kernel_6; i_poly6_value=dev_param.large_poly6; i_spiky_value=dev_param.large_spiky; i_visco_value=dev_param.large_visco; i_grad_poly6=dev_param.large_grad_poly6; i_lapc_poly6=dev_param.large_lapc_poly6; } if(p->level == 2) { i_kernel=dev_param.small_kernel; i_mass=dev_param.small_mass; i_kernel_2=dev_param.small_kernel_2; i_kernel_6=dev_param.small_kernel_6; i_poly6_value=dev_param.small_poly6; i_spiky_value=dev_param.small_spiky; i_visco_value=dev_param.small_visco; i_grad_poly6=dev_param.small_grad_poly6; i_lapc_poly6=dev_param.small_lapc_poly6; } if(start_index != 0xffffffff) { uint end_index=cell_end[grid_hash]; for(uint count_index=start_index; count_index<end_index; count_index++) { neighbor_index=dev_index[count_index]; np=&(dev_mem[neighbor_index]); if(neighbor_index != index) { rel_pos=p->pos-np->pos; r2=rel_pos.x*rel_pos.x+rel_pos.y*rel_pos.y+rel_pos.z*rel_pos.z; if(np->level == 1) { j_kernel=dev_param.large_kernel; j_mass=dev_param.large_mass; j_kernel_2=dev_param.large_kernel_2; j_kernel_6=dev_param.large_kernel_6; j_spiky_value=dev_param.large_spiky; j_visco_value=dev_param.large_visco; } if(np->level == 2) { j_kernel=dev_param.small_kernel; j_mass=dev_param.small_mass; j_kernel_2=dev_param.small_kernel_2; j_kernel_6=dev_param.small_kernel_6; j_spiky_value=dev_param.small_spiky; j_visco_value=dev_param.small_visco; } float max_kernel_2=i_kernel_2>j_kernel_2?i_kernel_2:j_kernel_2; if(r2 < max_kernel_2) { if(r2 < 0.0001f) { continue; } else { r=sqrt(r2); } iV=i_mass/p->dens; jV=j_mass/np->dens; i_kernel_r=i_kernel-r; j_kernel_r=j_kernel-r; if(i_kernel_r > 0) { i_press_kernel=i_spiky_value * i_kernel_r * i_kernel_r / i_kernel_6; i_visco_kernel=i_visco_value/i_kernel_6*(i_kernel_r); //surface tension float temp=(-1) * i_grad_poly6 * j_mass / np->dens * pow(i_kernel_2-r2, 2); surface->x += temp * rel_pos.x; surface->y += temp * rel_pos.y; surface->z += temp * rel_pos.z; surface->w += i_lapc_poly6 * j_mass / np->dens * (i_kernel_2-r2) * (r2-3/4*(i_kernel_2-r2)); } else { i_press_kernel=0.0f; i_visco_kernel=0.0f; } if(j_kernel_r > 0) { j_press_kernel=j_spiky_value * j_kernel_r * j_kernel_r / j_kernel_6; j_visco_kernel=j_visco_value/j_kernel_6*(j_kernel_r); } else { j_press_kernel=0.0f; j_visco_kernel=0.0f; } temp_force=i_mass*j_mass * (p->press/(p->dens*p->dens)+np->press/(np->dens*np->dens)) * (i_press_kernel+j_press_kernel)/2; total_cell_force=total_cell_force-rel_pos*temp_force/r; rel_vel=np->ev-p->ev; temp_force=(iV*jV) * dev_param.viscosity * (i_visco_kernel+j_visco_kernel)/2; total_cell_force=total_cell_force + rel_vel*temp_force; } } if(neighbor_index == index) { } } } return total_cell_force; } __global__ void compute_force_kernel(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } uint3 grid_pos=calc_grid_pos(dev_mem[index].pos.x, dev_mem[index].pos.y, 
dev_mem[index].pos.z); uint count; uint3 neighbor_pos[27]; uint neighbor_status[27]; for(count=0; count<27; count++) { neighbor_status[count]=1; } neighbor_pos[0].x=grid_pos.x; neighbor_pos[0].y=grid_pos.y; neighbor_pos[0].z=grid_pos.z; neighbor_pos[1].x=grid_pos.x-1; neighbor_pos[1].y=grid_pos.y-1; neighbor_pos[1].z=grid_pos.z; neighbor_pos[2].x=grid_pos.x-1; neighbor_pos[2].y=grid_pos.y; neighbor_pos[2].z=grid_pos.z; neighbor_pos[3].x=grid_pos.x; neighbor_pos[3].y=grid_pos.y-1; neighbor_pos[3].z=grid_pos.z; neighbor_pos[4].x=grid_pos.x+1; neighbor_pos[4].y=grid_pos.y+1; neighbor_pos[4].z=grid_pos.z; neighbor_pos[5].x=grid_pos.x+1; neighbor_pos[5].y=grid_pos.y; neighbor_pos[5].z=grid_pos.z; neighbor_pos[6].x=grid_pos.x; neighbor_pos[6].y=grid_pos.y+1; neighbor_pos[6].z=grid_pos.z; neighbor_pos[7].x=grid_pos.x+1; neighbor_pos[7].y=grid_pos.y-1; neighbor_pos[7].z=grid_pos.z; neighbor_pos[8].x=grid_pos.x-1; neighbor_pos[8].y=grid_pos.y+1; neighbor_pos[8].z=grid_pos.z; neighbor_pos[9].x=grid_pos.x; neighbor_pos[9].y=grid_pos.y; neighbor_pos[9].z=grid_pos.z-1; neighbor_pos[10].x=grid_pos.x-1; neighbor_pos[10].y=grid_pos.y-1; neighbor_pos[10].z=grid_pos.z-1; neighbor_pos[11].x=grid_pos.x-1; neighbor_pos[11].y=grid_pos.y; neighbor_pos[11].z=grid_pos.z-1; neighbor_pos[12].x=grid_pos.x; neighbor_pos[12].y=grid_pos.y-1; neighbor_pos[12].z=grid_pos.z-1; neighbor_pos[13].x=grid_pos.x+1; neighbor_pos[13].y=grid_pos.y+1; neighbor_pos[13].z=grid_pos.z-1; neighbor_pos[14].x=grid_pos.x+1; neighbor_pos[14].y=grid_pos.y; neighbor_pos[14].z=grid_pos.z-1; neighbor_pos[15].x=grid_pos.x; neighbor_pos[15].y=grid_pos.y+1; neighbor_pos[15].z=grid_pos.z-1; neighbor_pos[16].x=grid_pos.x+1; neighbor_pos[16].y=grid_pos.y-1; neighbor_pos[16].z=grid_pos.z-1; neighbor_pos[17].x=grid_pos.x-1; neighbor_pos[17].y=grid_pos.y+1; neighbor_pos[17].z=grid_pos.z-1; neighbor_pos[18].x=grid_pos.x; neighbor_pos[18].y=grid_pos.y; neighbor_pos[18].z=grid_pos.z+1; neighbor_pos[19].x=grid_pos.x-1; neighbor_pos[19].y=grid_pos.y-1; neighbor_pos[19].z=grid_pos.z+1; neighbor_pos[20].x=grid_pos.x-1; neighbor_pos[20].y=grid_pos.y; neighbor_pos[20].z=grid_pos.z+1; neighbor_pos[21].x=grid_pos.x; neighbor_pos[21].y=grid_pos.y-1; neighbor_pos[21].z=grid_pos.z+1; neighbor_pos[22].x=grid_pos.x+1; neighbor_pos[22].y=grid_pos.y+1; neighbor_pos[22].z=grid_pos.z+1; neighbor_pos[23].x=grid_pos.x+1; neighbor_pos[23].y=grid_pos.y; neighbor_pos[23].z=grid_pos.z+1; neighbor_pos[24].x=grid_pos.x; neighbor_pos[24].y=grid_pos.y+1; neighbor_pos[24].z=grid_pos.z+1; neighbor_pos[25].x=grid_pos.x+1; neighbor_pos[25].y=grid_pos.y-1; neighbor_pos[25].z=grid_pos.z+1; neighbor_pos[26].x=grid_pos.x-1; neighbor_pos[26].y=grid_pos.y+1; neighbor_pos[26].z=grid_pos.z+1; if(grid_pos.x == 0) { neighbor_status[1]=0; neighbor_status[2]=0; neighbor_status[8]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[17]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[26]=0; } if(grid_pos.x == dev_param.row_cell-1) { neighbor_status[4]=0; neighbor_status[5]=0; neighbor_status[7]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[16]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[25]=0; } if(grid_pos.y == 0) { neighbor_status[1]=0; neighbor_status[3]=0; neighbor_status[7]=0; neighbor_status[10]=0; neighbor_status[12]=0; neighbor_status[16]=0; neighbor_status[19]=0; neighbor_status[21]=0; neighbor_status[25]=0; } if(grid_pos.y == dev_param.col_cell-1) { neighbor_status[4]=0; neighbor_status[6]=0; neighbor_status[8]=0; 
neighbor_status[13]=0; neighbor_status[15]=0; neighbor_status[17]=0; neighbor_status[22]=0; neighbor_status[24]=0; neighbor_status[26]=0; } if(grid_pos.z == 0) { neighbor_status[9]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[12]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[15]=0; neighbor_status[16]=0; neighbor_status[17]=0; } if(grid_pos.z == dev_param.len_cell-1) { neighbor_status[18]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[21]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[24]=0; neighbor_status[25]=0; neighbor_status[26]=0; } float3 total_force=make_float3(0.0f, 0.0f, 0.0f); float4 surface=make_float4(0.0f, 0.0f, 0.0f, 0.0f); for(count=0; count<27; count++) { if(neighbor_status[count] == 0) { continue; } total_force=total_force+compute_cell_force(index, neighbor_pos[count], dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, &surface); } float3 grad; float lapc; grad.x=surface.x; grad.y=surface.y; grad.z=surface.z; lapc=surface.w; float3 force; float normal; normal=sqrt(grad.x*grad.x+grad.y*grad.y+grad.z*grad.z); dev_mem[index].surface=normal; if(normal > dev_param.surface_tension) { force=0.02f * lapc * grad / normal; } else { force=make_float3(0.0f, 0.0f, 0.0f); } dev_mem[index].acc=total_force+force; } void compute(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell) { uint num_thread; uint num_block; compute_grid_size(num_particle, 256, num_block, num_thread); hipLaunchKernelGGL(( compute_density_kernel), dim3(num_block), dim3(num_thread) , 0, 0, dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell); hipLaunchKernelGGL(( compute_force_kernel), dim3(num_block), dim3(num_thread) , 0, 0, dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell); } __device__ float3 compute_cell_energy(uint index, uint3 neighbor, Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, float3 *trans_vel) { uint grid_hash=calc_grid_hash(neighbor); uint start_index=cell_start[grid_hash]; float3 cell_trans=make_float3(0.0f, 0.0f, 0.0f); float3 transform=make_float3(0.0f, 0.0f, 0.0f); uint neighbor_index; Particle *p=&(dev_mem[index]); Particle *np; float3 rel_pos; float r2; float r; float kernel; float kernel2; if(p->level == 1) { kernel=dev_param.large_kernel; kernel2=dev_param.large_kernel_2; } if(p->level == 2) { kernel=dev_param.large_kernel; kernel2=dev_param.large_kernel_2; } if(start_index != 0xffffffff) { uint end_index=cell_end[grid_hash]; for(uint count_index=start_index; count_index<end_index; count_index++) { neighbor_index=dev_index[count_index]; np=&(dev_mem[neighbor_index]); if(neighbor_index != index) { rel_pos=p->pos-np->pos; r2=rel_pos.x*rel_pos.x+rel_pos.y*rel_pos.y+rel_pos.z*rel_pos.z; if(r2 < kernel2) { if(r2 < 0.0001f) { continue; } else { r=sqrt(r2); } transform.x=wavelet((p->pos.x-np->pos.x)/kernel); transform.y=wavelet((p->pos.y-np->pos.y)/kernel); transform.z=wavelet((p->pos.z-np->pos.z)/kernel); cell_trans=cell_trans+transform; trans_vel[index].x=trans_vel[index].x+(np->vel.x*transform.x/pow(kernel, 0.5f)); trans_vel[index].y=trans_vel[index].y+(np->vel.y*transform.y/pow(kernel, 0.5f)); trans_vel[index].z=trans_vel[index].z+(np->vel.z*transform.z/pow(kernel, 0.5f)); } } if(neighbor_index == index) { } } } return cell_trans; } __global__ void compute_energy_kernel(Particle *dev_mem, uint 
*dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, float3 *trans_vel) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } uint3 grid_pos=calc_grid_pos(dev_mem[index].pos.x, dev_mem[index].pos.y, dev_mem[index].pos.z); uint count; uint3 neighbor_pos[27]; uint neighbor_status[27]; for(count=0; count<27; count++) { neighbor_status[count]=1; } neighbor_pos[0].x=grid_pos.x; neighbor_pos[0].y=grid_pos.y; neighbor_pos[0].z=grid_pos.z; neighbor_pos[1].x=grid_pos.x-1; neighbor_pos[1].y=grid_pos.y-1; neighbor_pos[1].z=grid_pos.z; neighbor_pos[2].x=grid_pos.x-1; neighbor_pos[2].y=grid_pos.y; neighbor_pos[2].z=grid_pos.z; neighbor_pos[3].x=grid_pos.x; neighbor_pos[3].y=grid_pos.y-1; neighbor_pos[3].z=grid_pos.z; neighbor_pos[4].x=grid_pos.x+1; neighbor_pos[4].y=grid_pos.y+1; neighbor_pos[4].z=grid_pos.z; neighbor_pos[5].x=grid_pos.x+1; neighbor_pos[5].y=grid_pos.y; neighbor_pos[5].z=grid_pos.z; neighbor_pos[6].x=grid_pos.x; neighbor_pos[6].y=grid_pos.y+1; neighbor_pos[6].z=grid_pos.z; neighbor_pos[7].x=grid_pos.x+1; neighbor_pos[7].y=grid_pos.y-1; neighbor_pos[7].z=grid_pos.z; neighbor_pos[8].x=grid_pos.x-1; neighbor_pos[8].y=grid_pos.y+1; neighbor_pos[8].z=grid_pos.z; neighbor_pos[9].x=grid_pos.x; neighbor_pos[9].y=grid_pos.y; neighbor_pos[9].z=grid_pos.z-1; neighbor_pos[10].x=grid_pos.x-1; neighbor_pos[10].y=grid_pos.y-1; neighbor_pos[10].z=grid_pos.z-1; neighbor_pos[11].x=grid_pos.x-1; neighbor_pos[11].y=grid_pos.y; neighbor_pos[11].z=grid_pos.z-1; neighbor_pos[12].x=grid_pos.x; neighbor_pos[12].y=grid_pos.y-1; neighbor_pos[12].z=grid_pos.z-1; neighbor_pos[13].x=grid_pos.x+1; neighbor_pos[13].y=grid_pos.y+1; neighbor_pos[13].z=grid_pos.z-1; neighbor_pos[14].x=grid_pos.x+1; neighbor_pos[14].y=grid_pos.y; neighbor_pos[14].z=grid_pos.z-1; neighbor_pos[15].x=grid_pos.x; neighbor_pos[15].y=grid_pos.y+1; neighbor_pos[15].z=grid_pos.z-1; neighbor_pos[16].x=grid_pos.x+1; neighbor_pos[16].y=grid_pos.y-1; neighbor_pos[16].z=grid_pos.z-1; neighbor_pos[17].x=grid_pos.x-1; neighbor_pos[17].y=grid_pos.y+1; neighbor_pos[17].z=grid_pos.z-1; neighbor_pos[18].x=grid_pos.x; neighbor_pos[18].y=grid_pos.y; neighbor_pos[18].z=grid_pos.z+1; neighbor_pos[19].x=grid_pos.x-1; neighbor_pos[19].y=grid_pos.y-1; neighbor_pos[19].z=grid_pos.z+1; neighbor_pos[20].x=grid_pos.x-1; neighbor_pos[20].y=grid_pos.y; neighbor_pos[20].z=grid_pos.z+1; neighbor_pos[21].x=grid_pos.x; neighbor_pos[21].y=grid_pos.y-1; neighbor_pos[21].z=grid_pos.z+1; neighbor_pos[22].x=grid_pos.x+1; neighbor_pos[22].y=grid_pos.y+1; neighbor_pos[22].z=grid_pos.z+1; neighbor_pos[23].x=grid_pos.x+1; neighbor_pos[23].y=grid_pos.y; neighbor_pos[23].z=grid_pos.z+1; neighbor_pos[24].x=grid_pos.x; neighbor_pos[24].y=grid_pos.y+1; neighbor_pos[24].z=grid_pos.z+1; neighbor_pos[25].x=grid_pos.x+1; neighbor_pos[25].y=grid_pos.y-1; neighbor_pos[25].z=grid_pos.z+1; neighbor_pos[26].x=grid_pos.x-1; neighbor_pos[26].y=grid_pos.y+1; neighbor_pos[26].z=grid_pos.z+1; if(grid_pos.x == 0) { neighbor_status[1]=0; neighbor_status[2]=0; neighbor_status[8]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[17]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[26]=0; } if(grid_pos.x == dev_param.row_cell-1) { neighbor_status[4]=0; neighbor_status[5]=0; neighbor_status[7]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[16]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[25]=0; } if(grid_pos.y == 0) { neighbor_status[1]=0; neighbor_status[3]=0; 
neighbor_status[7]=0; neighbor_status[10]=0; neighbor_status[12]=0; neighbor_status[16]=0; neighbor_status[19]=0; neighbor_status[21]=0; neighbor_status[25]=0; } if(grid_pos.y == dev_param.col_cell-1) { neighbor_status[4]=0; neighbor_status[6]=0; neighbor_status[8]=0; neighbor_status[13]=0; neighbor_status[15]=0; neighbor_status[17]=0; neighbor_status[22]=0; neighbor_status[24]=0; neighbor_status[26]=0; } if(grid_pos.z == 0) { neighbor_status[9]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[12]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[15]=0; neighbor_status[16]=0; neighbor_status[17]=0; } if(grid_pos.z == dev_param.len_cell-1) { neighbor_status[18]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[21]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[24]=0; neighbor_status[25]=0; neighbor_status[26]=0; } float3 total_force=make_float3(0.0f, 0.0f, 0.0f); float3 total_trans=make_float3(0.0f, 0.0f, 0.0f); trans_vel[index]=make_float3(0.0f, 0.0f, 0.0f); for(count=0; count<27; count++) { if(neighbor_status[count] == 0) { continue; } total_trans=total_trans+compute_cell_energy(index, neighbor_pos[count], dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, trans_vel); } float mass; if(dev_mem[index].level == 1) { mass=dev_param.large_mass; } if(dev_mem[index].level == 2) { mass=dev_param.small_mass; } trans_vel[index]=trans_vel[index]/total_trans; dev_mem[index].energy=0.5f*mass*(trans_vel[index].x*trans_vel[index].x+trans_vel[index].y*trans_vel[index].y); } void compute_energy(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, float3 *trans_vel) { uint num_thread; uint num_block; compute_grid_size(num_particle, 256, num_block, num_thread); hipLaunchKernelGGL(( compute_energy_kernel), dim3(num_block), dim3(num_thread), 0, 0, dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, trans_vel); } __global__ void decide_adaptive_kernel(Particle *dev_mem, uint num_particle, uint *dev_split, uint *dev_index) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } float co_surface=dev_param.co_surface; float co_energy=dev_param.co_energy; if(dev_mem[index].energy*co_energy+dev_mem[index].surface*co_surface > dev_param.split_criteria && dev_mem[index].level == 1) { dev_split[index]=0; } else { dev_split[index]=1; } dev_index[index]=index; } __global__ void adaptive_particle_kernel(Particle *dev_mem, uint *dev_split, uint *dev_index, uint num_particle, uint num_split, uint *dev_status) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_split) { return; } uint new1st=dev_index[index]; //uint new2nd=num_particle+index; uint new2nd=num_particle+index*3+0; uint new3rd=num_particle+index*3+1; uint new4th=num_particle+index*3+2; //2 dev_mem[new2nd].level=2; dev_mem[new2nd].pos.x=dev_mem[new1st].pos.x-dev_param.small_kernel/2; dev_mem[new2nd].pos.y=dev_mem[new1st].pos.y; dev_mem[new2nd].pos.z=dev_mem[new1st].pos.z; dev_mem[new2nd].vel=dev_mem[new1st].vel; dev_mem[new2nd].ev=dev_mem[new1st].ev; dev_mem[new2nd].acc=dev_mem[new1st].acc; dev_mem[new2nd].energy=dev_mem[new1st].energy/4; dev_status[new2nd]=0; //3 dev_mem[new3rd].level=2; dev_mem[new3rd].pos.x=dev_mem[new1st].pos.x; dev_mem[new3rd].pos.y=dev_mem[new1st].pos.y; dev_mem[new3rd].pos.z=dev_mem[new1st].pos.z-dev_param.small_kernel/2; dev_mem[new3rd].vel=dev_mem[new1st].vel; dev_mem[new3rd].ev=dev_mem[new1st].ev; 
dev_mem[new3rd].acc=dev_mem[new1st].acc; dev_mem[new3rd].energy=dev_mem[new1st].energy/4; dev_status[new3rd]=0; //4 dev_mem[new4th].level=2; dev_mem[new4th].pos.x=dev_mem[new1st].pos.x; dev_mem[new4th].pos.y=dev_mem[new1st].pos.y; dev_mem[new4th].pos.z=dev_mem[new1st].pos.z+dev_param.small_kernel/2; dev_mem[new4th].vel=dev_mem[new1st].vel; dev_mem[new4th].ev=dev_mem[new1st].ev; dev_mem[new4th].acc=dev_mem[new1st].acc; dev_mem[new4th].energy=dev_mem[new1st].energy/4; dev_status[new4th]=0; //1 dev_mem[new1st].level=2; dev_mem[new1st].pos.x=dev_mem[new1st].pos.x+dev_param.small_kernel/2; dev_mem[new1st].pos.y=dev_mem[new1st].pos.y; dev_mem[new1st].energy=dev_mem[new1st].energy/4; dev_status[new1st]=0; } uint adaptive_particle(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint *dev_split, uint *dev_status) { if(num_particle == 0) { return num_particle; } uint num_thread; uint num_block; compute_grid_size(num_particle, 256, num_block, num_thread); hipLaunchKernelGGL(( decide_adaptive_kernel), dim3(num_block), dim3(num_thread) , 0, 0, dev_mem, num_particle, dev_split, dev_index); uint num_split=num_particle-thrust::reduce(thrust::device_ptr<uint>(dev_split), thrust::device_ptr<uint>(dev_split + num_particle), (uint) 0, thrust::plus<uint>()); thrust::sort_by_key(thrust::device_ptr<uint>(dev_split), thrust::device_ptr<uint>(dev_split + num_particle), thrust::device_ptr<uint>(dev_index)); if(num_split != 0) { compute_grid_size(num_split, 256, num_block, num_thread); hipLaunchKernelGGL(( adaptive_particle_kernel), dim3(num_block), dim3(num_thread) , 0, 0, dev_mem, dev_split, dev_index, num_particle, num_split, dev_status); num_particle=num_particle+num_split*3; } return num_particle; } __global__ void merge_kernel(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint *cell_merge, uint *dev_status) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= total_cell) { return; } uint start_index=cell_start[index]; cell_merge[index]=0; float co_surface=dev_param.co_surface; float co_energy=dev_param.co_energy; if(start_index != 0xffffffff) { uint end_index=cell_end[index]; for(uint count_index=start_index; count_index<end_index; count_index++) { uint p_index=dev_index[count_index]; dev_status[p_index]=0; if(dev_mem[p_index].level != 1 && dev_mem[p_index].energy*co_energy+dev_mem[p_index].surface*co_surface < dev_param.merge_criteria) { cell_merge[index]++; } } if(cell_merge[index] < 4) { return; } float num_merge=cell_merge[index] / 4; uint current=start_index; uint p1st; uint p2nd; uint p3rd; uint p4th; uint p_index; while(num_merge > 0) { //1 while(current < end_index) { p_index=dev_index[current]; if(dev_mem[p_index].level != 1 && dev_mem[p_index].energy*co_energy+dev_mem[p_index].surface*co_surface < dev_param.merge_criteria) { p1st=p_index; current++; break; } current++; } //2 while(current < end_index) { p_index=dev_index[current]; if(dev_mem[p_index].level != 1 && dev_mem[p_index].energy*co_energy+dev_mem[p_index].surface*co_surface < dev_param.merge_criteria) { p2nd=p_index; current++; break; } current++; } //3 while(current < end_index) { p_index=dev_index[current]; if(dev_mem[p_index].level != 1 && dev_mem[p_index].energy*co_energy+dev_mem[p_index].surface*co_surface < dev_param.merge_criteria) { p3rd=p_index; current++; break; } current++; } //4 while(current < end_index) { p_index=dev_index[current]; if(dev_mem[p_index].level != 1 && 
dev_mem[p_index].energy*co_energy+dev_mem[p_index].surface*co_surface < dev_param.merge_criteria) { p4th=p_index; current++; break; } current++; } dev_mem[p1st].level=1; /*dev_mem[p1st].pos=(dev_mem[p1st].pos+dev_mem[p2nd].pos)/2; dev_mem[p1st].vel=(dev_mem[p1st].vel+dev_mem[p2nd].vel)/2; dev_mem[p1st].ev=(dev_mem[p1st].ev+dev_mem[p2nd].ev)/2; dev_mem[p1st].acc=(dev_mem[p1st].acc+dev_mem[p2nd].acc)/2;*/ ////// float V1; float V2; float V3; float V4; if(dev_mem[p1st].level == 1) { V1=dev_param.large_mass/dev_mem[p1st].dens; } if(dev_mem[p1st].level == 2) { V1=dev_param.small_mass/dev_mem[p1st].dens; } if(dev_mem[p2nd].level == 1) { V2=dev_param.large_mass/dev_mem[p2nd].dens; } if(dev_mem[p2nd].level == 2) { V2=dev_param.small_mass/dev_mem[p2nd].dens; } if(dev_mem[p3rd].level == 1) { V3=dev_param.large_mass/dev_mem[p3rd].dens; } if(dev_mem[p3rd].level == 2) { V3=dev_param.small_mass/dev_mem[p3rd].dens; } if(dev_mem[p4th].level == 1) { V4=dev_param.large_mass/dev_mem[p4th].dens; } if(dev_mem[p4th].level == 2) { V4=dev_param.small_mass/dev_mem[p4th].dens; } dev_mem[p1st].pos=(dev_mem[p1st].pos*V1+dev_mem[p2nd].pos*V2+dev_mem[p3rd].pos*V3+dev_mem[p4th].pos*V4)/(V1+V2+V3+V4); dev_mem[p1st].vel=(dev_mem[p1st].vel*V1+dev_mem[p2nd].vel*V2+dev_mem[p3rd].vel*V3+dev_mem[p4th].vel*V4)/(V1+V2+V3+V4); dev_mem[p1st].ev=(dev_mem[p1st].ev*V1+dev_mem[p2nd].ev*V2+dev_mem[p3rd].ev*V3+dev_mem[p4th].ev*V4)/(V1+V2+V3+V4); dev_mem[p1st].acc=(dev_mem[p1st].acc*V1+dev_mem[p2nd].acc*V2+dev_mem[p3rd].acc*V3+dev_mem[p4th].acc*V4)/(V1+V2+V3+V4); ////// dev_status[p1st]=0; dev_status[p2nd]=1; dev_status[p3rd]=1; dev_status[p4th]=1; num_merge--; } } } void merge(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint *cell_merge, uint *dev_status) { uint num_thread; uint num_block; compute_grid_size(total_cell, 256, num_block, num_thread); hipLaunchKernelGGL(( merge_kernel), dim3(num_block), dim3(num_thread), 0, 0, dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, cell_merge, dev_status); } uint rearrange(Particle *dev_mem, uint *dev_status, uint num_particle) { if(num_particle == 0) { return num_particle; } uint temp_num_particle=num_particle; uint num_threads; uint num_blocks; compute_grid_size(temp_num_particle, 256, num_blocks, num_threads); thrust::sort_by_key(thrust::device_ptr<uint>(dev_status), thrust::device_ptr<uint>(dev_status + temp_num_particle), thrust::device_ptr<Particle>(dev_mem)); temp_num_particle=thrust::reduce(thrust::device_ptr<uint>(dev_status), thrust::device_ptr<uint>(dev_status + temp_num_particle), (uint) 0, thrust::plus<uint>()); num_particle=num_particle-temp_num_particle; return num_particle; } __device__ void integrate_cell_dens(float3 dens_pos, uint index, uint3 cell_pos, Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint row_dens, uint col_dens, uint len_dens, uint tot_dens, float den_size, float *dev_dens) { Particle *p; uint particle_index; float radius2; float mass; float poly6_value; uint cell_index=calc_grid_hash(cell_pos); uint start_index=cell_start[cell_index]; if(start_index != 0xffffffff) { uint end_index=cell_end[cell_index]; for(uint count_index=start_index; count_index<end_index; count_index++) { particle_index=dev_index[count_index]; p=&(dev_mem[particle_index]); if(p->level == 1) { radius2=dev_param.large_radius*dev_param.large_radius; //radius2=dev_param.large_kernel_2; mass=dev_param.large_mass; 
poly6_value=dev_param.large_poly6_radius; } if(p->level == 2) { radius2=dev_param.small_radius*dev_param.small_radius; //radius2=dev_param.small_kernel_2; mass=dev_param.small_mass; poly6_value=dev_param.small_poly6_radius; } float3 rel_pos=p->pos-dens_pos; float r2=rel_pos.x*rel_pos.x+rel_pos.y*rel_pos.y+rel_pos.z*rel_pos.z; if(r2 < radius2) { dev_dens[index]+=mass*poly6_value*pow(radius2-r2, 3); } } } } __global__ void integrate_dens_kernel(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint row_dens, uint col_dens, uint len_dens, uint tot_dens, float den_size, float *dev_dens, float3 *dev_dens_pos) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= tot_dens) { return; } uint vox_x=index % (dev_param.row_dens*dev_param.col_dens) % dev_param.row_dens; uint vox_y=index % (dev_param.row_dens*dev_param.col_dens) / dev_param.row_dens; uint vox_z=index / (dev_param.row_dens*dev_param.col_dens); dev_dens[index]=0.0f; if(vox_x==0 || vox_y==0 || vox_z == 0 || vox_x==dev_param.row_dens || vox_y==dev_param.col_dens || vox_z==dev_param.len_dens) { return; } float3 dens_pos=dev_dens_pos[index]; uint3 cell_pos; cell_pos.x=(uint)floor((float)vox_x / (float)(dev_param.row_dens) * dev_param.row_cell); cell_pos.y=(uint)floor((float)vox_y / (float)(dev_param.col_dens) * dev_param.col_cell); cell_pos.z=(uint)floor((float)vox_z / (float)(dev_param.len_dens) * dev_param.len_cell); uint count; uint3 neighbor_pos[27]; uint neighbor_status[27]; for(count=0; count<27; count++) { neighbor_status[count]=1; } neighbor_pos[0].x=cell_pos.x; neighbor_pos[0].y=cell_pos.y; neighbor_pos[0].z=cell_pos.z; neighbor_pos[1].x=cell_pos.x-1; neighbor_pos[1].y=cell_pos.y-1; neighbor_pos[1].z=cell_pos.z; neighbor_pos[2].x=cell_pos.x-1; neighbor_pos[2].y=cell_pos.y; neighbor_pos[2].z=cell_pos.z; neighbor_pos[3].x=cell_pos.x; neighbor_pos[3].y=cell_pos.y-1; neighbor_pos[3].z=cell_pos.z; neighbor_pos[4].x=cell_pos.x+1; neighbor_pos[4].y=cell_pos.y+1; neighbor_pos[4].z=cell_pos.z; neighbor_pos[5].x=cell_pos.x+1; neighbor_pos[5].y=cell_pos.y; neighbor_pos[5].z=cell_pos.z; neighbor_pos[6].x=cell_pos.x; neighbor_pos[6].y=cell_pos.y+1; neighbor_pos[6].z=cell_pos.z; neighbor_pos[7].x=cell_pos.x+1; neighbor_pos[7].y=cell_pos.y-1; neighbor_pos[7].z=cell_pos.z; neighbor_pos[8].x=cell_pos.x-1; neighbor_pos[8].y=cell_pos.y+1; neighbor_pos[8].z=cell_pos.z; neighbor_pos[9].x=cell_pos.x; neighbor_pos[9].y=cell_pos.y; neighbor_pos[9].z=cell_pos.z-1; neighbor_pos[10].x=cell_pos.x-1; neighbor_pos[10].y=cell_pos.y-1; neighbor_pos[10].z=cell_pos.z-1; neighbor_pos[11].x=cell_pos.x-1; neighbor_pos[11].y=cell_pos.y; neighbor_pos[11].z=cell_pos.z-1; neighbor_pos[12].x=cell_pos.x; neighbor_pos[12].y=cell_pos.y-1; neighbor_pos[12].z=cell_pos.z-1; neighbor_pos[13].x=cell_pos.x+1; neighbor_pos[13].y=cell_pos.y+1; neighbor_pos[13].z=cell_pos.z-1; neighbor_pos[14].x=cell_pos.x+1; neighbor_pos[14].y=cell_pos.y; neighbor_pos[14].z=cell_pos.z-1; neighbor_pos[15].x=cell_pos.x; neighbor_pos[15].y=cell_pos.y+1; neighbor_pos[15].z=cell_pos.z-1; neighbor_pos[16].x=cell_pos.x+1; neighbor_pos[16].y=cell_pos.y-1; neighbor_pos[16].z=cell_pos.z-1; neighbor_pos[17].x=cell_pos.x-1; neighbor_pos[17].y=cell_pos.y+1; neighbor_pos[17].z=cell_pos.z-1; neighbor_pos[18].x=cell_pos.x; neighbor_pos[18].y=cell_pos.y; neighbor_pos[18].z=cell_pos.z+1; neighbor_pos[19].x=cell_pos.x-1; neighbor_pos[19].y=cell_pos.y-1; neighbor_pos[19].z=cell_pos.z+1; neighbor_pos[20].x=cell_pos.x-1; 
neighbor_pos[20].y=cell_pos.y; neighbor_pos[20].z=cell_pos.z+1; neighbor_pos[21].x=cell_pos.x; neighbor_pos[21].y=cell_pos.y-1; neighbor_pos[21].z=cell_pos.z+1; neighbor_pos[22].x=cell_pos.x+1; neighbor_pos[22].y=cell_pos.y+1; neighbor_pos[22].z=cell_pos.z+1; neighbor_pos[23].x=cell_pos.x+1; neighbor_pos[23].y=cell_pos.y; neighbor_pos[23].z=cell_pos.z+1; neighbor_pos[24].x=cell_pos.x; neighbor_pos[24].y=cell_pos.y+1; neighbor_pos[24].z=cell_pos.z+1; neighbor_pos[25].x=cell_pos.x+1; neighbor_pos[25].y=cell_pos.y-1; neighbor_pos[25].z=cell_pos.z+1; neighbor_pos[26].x=cell_pos.x-1; neighbor_pos[26].y=cell_pos.y+1; neighbor_pos[26].z=cell_pos.z+1; if(cell_pos.x == 0) { neighbor_status[1]=0; neighbor_status[2]=0; neighbor_status[8]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[17]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[26]=0; } if(cell_pos.x == dev_param.row_cell-1) { neighbor_status[4]=0; neighbor_status[5]=0; neighbor_status[7]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[16]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[25]=0; } if(cell_pos.y == 0) { neighbor_status[1]=0; neighbor_status[3]=0; neighbor_status[7]=0; neighbor_status[10]=0; neighbor_status[12]=0; neighbor_status[16]=0; neighbor_status[19]=0; neighbor_status[21]=0; neighbor_status[25]=0; } if(cell_pos.y == dev_param.col_cell-1) { neighbor_status[4]=0; neighbor_status[6]=0; neighbor_status[8]=0; neighbor_status[13]=0; neighbor_status[15]=0; neighbor_status[17]=0; neighbor_status[22]=0; neighbor_status[24]=0; neighbor_status[26]=0; } if(cell_pos.z == 0) { neighbor_status[9]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[12]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[15]=0; neighbor_status[16]=0; neighbor_status[17]=0; } if(cell_pos.z == dev_param.len_cell-1) { neighbor_status[18]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[21]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[24]=0; neighbor_status[25]=0; neighbor_status[26]=0; } for(count=0; count<27; count++) { if(neighbor_status[count] == 0) { continue; } integrate_cell_dens(dens_pos, index, neighbor_pos[count], dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, row_dens, col_dens, len_dens, tot_dens, den_size, dev_dens); } dev_dens[index]=dev_dens[index]; /*if(dev_dens[index] > 0.0f) { dev_dens[index]=1.0f; }*/ } void integrate_dens(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint row_dens, uint col_dens, uint len_dens, uint tot_dens, float den_size, float *dev_dens, float3 *dev_dens_pos) { uint num_thread; uint num_block; compute_grid_size(tot_dens, 256, num_block, num_thread); hipLaunchKernelGGL(( integrate_dens_kernel), dim3(num_block), dim3(num_thread) , 0, 0, dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, row_dens, col_dens, len_dens, tot_dens, den_size, dev_dens, dev_dens_pos); } __global__ void integrate_normall(float *dev_dens, float3 *dev_dens_pos, float3 *dev_dens_normal, uint tot_dens) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= tot_dens) { return; } uint vox_x=index % (dev_param.row_dens*dev_param.col_dens) % dev_param.row_dens; uint vox_y=index % (dev_param.row_dens*dev_param.col_dens) / dev_param.row_dens; uint vox_z=index / (dev_param.row_dens*dev_param.col_dens); if(vox_x == 0) { uint 
x_b=vox_z*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x+1; float dens_a=0.0f; float dens_b=dev_dens[x_b]; dev_dens_normal[index].x=(dens_a-dens_b)/dev_param.den_size; } else if(vox_x == dev_param.row_dens-1) { uint x_a=vox_z*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x-1; float dens_a=dev_dens[x_a]; float dens_b=0.0f; dev_dens_normal[index].x=(dens_a-dens_b)/dev_param.den_size; } else { uint x_a=vox_z*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x-1; uint x_b=vox_z*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x+1; float dens_a=dev_dens[x_a]; float dens_b=dev_dens[x_b]; dev_dens_normal[index].x=(dens_a-dens_b)/dev_param.den_size; } if(vox_y == 0) { uint x_b=vox_z*dev_param.row_dens*dev_param.col_dens+(vox_y+1)*dev_param.row_dens+vox_x; float dens_a=0.0f; float dens_b=dev_dens[x_b];; dev_dens_normal[index].y=(dens_a-dens_b)/dev_param.den_size; } else if(vox_y == dev_param.col_dens-1) { uint x_a=vox_z*dev_param.row_dens*dev_param.col_dens+(vox_y-1)*dev_param.row_dens+vox_x; float dens_a=dev_dens[x_a];; float dens_b=0.0f; dev_dens_normal[index].y=(dens_a-dens_b)/dev_param.den_size; } else { uint x_a=vox_z*dev_param.row_dens*dev_param.col_dens+(vox_y-1)*dev_param.row_dens+vox_x; uint x_b=vox_z*dev_param.row_dens*dev_param.col_dens+(vox_y+1)*dev_param.row_dens+vox_x; float dens_a=dev_dens[x_a]; float dens_b=dev_dens[x_b]; dev_dens_normal[index].y=(dens_a-dens_b)/dev_param.den_size; } if(vox_z == 0) { uint x_b=(vox_z+1)*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x; float dens_a=0.0f; float dens_b=dev_dens[x_b]; dev_dens_normal[index].z=(dens_a-dens_b)/dev_param.den_size; } else if(vox_z == dev_param.len_dens-1) { uint x_a=(vox_z-1)*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x; float dens_a=dev_dens[x_a]; float dens_b=0.0f; dev_dens_normal[index].z=(dens_a-dens_b)/dev_param.den_size; } else { uint x_a=(vox_z-1)*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x; uint x_b=(vox_z+1)*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x; float dens_a=dev_dens[x_a]; float dens_b=dev_dens[x_b]; dev_dens_normal[index].z=(dens_a-dens_b)/dev_param.den_size; } float normal=sqrt(dev_dens_normal[index].x*dev_dens_normal[index].x+dev_dens_normal[index].y*dev_dens_normal[index].y+dev_dens_normal[index].z*dev_dens_normal[index].z); if(normal == 0.0f) { dev_dens_normal[index]=make_float3(0.0f); } else { dev_dens_normal[index]=dev_dens_normal[index]/normal; } } void integrate_normal(float *dev_dens, float3 *dev_dens_pos, float3 *dev_dens_normal, uint tot_dens) { uint num_thread; uint num_block; compute_grid_size(tot_dens, 256, num_block, num_thread); hipLaunchKernelGGL(( integrate_normall), dim3(num_block), dim3(num_thread) , 0, 0, dev_dens, dev_dens_pos, dev_dens_normal, tot_dens); }
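Note: the HIP file above and the CUDA file below are the same SPH solver; the hipify pass mainly rewrites cuda* runtime calls to their hip* counterparts and replaces the <<<grid, block, smem, stream>>> launch syntax with hipLaunchKernelGGL(...), as can be seen by comparing find_start_end_kernel's launch in both versions. The following is a minimal, self-contained CUDA sketch of that launch-syntax mapping only; scale_kernel, its arguments, and the ceiling-division helper are hypothetical illustrations, not taken from the dataset.

#include <cuda_runtime.h>

// Trivial kernel used only to illustrate the launch-syntax mapping.
__global__ void scale_kernel(float *data, float factor, unsigned int n)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) { return; }
    data[i] *= factor;
}

void scale(float *dev_data, float factor, unsigned int n)
{
    // Same block-sizing pattern as compute_grid_size(..., 256, ...) in the files above,
    // assuming iDivUp is the usual ceiling division.
    unsigned int num_thread = n < 256 ? n : 256;
    unsigned int num_block  = (n + num_thread - 1) / num_thread;

    // CUDA form (as in the .cu version below):
    scale_kernel<<< num_block, num_thread, 0, 0 >>>(dev_data, factor, n);

    // HIP form produced by hipify (as in the version above):
    // hipLaunchKernelGGL((scale_kernel), dim3(num_block), dim3(num_thread), 0, 0,
    //                    dev_data, factor, n);
}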
81fe945b9a405d0cc1660326da55d2ca4377de80.cu
#include "sph_header.h" #include "sph_param.h" #include "sph_math.h" #include "sph_kernel.cu" #include <cutil_math.h> #define EXP 2.718281f #define EXT2 1e-12f #define EXT 1e-6f __device__ float wavelet(float input) { float sigma=2.0; return 2.0f/(pow(PI, 0.25f)*pow(3.0f*sigma, 0.5f)) * ((input*input)/(sigma*sigma)-1) * pow(EXP, -(input*input)/(2*sigma*sigma)); } __device__ uint3 calc_grid_pos(float x, float y ,float z) { uint3 cell_pos; cell_pos.x=(uint)floor(x / dev_param.cell_size); cell_pos.y=(uint)floor(y / dev_param.cell_size); cell_pos.z=(uint)floor(z / dev_param.cell_size); return cell_pos; } __device__ uint calc_grid_hash(uint3 cell_pos) { return cell_pos.z*dev_param.row_cell*dev_param.col_cell + cell_pos.y*dev_param.row_cell + cell_pos.x; } void set_parameters(SysParam *host_param) { cudaMemcpyToSymbol((char *)&dev_param, host_param, sizeof(SysParam)); } void alloc_array(void **dev_ptr, size_t size) { cudaMalloc(dev_ptr, size); } void free_array(void *dev_ptr) { cudaFree(dev_ptr); } void copy_array(void *ptr_a, void *ptr_b, size_t size, int type) { if(type == 1) { cudaMemcpy(ptr_a, ptr_b, size, cudaMemcpyHostToDevice); return; } if(type == 2) { cudaMemcpy(ptr_a, ptr_b, size, cudaMemcpyDeviceToHost); return; } if(type == 3) { cudaMemcpy(ptr_a, ptr_b, size, cudaMemcpyDeviceToDevice); return; } return; } void compute_grid_size(uint num_particle, uint block_size, uint &num_blocks, uint &num_threads) { num_threads=min(block_size, num_particle); num_blocks=iDivUp(num_particle, num_threads); } __global__ void calcHashD(uint *dev_hash, uint *dev_index, Particle *dev_mem, uint num_particle) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } uint3 grid_pos=calc_grid_pos(dev_mem[index].pos.x, dev_mem[index].pos.y, dev_mem[index].pos.z); uint hash=calc_grid_hash(grid_pos); dev_hash[index]=hash; dev_index[index]=index; } void calc_hash(uint* dev_hash, uint* dev_index, Particle *dev_mem, uint num_particle) { if(num_particle == 0) { return; } uint num_threads; uint num_blocks; compute_grid_size(num_particle, 256, num_blocks, num_threads); calcHashD<<< num_blocks, num_threads >>>(dev_hash, dev_index, dev_mem, num_particle); } void sort_particles(uint *dev_hash, uint *dev_index, uint num_particle) { if(num_particle == 0) { return; } thrust::sort_by_key(thrust::device_ptr<uint>(dev_hash), thrust::device_ptr<uint>(dev_hash + num_particle), thrust::device_ptr<uint>(dev_index)); } __global__ void find_start_end_kernel(uint *cell_start, uint *cell_end, uint *dev_hash, uint *dev_index, uint num_particle) { extern __shared__ uint shared_hash[]; uint index=blockIdx.x*blockDim.x+threadIdx.x; uint hash; if(index < num_particle) { hash=dev_hash[index]; shared_hash[threadIdx.x+1]=hash; if(index > 0 && threadIdx.x == 0) { shared_hash[0]=dev_hash[index-1]; } } __syncthreads(); if(index < num_particle) { if(index == 0 || hash != shared_hash[threadIdx.x]) { cell_start[hash]=index; if(index > 0) { cell_end[shared_hash[threadIdx.x]]=index; } } if (index == num_particle-1) { cell_end[hash]=index+1; } } } void find_start_end(uint *cell_start, uint *cell_end, uint *dev_hash, uint *dev_index, uint num_particle, uint num_cell) { if(num_particle == 0) { return; } uint num_thread; uint num_block; compute_grid_size(num_particle, 256, num_block, num_thread); cudaMemset(cell_start, 0xffffffff, num_cell*sizeof(int)); cudaMemset(cell_end, 0x0, num_cell*sizeof(int)); uint smemSize=sizeof(int)*(num_thread+1); find_start_end_kernel<<< num_block, num_thread, smemSize>>>( cell_start, cell_end, 
dev_hash, dev_index, num_particle); } __global__ void integrate_velocity_kernel(Particle* dev_mem, uint num_particle) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } Particle *p=&(dev_mem[index]); float radius; if(p->level == 1) { radius=dev_param.large_radius; } if(p->level == 2) { radius=dev_param.small_radius; } //external force if(dev_param.force_waiting_left == true) { if(p->pos.x < dev_param.world_width/3 && p->pos.y > dev_param.world_height/10) { p->acc.x=p->acc.x+20.0f; } } if(dev_param.force_waiting_right == true) { if(p->pos.x > dev_param.world_width/3*2 && p->pos.y > dev_param.world_height/10) { p->acc.x=p->acc.x-20.0f; } } p->vel=p->vel+p->acc*dev_param.time_step/p->dens; p->vel=p->vel+dev_param.gravity*dev_param.time_step/p->dens; p->pos=p->pos+p->vel*dev_param.time_step; if(dev_param.use_cylinder == true) { //handle collision with the sphere 1 float3 dist_vec=dev_mem[index].pos-dev_param.sphere1.pos; float distance=sqrt(dist_vec.x*dist_vec.x+dist_vec.y*dist_vec.y+dist_vec.z*dist_vec.z); if(distance < dev_param.sphere1.radius+radius) { float3 poxyz=dev_mem[index].pos-dev_param.sphere1.pos; float3 dxyz=poxyz/distance*(dev_param.sphere1.radius+radius); float3 normal=dxyz; normal=normalize(normal); dev_mem[index].pos=dev_param.sphere1.pos+dxyz; dev_mem[index].vel=dev_mem[index].vel-dev_param.sphere1.damping*(dot(dev_mem[index].vel, normal))*normal; } //handle collision with the sphere 2 dist_vec=dev_mem[index].pos-dev_param.sphere2.pos; distance=sqrt(dist_vec.x*dist_vec.x+dist_vec.y*dist_vec.y+dist_vec.z*dist_vec.z); if(distance < dev_param.sphere2.radius+radius) { float3 poxyz=dev_mem[index].pos-dev_param.sphere2.pos; float3 dxyz=poxyz/distance*(dev_param.sphere2.radius+radius); float3 normal=dxyz; normal=normalize(normal); dev_mem[index].pos=dev_param.sphere2.pos+dxyz; dev_mem[index].vel=dev_mem[index].vel-dev_param.sphere2.damping*(dot(dev_mem[index].vel, normal))*normal; } //handle collision with the sphere 3 dist_vec=dev_mem[index].pos-dev_param.sphere3.pos; distance=sqrt(dist_vec.x*dist_vec.x+dist_vec.y*dist_vec.y+dist_vec.z*dist_vec.z); if(distance < dev_param.sphere3.radius+radius) { float3 poxyz=dev_mem[index].pos-dev_param.sphere3.pos; float3 dxyz=poxyz/distance*(dev_param.sphere3.radius+radius); float3 normal=dxyz; normal=normalize(normal); dev_mem[index].pos=dev_param.sphere3.pos+dxyz; dev_mem[index].vel=dev_mem[index].vel-dev_param.sphere3.damping*(dot(dev_mem[index].vel, normal))*normal; } } if(p->pos.x >= dev_param.world_width-radius) { p->vel.x=p->vel.x*dev_param.wall_damping; p->pos.x=dev_param.world_width-radius; } if(p->pos.x < radius) { p->vel.x=p->vel.x*dev_param.wall_damping; p->pos.x=radius; } if(p->pos.y >= dev_param.world_height-radius) { p->vel.y=p->vel.y*dev_param.wall_damping; p->pos.y=dev_param.world_height-radius; } if(p->pos.y < radius) { p->vel.y=p->vel.y*dev_param.wall_damping; p->pos.y=radius; } if(p->pos.z >= dev_param.world_length-radius) { p->vel.z=p->vel.z*dev_param.wall_damping; p->pos.z=dev_param.world_length-radius; } if(p->pos.z < radius) { p->vel.z=p->vel.z*dev_param.wall_damping; p->pos.z=radius; } p->ev=(p->ev+p->vel)/2; } void integrate_velocity(Particle *dev_mem, uint num_particle) { if(num_particle == 0) { return; } uint num_thread; uint num_block; compute_grid_size(num_particle, 256, num_block, num_thread); integrate_velocity_kernel<<< num_block, num_thread >>>(dev_mem, num_particle); } __device__ float compute_cell_density(uint index, uint3 neighbor, Particle *dev_mem, uint *dev_hash, uint 
*dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell) { uint grid_hash=calc_grid_hash(neighbor); uint start_index=cell_start[grid_hash]; float total_cell_density=0.0f; float i_mass; float i_kernel_2; float i_kernel_6; float i_poly6_value; float j_mass; float3 rel_pos; float r2; Particle *p=&(dev_mem[index]); Particle *np; uint neighbor_index; if(p->level == 1) { i_mass=dev_param.large_mass; i_kernel_2=dev_param.large_kernel_2; i_kernel_6=dev_param.large_kernel_6; i_poly6_value=dev_param.large_poly6; } if(p->level == 2) { i_mass=dev_param.small_mass; i_kernel_2=dev_param.small_kernel_2; i_kernel_6=dev_param.small_kernel_6; i_poly6_value=dev_param.small_poly6; } if(start_index != 0xffffffff) { uint end_index=cell_end[grid_hash]; for(uint count_index=start_index; count_index<end_index; count_index++) { neighbor_index=dev_index[count_index]; np=&(dev_mem[neighbor_index]); if(neighbor_index != index) { rel_pos=np->pos-p->pos; r2=rel_pos.x*rel_pos.x+rel_pos.y*rel_pos.y+rel_pos.z*rel_pos.z; if(r2 < 0.0001f) { continue; } if(r2 < i_kernel_2) { if(np->level == 1) { j_mass=dev_param.large_mass; } if(np->level == 2) { j_mass=dev_param.small_mass; } total_cell_density=total_cell_density + j_mass * i_poly6_value * pow(i_kernel_2-r2, 3); } } if(neighbor_index == index) { total_cell_density=total_cell_density + i_mass * i_poly6_value * i_kernel_6; } } } return total_cell_density; } __global__ void compute_density_kernel(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } uint3 grid_pos=calc_grid_pos(dev_mem[index].pos.x, dev_mem[index].pos.y, dev_mem[index].pos.z); uint count; uint3 neighbor_pos[27]; uint neighbor_status[27]; for(count=0; count<27; count++) { neighbor_status[count]=1; } neighbor_pos[0].x=grid_pos.x; neighbor_pos[0].y=grid_pos.y; neighbor_pos[0].z=grid_pos.z; neighbor_pos[1].x=grid_pos.x-1; neighbor_pos[1].y=grid_pos.y-1; neighbor_pos[1].z=grid_pos.z; neighbor_pos[2].x=grid_pos.x-1; neighbor_pos[2].y=grid_pos.y; neighbor_pos[2].z=grid_pos.z; neighbor_pos[3].x=grid_pos.x; neighbor_pos[3].y=grid_pos.y-1; neighbor_pos[3].z=grid_pos.z; neighbor_pos[4].x=grid_pos.x+1; neighbor_pos[4].y=grid_pos.y+1; neighbor_pos[4].z=grid_pos.z; neighbor_pos[5].x=grid_pos.x+1; neighbor_pos[5].y=grid_pos.y; neighbor_pos[5].z=grid_pos.z; neighbor_pos[6].x=grid_pos.x; neighbor_pos[6].y=grid_pos.y+1; neighbor_pos[6].z=grid_pos.z; neighbor_pos[7].x=grid_pos.x+1; neighbor_pos[7].y=grid_pos.y-1; neighbor_pos[7].z=grid_pos.z; neighbor_pos[8].x=grid_pos.x-1; neighbor_pos[8].y=grid_pos.y+1; neighbor_pos[8].z=grid_pos.z; neighbor_pos[9].x=grid_pos.x; neighbor_pos[9].y=grid_pos.y; neighbor_pos[9].z=grid_pos.z-1; neighbor_pos[10].x=grid_pos.x-1; neighbor_pos[10].y=grid_pos.y-1; neighbor_pos[10].z=grid_pos.z-1; neighbor_pos[11].x=grid_pos.x-1; neighbor_pos[11].y=grid_pos.y; neighbor_pos[11].z=grid_pos.z-1; neighbor_pos[12].x=grid_pos.x; neighbor_pos[12].y=grid_pos.y-1; neighbor_pos[12].z=grid_pos.z-1; neighbor_pos[13].x=grid_pos.x+1; neighbor_pos[13].y=grid_pos.y+1; neighbor_pos[13].z=grid_pos.z-1; neighbor_pos[14].x=grid_pos.x+1; neighbor_pos[14].y=grid_pos.y; neighbor_pos[14].z=grid_pos.z-1; neighbor_pos[15].x=grid_pos.x; neighbor_pos[15].y=grid_pos.y+1; neighbor_pos[15].z=grid_pos.z-1; neighbor_pos[16].x=grid_pos.x+1; neighbor_pos[16].y=grid_pos.y-1; neighbor_pos[16].z=grid_pos.z-1; neighbor_pos[17].x=grid_pos.x-1; 
neighbor_pos[17].y=grid_pos.y+1; neighbor_pos[17].z=grid_pos.z-1; neighbor_pos[18].x=grid_pos.x; neighbor_pos[18].y=grid_pos.y; neighbor_pos[18].z=grid_pos.z+1; neighbor_pos[19].x=grid_pos.x-1; neighbor_pos[19].y=grid_pos.y-1; neighbor_pos[19].z=grid_pos.z+1; neighbor_pos[20].x=grid_pos.x-1; neighbor_pos[20].y=grid_pos.y; neighbor_pos[20].z=grid_pos.z+1; neighbor_pos[21].x=grid_pos.x; neighbor_pos[21].y=grid_pos.y-1; neighbor_pos[21].z=grid_pos.z+1; neighbor_pos[22].x=grid_pos.x+1; neighbor_pos[22].y=grid_pos.y+1; neighbor_pos[22].z=grid_pos.z+1; neighbor_pos[23].x=grid_pos.x+1; neighbor_pos[23].y=grid_pos.y; neighbor_pos[23].z=grid_pos.z+1; neighbor_pos[24].x=grid_pos.x; neighbor_pos[24].y=grid_pos.y+1; neighbor_pos[24].z=grid_pos.z+1; neighbor_pos[25].x=grid_pos.x+1; neighbor_pos[25].y=grid_pos.y-1; neighbor_pos[25].z=grid_pos.z+1; neighbor_pos[26].x=grid_pos.x-1; neighbor_pos[26].y=grid_pos.y+1; neighbor_pos[26].z=grid_pos.z+1; if(grid_pos.x == 0) { neighbor_status[1]=0; neighbor_status[2]=0; neighbor_status[8]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[17]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[26]=0; } if(grid_pos.x == dev_param.row_cell-1) { neighbor_status[4]=0; neighbor_status[5]=0; neighbor_status[7]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[16]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[25]=0; } if(grid_pos.y == 0) { neighbor_status[1]=0; neighbor_status[3]=0; neighbor_status[7]=0; neighbor_status[10]=0; neighbor_status[12]=0; neighbor_status[16]=0; neighbor_status[19]=0; neighbor_status[21]=0; neighbor_status[25]=0; } if(grid_pos.y == dev_param.col_cell-1) { neighbor_status[4]=0; neighbor_status[6]=0; neighbor_status[8]=0; neighbor_status[13]=0; neighbor_status[15]=0; neighbor_status[17]=0; neighbor_status[22]=0; neighbor_status[24]=0; neighbor_status[26]=0; } if(grid_pos.z == 0) { neighbor_status[9]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[12]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[15]=0; neighbor_status[16]=0; neighbor_status[17]=0; } if(grid_pos.z == dev_param.len_cell-1) { neighbor_status[18]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[21]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[24]=0; neighbor_status[25]=0; neighbor_status[26]=0; } float total_density=0; for(count=0; count<27; count++) { if(neighbor_status[count] == 0) { continue; } total_density=total_density+compute_cell_density(index, neighbor_pos[count], dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell); } dev_mem[index].dens=total_density; dev_mem[index].press=(pow(dev_mem[index].dens / dev_param.rest_density, 3) - 1) * dev_param.gas_constant; //dev_mem[index].press=(dev_mem[index].dens / dev_param.rest_density - 1) * dev_param.gas_constant; } __device__ float3 compute_cell_force(uint index, uint3 neighbor, Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, float4 *surface) { uint grid_hash=calc_grid_hash(neighbor); uint start_index=cell_start[grid_hash]; float3 total_cell_force=make_float3(0.0f); float i_kernel; float i_mass; float i_kernel_2; float i_kernel_6; float i_poly6_value; float i_spiky_value; float i_visco_value; float i_grad_poly6; float i_lapc_poly6; float j_kernel; float j_mass; float j_kernel_2; float j_kernel_6; float j_spiky_value; float j_visco_value; float i_press_kernel; float i_visco_kernel; float j_press_kernel; 
float j_visco_kernel; float i_kernel_r; float j_kernel_r; float iV; float jV; uint neighbor_index; Particle *p=&(dev_mem[index]); Particle *np; float3 rel_pos; float r2; float r; float temp_force; float3 rel_vel; if(p->level == 1) { i_kernel=dev_param.large_kernel; i_mass=dev_param.large_mass; i_kernel_2=dev_param.large_kernel_2; i_kernel_6=dev_param.large_kernel_6; i_poly6_value=dev_param.large_poly6; i_spiky_value=dev_param.large_spiky; i_visco_value=dev_param.large_visco; i_grad_poly6=dev_param.large_grad_poly6; i_lapc_poly6=dev_param.large_lapc_poly6; } if(p->level == 2) { i_kernel=dev_param.small_kernel; i_mass=dev_param.small_mass; i_kernel_2=dev_param.small_kernel_2; i_kernel_6=dev_param.small_kernel_6; i_poly6_value=dev_param.small_poly6; i_spiky_value=dev_param.small_spiky; i_visco_value=dev_param.small_visco; i_grad_poly6=dev_param.small_grad_poly6; i_lapc_poly6=dev_param.small_lapc_poly6; } if(start_index != 0xffffffff) { uint end_index=cell_end[grid_hash]; for(uint count_index=start_index; count_index<end_index; count_index++) { neighbor_index=dev_index[count_index]; np=&(dev_mem[neighbor_index]); if(neighbor_index != index) { rel_pos=p->pos-np->pos; r2=rel_pos.x*rel_pos.x+rel_pos.y*rel_pos.y+rel_pos.z*rel_pos.z; if(np->level == 1) { j_kernel=dev_param.large_kernel; j_mass=dev_param.large_mass; j_kernel_2=dev_param.large_kernel_2; j_kernel_6=dev_param.large_kernel_6; j_spiky_value=dev_param.large_spiky; j_visco_value=dev_param.large_visco; } if(np->level == 2) { j_kernel=dev_param.small_kernel; j_mass=dev_param.small_mass; j_kernel_2=dev_param.small_kernel_2; j_kernel_6=dev_param.small_kernel_6; j_spiky_value=dev_param.small_spiky; j_visco_value=dev_param.small_visco; } float max_kernel_2=i_kernel_2>j_kernel_2?i_kernel_2:j_kernel_2; if(r2 < max_kernel_2) { if(r2 < 0.0001f) { continue; } else { r=sqrt(r2); } iV=i_mass/p->dens; jV=j_mass/np->dens; i_kernel_r=i_kernel-r; j_kernel_r=j_kernel-r; if(i_kernel_r > 0) { i_press_kernel=i_spiky_value * i_kernel_r * i_kernel_r / i_kernel_6; i_visco_kernel=i_visco_value/i_kernel_6*(i_kernel_r); //surface tension float temp=(-1) * i_grad_poly6 * j_mass / np->dens * pow(i_kernel_2-r2, 2); surface->x += temp * rel_pos.x; surface->y += temp * rel_pos.y; surface->z += temp * rel_pos.z; surface->w += i_lapc_poly6 * j_mass / np->dens * (i_kernel_2-r2) * (r2-3/4*(i_kernel_2-r2)); } else { i_press_kernel=0.0f; i_visco_kernel=0.0f; } if(j_kernel_r > 0) { j_press_kernel=j_spiky_value * j_kernel_r * j_kernel_r / j_kernel_6; j_visco_kernel=j_visco_value/j_kernel_6*(j_kernel_r); } else { j_press_kernel=0.0f; j_visco_kernel=0.0f; } temp_force=i_mass*j_mass * (p->press/(p->dens*p->dens)+np->press/(np->dens*np->dens)) * (i_press_kernel+j_press_kernel)/2; total_cell_force=total_cell_force-rel_pos*temp_force/r; rel_vel=np->ev-p->ev; temp_force=(iV*jV) * dev_param.viscosity * (i_visco_kernel+j_visco_kernel)/2; total_cell_force=total_cell_force + rel_vel*temp_force; } } if(neighbor_index == index) { } } } return total_cell_force; } __global__ void compute_force_kernel(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } uint3 grid_pos=calc_grid_pos(dev_mem[index].pos.x, dev_mem[index].pos.y, dev_mem[index].pos.z); uint count; uint3 neighbor_pos[27]; uint neighbor_status[27]; for(count=0; count<27; count++) { neighbor_status[count]=1; } neighbor_pos[0].x=grid_pos.x; neighbor_pos[0].y=grid_pos.y; 
neighbor_pos[0].z=grid_pos.z; neighbor_pos[1].x=grid_pos.x-1; neighbor_pos[1].y=grid_pos.y-1; neighbor_pos[1].z=grid_pos.z; neighbor_pos[2].x=grid_pos.x-1; neighbor_pos[2].y=grid_pos.y; neighbor_pos[2].z=grid_pos.z; neighbor_pos[3].x=grid_pos.x; neighbor_pos[3].y=grid_pos.y-1; neighbor_pos[3].z=grid_pos.z; neighbor_pos[4].x=grid_pos.x+1; neighbor_pos[4].y=grid_pos.y+1; neighbor_pos[4].z=grid_pos.z; neighbor_pos[5].x=grid_pos.x+1; neighbor_pos[5].y=grid_pos.y; neighbor_pos[5].z=grid_pos.z; neighbor_pos[6].x=grid_pos.x; neighbor_pos[6].y=grid_pos.y+1; neighbor_pos[6].z=grid_pos.z; neighbor_pos[7].x=grid_pos.x+1; neighbor_pos[7].y=grid_pos.y-1; neighbor_pos[7].z=grid_pos.z; neighbor_pos[8].x=grid_pos.x-1; neighbor_pos[8].y=grid_pos.y+1; neighbor_pos[8].z=grid_pos.z; neighbor_pos[9].x=grid_pos.x; neighbor_pos[9].y=grid_pos.y; neighbor_pos[9].z=grid_pos.z-1; neighbor_pos[10].x=grid_pos.x-1; neighbor_pos[10].y=grid_pos.y-1; neighbor_pos[10].z=grid_pos.z-1; neighbor_pos[11].x=grid_pos.x-1; neighbor_pos[11].y=grid_pos.y; neighbor_pos[11].z=grid_pos.z-1; neighbor_pos[12].x=grid_pos.x; neighbor_pos[12].y=grid_pos.y-1; neighbor_pos[12].z=grid_pos.z-1; neighbor_pos[13].x=grid_pos.x+1; neighbor_pos[13].y=grid_pos.y+1; neighbor_pos[13].z=grid_pos.z-1; neighbor_pos[14].x=grid_pos.x+1; neighbor_pos[14].y=grid_pos.y; neighbor_pos[14].z=grid_pos.z-1; neighbor_pos[15].x=grid_pos.x; neighbor_pos[15].y=grid_pos.y+1; neighbor_pos[15].z=grid_pos.z-1; neighbor_pos[16].x=grid_pos.x+1; neighbor_pos[16].y=grid_pos.y-1; neighbor_pos[16].z=grid_pos.z-1; neighbor_pos[17].x=grid_pos.x-1; neighbor_pos[17].y=grid_pos.y+1; neighbor_pos[17].z=grid_pos.z-1; neighbor_pos[18].x=grid_pos.x; neighbor_pos[18].y=grid_pos.y; neighbor_pos[18].z=grid_pos.z+1; neighbor_pos[19].x=grid_pos.x-1; neighbor_pos[19].y=grid_pos.y-1; neighbor_pos[19].z=grid_pos.z+1; neighbor_pos[20].x=grid_pos.x-1; neighbor_pos[20].y=grid_pos.y; neighbor_pos[20].z=grid_pos.z+1; neighbor_pos[21].x=grid_pos.x; neighbor_pos[21].y=grid_pos.y-1; neighbor_pos[21].z=grid_pos.z+1; neighbor_pos[22].x=grid_pos.x+1; neighbor_pos[22].y=grid_pos.y+1; neighbor_pos[22].z=grid_pos.z+1; neighbor_pos[23].x=grid_pos.x+1; neighbor_pos[23].y=grid_pos.y; neighbor_pos[23].z=grid_pos.z+1; neighbor_pos[24].x=grid_pos.x; neighbor_pos[24].y=grid_pos.y+1; neighbor_pos[24].z=grid_pos.z+1; neighbor_pos[25].x=grid_pos.x+1; neighbor_pos[25].y=grid_pos.y-1; neighbor_pos[25].z=grid_pos.z+1; neighbor_pos[26].x=grid_pos.x-1; neighbor_pos[26].y=grid_pos.y+1; neighbor_pos[26].z=grid_pos.z+1; if(grid_pos.x == 0) { neighbor_status[1]=0; neighbor_status[2]=0; neighbor_status[8]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[17]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[26]=0; } if(grid_pos.x == dev_param.row_cell-1) { neighbor_status[4]=0; neighbor_status[5]=0; neighbor_status[7]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[16]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[25]=0; } if(grid_pos.y == 0) { neighbor_status[1]=0; neighbor_status[3]=0; neighbor_status[7]=0; neighbor_status[10]=0; neighbor_status[12]=0; neighbor_status[16]=0; neighbor_status[19]=0; neighbor_status[21]=0; neighbor_status[25]=0; } if(grid_pos.y == dev_param.col_cell-1) { neighbor_status[4]=0; neighbor_status[6]=0; neighbor_status[8]=0; neighbor_status[13]=0; neighbor_status[15]=0; neighbor_status[17]=0; neighbor_status[22]=0; neighbor_status[24]=0; neighbor_status[26]=0; } if(grid_pos.z == 0) { neighbor_status[9]=0; neighbor_status[10]=0; 
neighbor_status[11]=0; neighbor_status[12]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[15]=0; neighbor_status[16]=0; neighbor_status[17]=0; } if(grid_pos.z == dev_param.len_cell-1) { neighbor_status[18]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[21]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[24]=0; neighbor_status[25]=0; neighbor_status[26]=0; } float3 total_force=make_float3(0.0f, 0.0f, 0.0f); float4 surface=make_float4(0.0f, 0.0f, 0.0f, 0.0f); for(count=0; count<27; count++) { if(neighbor_status[count] == 0) { continue; } total_force=total_force+compute_cell_force(index, neighbor_pos[count], dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, &surface); } float3 grad; float lapc; grad.x=surface.x; grad.y=surface.y; grad.z=surface.z; lapc=surface.w; float3 force; float normal; normal=sqrt(grad.x*grad.x+grad.y*grad.y+grad.z*grad.z); dev_mem[index].surface=normal; if(normal > dev_param.surface_tension) { force=0.02f * lapc * grad / normal; } else { force=make_float3(0.0f, 0.0f, 0.0f); } dev_mem[index].acc=total_force+force; } void compute(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell) { uint num_thread; uint num_block; compute_grid_size(num_particle, 256, num_block, num_thread); compute_density_kernel<<< num_block, num_thread >>>(dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell); compute_force_kernel<<< num_block, num_thread >>>(dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell); } __device__ float3 compute_cell_energy(uint index, uint3 neighbor, Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, float3 *trans_vel) { uint grid_hash=calc_grid_hash(neighbor); uint start_index=cell_start[grid_hash]; float3 cell_trans=make_float3(0.0f, 0.0f, 0.0f); float3 transform=make_float3(0.0f, 0.0f, 0.0f); uint neighbor_index; Particle *p=&(dev_mem[index]); Particle *np; float3 rel_pos; float r2; float r; float kernel; float kernel2; if(p->level == 1) { kernel=dev_param.large_kernel; kernel2=dev_param.large_kernel_2; } if(p->level == 2) { kernel=dev_param.large_kernel; kernel2=dev_param.large_kernel_2; } if(start_index != 0xffffffff) { uint end_index=cell_end[grid_hash]; for(uint count_index=start_index; count_index<end_index; count_index++) { neighbor_index=dev_index[count_index]; np=&(dev_mem[neighbor_index]); if(neighbor_index != index) { rel_pos=p->pos-np->pos; r2=rel_pos.x*rel_pos.x+rel_pos.y*rel_pos.y+rel_pos.z*rel_pos.z; if(r2 < kernel2) { if(r2 < 0.0001f) { continue; } else { r=sqrt(r2); } transform.x=wavelet((p->pos.x-np->pos.x)/kernel); transform.y=wavelet((p->pos.y-np->pos.y)/kernel); transform.z=wavelet((p->pos.z-np->pos.z)/kernel); cell_trans=cell_trans+transform; trans_vel[index].x=trans_vel[index].x+(np->vel.x*transform.x/pow(kernel, 0.5f)); trans_vel[index].y=trans_vel[index].y+(np->vel.y*transform.y/pow(kernel, 0.5f)); trans_vel[index].z=trans_vel[index].z+(np->vel.z*transform.z/pow(kernel, 0.5f)); } } if(neighbor_index == index) { } } } return cell_trans; } __global__ void compute_energy_kernel(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, float3 *trans_vel) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } uint3 grid_pos=calc_grid_pos(dev_mem[index].pos.x, dev_mem[index].pos.y, 
dev_mem[index].pos.z); uint count; uint3 neighbor_pos[27]; uint neighbor_status[27]; for(count=0; count<27; count++) { neighbor_status[count]=1; } neighbor_pos[0].x=grid_pos.x; neighbor_pos[0].y=grid_pos.y; neighbor_pos[0].z=grid_pos.z; neighbor_pos[1].x=grid_pos.x-1; neighbor_pos[1].y=grid_pos.y-1; neighbor_pos[1].z=grid_pos.z; neighbor_pos[2].x=grid_pos.x-1; neighbor_pos[2].y=grid_pos.y; neighbor_pos[2].z=grid_pos.z; neighbor_pos[3].x=grid_pos.x; neighbor_pos[3].y=grid_pos.y-1; neighbor_pos[3].z=grid_pos.z; neighbor_pos[4].x=grid_pos.x+1; neighbor_pos[4].y=grid_pos.y+1; neighbor_pos[4].z=grid_pos.z; neighbor_pos[5].x=grid_pos.x+1; neighbor_pos[5].y=grid_pos.y; neighbor_pos[5].z=grid_pos.z; neighbor_pos[6].x=grid_pos.x; neighbor_pos[6].y=grid_pos.y+1; neighbor_pos[6].z=grid_pos.z; neighbor_pos[7].x=grid_pos.x+1; neighbor_pos[7].y=grid_pos.y-1; neighbor_pos[7].z=grid_pos.z; neighbor_pos[8].x=grid_pos.x-1; neighbor_pos[8].y=grid_pos.y+1; neighbor_pos[8].z=grid_pos.z; neighbor_pos[9].x=grid_pos.x; neighbor_pos[9].y=grid_pos.y; neighbor_pos[9].z=grid_pos.z-1; neighbor_pos[10].x=grid_pos.x-1; neighbor_pos[10].y=grid_pos.y-1; neighbor_pos[10].z=grid_pos.z-1; neighbor_pos[11].x=grid_pos.x-1; neighbor_pos[11].y=grid_pos.y; neighbor_pos[11].z=grid_pos.z-1; neighbor_pos[12].x=grid_pos.x; neighbor_pos[12].y=grid_pos.y-1; neighbor_pos[12].z=grid_pos.z-1; neighbor_pos[13].x=grid_pos.x+1; neighbor_pos[13].y=grid_pos.y+1; neighbor_pos[13].z=grid_pos.z-1; neighbor_pos[14].x=grid_pos.x+1; neighbor_pos[14].y=grid_pos.y; neighbor_pos[14].z=grid_pos.z-1; neighbor_pos[15].x=grid_pos.x; neighbor_pos[15].y=grid_pos.y+1; neighbor_pos[15].z=grid_pos.z-1; neighbor_pos[16].x=grid_pos.x+1; neighbor_pos[16].y=grid_pos.y-1; neighbor_pos[16].z=grid_pos.z-1; neighbor_pos[17].x=grid_pos.x-1; neighbor_pos[17].y=grid_pos.y+1; neighbor_pos[17].z=grid_pos.z-1; neighbor_pos[18].x=grid_pos.x; neighbor_pos[18].y=grid_pos.y; neighbor_pos[18].z=grid_pos.z+1; neighbor_pos[19].x=grid_pos.x-1; neighbor_pos[19].y=grid_pos.y-1; neighbor_pos[19].z=grid_pos.z+1; neighbor_pos[20].x=grid_pos.x-1; neighbor_pos[20].y=grid_pos.y; neighbor_pos[20].z=grid_pos.z+1; neighbor_pos[21].x=grid_pos.x; neighbor_pos[21].y=grid_pos.y-1; neighbor_pos[21].z=grid_pos.z+1; neighbor_pos[22].x=grid_pos.x+1; neighbor_pos[22].y=grid_pos.y+1; neighbor_pos[22].z=grid_pos.z+1; neighbor_pos[23].x=grid_pos.x+1; neighbor_pos[23].y=grid_pos.y; neighbor_pos[23].z=grid_pos.z+1; neighbor_pos[24].x=grid_pos.x; neighbor_pos[24].y=grid_pos.y+1; neighbor_pos[24].z=grid_pos.z+1; neighbor_pos[25].x=grid_pos.x+1; neighbor_pos[25].y=grid_pos.y-1; neighbor_pos[25].z=grid_pos.z+1; neighbor_pos[26].x=grid_pos.x-1; neighbor_pos[26].y=grid_pos.y+1; neighbor_pos[26].z=grid_pos.z+1; if(grid_pos.x == 0) { neighbor_status[1]=0; neighbor_status[2]=0; neighbor_status[8]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[17]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[26]=0; } if(grid_pos.x == dev_param.row_cell-1) { neighbor_status[4]=0; neighbor_status[5]=0; neighbor_status[7]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[16]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[25]=0; } if(grid_pos.y == 0) { neighbor_status[1]=0; neighbor_status[3]=0; neighbor_status[7]=0; neighbor_status[10]=0; neighbor_status[12]=0; neighbor_status[16]=0; neighbor_status[19]=0; neighbor_status[21]=0; neighbor_status[25]=0; } if(grid_pos.y == dev_param.col_cell-1) { neighbor_status[4]=0; neighbor_status[6]=0; neighbor_status[8]=0; 
neighbor_status[13]=0; neighbor_status[15]=0; neighbor_status[17]=0; neighbor_status[22]=0; neighbor_status[24]=0; neighbor_status[26]=0; } if(grid_pos.z == 0) { neighbor_status[9]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[12]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[15]=0; neighbor_status[16]=0; neighbor_status[17]=0; } if(grid_pos.z == dev_param.len_cell-1) { neighbor_status[18]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[21]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[24]=0; neighbor_status[25]=0; neighbor_status[26]=0; } float3 total_force=make_float3(0.0f, 0.0f, 0.0f); float3 total_trans=make_float3(0.0f, 0.0f, 0.0f); trans_vel[index]=make_float3(0.0f, 0.0f, 0.0f); for(count=0; count<27; count++) { if(neighbor_status[count] == 0) { continue; } total_trans=total_trans+compute_cell_energy(index, neighbor_pos[count], dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, trans_vel); } float mass; if(dev_mem[index].level == 1) { mass=dev_param.large_mass; } if(dev_mem[index].level == 2) { mass=dev_param.small_mass; } trans_vel[index]=trans_vel[index]/total_trans; dev_mem[index].energy=0.5f*mass*(trans_vel[index].x*trans_vel[index].x+trans_vel[index].y*trans_vel[index].y); } void compute_energy(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, float3 *trans_vel) { uint num_thread; uint num_block; compute_grid_size(num_particle, 256, num_block, num_thread); compute_energy_kernel<<<num_block, num_thread>>>(dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, trans_vel); } __global__ void decide_adaptive_kernel(Particle *dev_mem, uint num_particle, uint *dev_split, uint *dev_index) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_particle) { return; } float co_surface=dev_param.co_surface; float co_energy=dev_param.co_energy; if(dev_mem[index].energy*co_energy+dev_mem[index].surface*co_surface > dev_param.split_criteria && dev_mem[index].level == 1) { dev_split[index]=0; } else { dev_split[index]=1; } dev_index[index]=index; } __global__ void adaptive_particle_kernel(Particle *dev_mem, uint *dev_split, uint *dev_index, uint num_particle, uint num_split, uint *dev_status) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= num_split) { return; } uint new1st=dev_index[index]; //uint new2nd=num_particle+index; uint new2nd=num_particle+index*3+0; uint new3rd=num_particle+index*3+1; uint new4th=num_particle+index*3+2; //2 dev_mem[new2nd].level=2; dev_mem[new2nd].pos.x=dev_mem[new1st].pos.x-dev_param.small_kernel/2; dev_mem[new2nd].pos.y=dev_mem[new1st].pos.y; dev_mem[new2nd].pos.z=dev_mem[new1st].pos.z; dev_mem[new2nd].vel=dev_mem[new1st].vel; dev_mem[new2nd].ev=dev_mem[new1st].ev; dev_mem[new2nd].acc=dev_mem[new1st].acc; dev_mem[new2nd].energy=dev_mem[new1st].energy/4; dev_status[new2nd]=0; //3 dev_mem[new3rd].level=2; dev_mem[new3rd].pos.x=dev_mem[new1st].pos.x; dev_mem[new3rd].pos.y=dev_mem[new1st].pos.y; dev_mem[new3rd].pos.z=dev_mem[new1st].pos.z-dev_param.small_kernel/2; dev_mem[new3rd].vel=dev_mem[new1st].vel; dev_mem[new3rd].ev=dev_mem[new1st].ev; dev_mem[new3rd].acc=dev_mem[new1st].acc; dev_mem[new3rd].energy=dev_mem[new1st].energy/4; dev_status[new3rd]=0; //4 dev_mem[new4th].level=2; dev_mem[new4th].pos.x=dev_mem[new1st].pos.x; dev_mem[new4th].pos.y=dev_mem[new1st].pos.y; dev_mem[new4th].pos.z=dev_mem[new1st].pos.z+dev_param.small_kernel/2; 
dev_mem[new4th].vel=dev_mem[new1st].vel; dev_mem[new4th].ev=dev_mem[new1st].ev; dev_mem[new4th].acc=dev_mem[new1st].acc; dev_mem[new4th].energy=dev_mem[new1st].energy/4; dev_status[new4th]=0; //1 dev_mem[new1st].level=2; dev_mem[new1st].pos.x=dev_mem[new1st].pos.x+dev_param.small_kernel/2; dev_mem[new1st].pos.y=dev_mem[new1st].pos.y; dev_mem[new1st].energy=dev_mem[new1st].energy/4; dev_status[new1st]=0; } uint adaptive_particle(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint *dev_split, uint *dev_status) { if(num_particle == 0) { return num_particle; } uint num_thread; uint num_block; compute_grid_size(num_particle, 256, num_block, num_thread); decide_adaptive_kernel<<< num_block, num_thread >>>(dev_mem, num_particle, dev_split, dev_index); uint num_split=num_particle-thrust::reduce(thrust::device_ptr<uint>(dev_split), thrust::device_ptr<uint>(dev_split + num_particle), (uint) 0, thrust::plus<uint>()); thrust::sort_by_key(thrust::device_ptr<uint>(dev_split), thrust::device_ptr<uint>(dev_split + num_particle), thrust::device_ptr<uint>(dev_index)); if(num_split != 0) { compute_grid_size(num_split, 256, num_block, num_thread); adaptive_particle_kernel<<< num_block, num_thread >>>(dev_mem, dev_split, dev_index, num_particle, num_split, dev_status); num_particle=num_particle+num_split*3; } return num_particle; } __global__ void merge_kernel(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint *cell_merge, uint *dev_status) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= total_cell) { return; } uint start_index=cell_start[index]; cell_merge[index]=0; float co_surface=dev_param.co_surface; float co_energy=dev_param.co_energy; if(start_index != 0xffffffff) { uint end_index=cell_end[index]; for(uint count_index=start_index; count_index<end_index; count_index++) { uint p_index=dev_index[count_index]; dev_status[p_index]=0; if(dev_mem[p_index].level != 1 && dev_mem[p_index].energy*co_energy+dev_mem[p_index].surface*co_surface < dev_param.merge_criteria) { cell_merge[index]++; } } if(cell_merge[index] < 4) { return; } float num_merge=cell_merge[index] / 4; uint current=start_index; uint p1st; uint p2nd; uint p3rd; uint p4th; uint p_index; while(num_merge > 0) { //1 while(current < end_index) { p_index=dev_index[current]; if(dev_mem[p_index].level != 1 && dev_mem[p_index].energy*co_energy+dev_mem[p_index].surface*co_surface < dev_param.merge_criteria) { p1st=p_index; current++; break; } current++; } //2 while(current < end_index) { p_index=dev_index[current]; if(dev_mem[p_index].level != 1 && dev_mem[p_index].energy*co_energy+dev_mem[p_index].surface*co_surface < dev_param.merge_criteria) { p2nd=p_index; current++; break; } current++; } //3 while(current < end_index) { p_index=dev_index[current]; if(dev_mem[p_index].level != 1 && dev_mem[p_index].energy*co_energy+dev_mem[p_index].surface*co_surface < dev_param.merge_criteria) { p3rd=p_index; current++; break; } current++; } //4 while(current < end_index) { p_index=dev_index[current]; if(dev_mem[p_index].level != 1 && dev_mem[p_index].energy*co_energy+dev_mem[p_index].surface*co_surface < dev_param.merge_criteria) { p4th=p_index; current++; break; } current++; } dev_mem[p1st].level=1; /*dev_mem[p1st].pos=(dev_mem[p1st].pos+dev_mem[p2nd].pos)/2; dev_mem[p1st].vel=(dev_mem[p1st].vel+dev_mem[p2nd].vel)/2; dev_mem[p1st].ev=(dev_mem[p1st].ev+dev_mem[p2nd].ev)/2; 
dev_mem[p1st].acc=(dev_mem[p1st].acc+dev_mem[p2nd].acc)/2;*/ ////// float V1; float V2; float V3; float V4; if(dev_mem[p1st].level == 1) { V1=dev_param.large_mass/dev_mem[p1st].dens; } if(dev_mem[p1st].level == 2) { V1=dev_param.small_mass/dev_mem[p1st].dens; } if(dev_mem[p2nd].level == 1) { V2=dev_param.large_mass/dev_mem[p2nd].dens; } if(dev_mem[p2nd].level == 2) { V2=dev_param.small_mass/dev_mem[p2nd].dens; } if(dev_mem[p3rd].level == 1) { V3=dev_param.large_mass/dev_mem[p3rd].dens; } if(dev_mem[p3rd].level == 2) { V3=dev_param.small_mass/dev_mem[p3rd].dens; } if(dev_mem[p4th].level == 1) { V4=dev_param.large_mass/dev_mem[p4th].dens; } if(dev_mem[p4th].level == 2) { V4=dev_param.small_mass/dev_mem[p4th].dens; } dev_mem[p1st].pos=(dev_mem[p1st].pos*V1+dev_mem[p2nd].pos*V2+dev_mem[p3rd].pos*V3+dev_mem[p4th].pos*V4)/(V1+V2+V3+V4); dev_mem[p1st].vel=(dev_mem[p1st].vel*V1+dev_mem[p2nd].vel*V2+dev_mem[p3rd].vel*V3+dev_mem[p4th].vel*V4)/(V1+V2+V3+V4); dev_mem[p1st].ev=(dev_mem[p1st].ev*V1+dev_mem[p2nd].ev*V2+dev_mem[p3rd].ev*V3+dev_mem[p4th].ev*V4)/(V1+V2+V3+V4); dev_mem[p1st].acc=(dev_mem[p1st].acc*V1+dev_mem[p2nd].acc*V2+dev_mem[p3rd].acc*V3+dev_mem[p4th].acc*V4)/(V1+V2+V3+V4); ////// dev_status[p1st]=0; dev_status[p2nd]=1; dev_status[p3rd]=1; dev_status[p4th]=1; num_merge--; } } } void merge(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint *cell_merge, uint *dev_status) { uint num_thread; uint num_block; compute_grid_size(total_cell, 256, num_block, num_thread); merge_kernel<<<num_block, num_thread>>>(dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, cell_merge, dev_status); } uint rearrange(Particle *dev_mem, uint *dev_status, uint num_particle) { if(num_particle == 0) { return num_particle; } uint temp_num_particle=num_particle; uint num_threads; uint num_blocks; compute_grid_size(temp_num_particle, 256, num_blocks, num_threads); thrust::sort_by_key(thrust::device_ptr<uint>(dev_status), thrust::device_ptr<uint>(dev_status + temp_num_particle), thrust::device_ptr<Particle>(dev_mem)); temp_num_particle=thrust::reduce(thrust::device_ptr<uint>(dev_status), thrust::device_ptr<uint>(dev_status + temp_num_particle), (uint) 0, thrust::plus<uint>()); num_particle=num_particle-temp_num_particle; return num_particle; } __device__ void integrate_cell_dens(float3 dens_pos, uint index, uint3 cell_pos, Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint row_dens, uint col_dens, uint len_dens, uint tot_dens, float den_size, float *dev_dens) { Particle *p; uint particle_index; float radius2; float mass; float poly6_value; uint cell_index=calc_grid_hash(cell_pos); uint start_index=cell_start[cell_index]; if(start_index != 0xffffffff) { uint end_index=cell_end[cell_index]; for(uint count_index=start_index; count_index<end_index; count_index++) { particle_index=dev_index[count_index]; p=&(dev_mem[particle_index]); if(p->level == 1) { radius2=dev_param.large_radius*dev_param.large_radius; //radius2=dev_param.large_kernel_2; mass=dev_param.large_mass; poly6_value=dev_param.large_poly6_radius; } if(p->level == 2) { radius2=dev_param.small_radius*dev_param.small_radius; //radius2=dev_param.small_kernel_2; mass=dev_param.small_mass; poly6_value=dev_param.small_poly6_radius; } float3 rel_pos=p->pos-dens_pos; float r2=rel_pos.x*rel_pos.x+rel_pos.y*rel_pos.y+rel_pos.z*rel_pos.z; if(r2 < radius2) { 
dev_dens[index]+=mass*poly6_value*pow(radius2-r2, 3); } } } } __global__ void integrate_dens_kernel(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint row_dens, uint col_dens, uint len_dens, uint tot_dens, float den_size, float *dev_dens, float3 *dev_dens_pos) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= tot_dens) { return; } uint vox_x=index % (dev_param.row_dens*dev_param.col_dens) % dev_param.row_dens; uint vox_y=index % (dev_param.row_dens*dev_param.col_dens) / dev_param.row_dens; uint vox_z=index / (dev_param.row_dens*dev_param.col_dens); dev_dens[index]=0.0f; if(vox_x==0 || vox_y==0 || vox_z == 0 || vox_x==dev_param.row_dens || vox_y==dev_param.col_dens || vox_z==dev_param.len_dens) { return; } float3 dens_pos=dev_dens_pos[index]; uint3 cell_pos; cell_pos.x=(uint)floor((float)vox_x / (float)(dev_param.row_dens) * dev_param.row_cell); cell_pos.y=(uint)floor((float)vox_y / (float)(dev_param.col_dens) * dev_param.col_cell); cell_pos.z=(uint)floor((float)vox_z / (float)(dev_param.len_dens) * dev_param.len_cell); uint count; uint3 neighbor_pos[27]; uint neighbor_status[27]; for(count=0; count<27; count++) { neighbor_status[count]=1; } neighbor_pos[0].x=cell_pos.x; neighbor_pos[0].y=cell_pos.y; neighbor_pos[0].z=cell_pos.z; neighbor_pos[1].x=cell_pos.x-1; neighbor_pos[1].y=cell_pos.y-1; neighbor_pos[1].z=cell_pos.z; neighbor_pos[2].x=cell_pos.x-1; neighbor_pos[2].y=cell_pos.y; neighbor_pos[2].z=cell_pos.z; neighbor_pos[3].x=cell_pos.x; neighbor_pos[3].y=cell_pos.y-1; neighbor_pos[3].z=cell_pos.z; neighbor_pos[4].x=cell_pos.x+1; neighbor_pos[4].y=cell_pos.y+1; neighbor_pos[4].z=cell_pos.z; neighbor_pos[5].x=cell_pos.x+1; neighbor_pos[5].y=cell_pos.y; neighbor_pos[5].z=cell_pos.z; neighbor_pos[6].x=cell_pos.x; neighbor_pos[6].y=cell_pos.y+1; neighbor_pos[6].z=cell_pos.z; neighbor_pos[7].x=cell_pos.x+1; neighbor_pos[7].y=cell_pos.y-1; neighbor_pos[7].z=cell_pos.z; neighbor_pos[8].x=cell_pos.x-1; neighbor_pos[8].y=cell_pos.y+1; neighbor_pos[8].z=cell_pos.z; neighbor_pos[9].x=cell_pos.x; neighbor_pos[9].y=cell_pos.y; neighbor_pos[9].z=cell_pos.z-1; neighbor_pos[10].x=cell_pos.x-1; neighbor_pos[10].y=cell_pos.y-1; neighbor_pos[10].z=cell_pos.z-1; neighbor_pos[11].x=cell_pos.x-1; neighbor_pos[11].y=cell_pos.y; neighbor_pos[11].z=cell_pos.z-1; neighbor_pos[12].x=cell_pos.x; neighbor_pos[12].y=cell_pos.y-1; neighbor_pos[12].z=cell_pos.z-1; neighbor_pos[13].x=cell_pos.x+1; neighbor_pos[13].y=cell_pos.y+1; neighbor_pos[13].z=cell_pos.z-1; neighbor_pos[14].x=cell_pos.x+1; neighbor_pos[14].y=cell_pos.y; neighbor_pos[14].z=cell_pos.z-1; neighbor_pos[15].x=cell_pos.x; neighbor_pos[15].y=cell_pos.y+1; neighbor_pos[15].z=cell_pos.z-1; neighbor_pos[16].x=cell_pos.x+1; neighbor_pos[16].y=cell_pos.y-1; neighbor_pos[16].z=cell_pos.z-1; neighbor_pos[17].x=cell_pos.x-1; neighbor_pos[17].y=cell_pos.y+1; neighbor_pos[17].z=cell_pos.z-1; neighbor_pos[18].x=cell_pos.x; neighbor_pos[18].y=cell_pos.y; neighbor_pos[18].z=cell_pos.z+1; neighbor_pos[19].x=cell_pos.x-1; neighbor_pos[19].y=cell_pos.y-1; neighbor_pos[19].z=cell_pos.z+1; neighbor_pos[20].x=cell_pos.x-1; neighbor_pos[20].y=cell_pos.y; neighbor_pos[20].z=cell_pos.z+1; neighbor_pos[21].x=cell_pos.x; neighbor_pos[21].y=cell_pos.y-1; neighbor_pos[21].z=cell_pos.z+1; neighbor_pos[22].x=cell_pos.x+1; neighbor_pos[22].y=cell_pos.y+1; neighbor_pos[22].z=cell_pos.z+1; neighbor_pos[23].x=cell_pos.x+1; neighbor_pos[23].y=cell_pos.y; neighbor_pos[23].z=cell_pos.z+1; 
neighbor_pos[24].x=cell_pos.x; neighbor_pos[24].y=cell_pos.y+1; neighbor_pos[24].z=cell_pos.z+1; neighbor_pos[25].x=cell_pos.x+1; neighbor_pos[25].y=cell_pos.y-1; neighbor_pos[25].z=cell_pos.z+1; neighbor_pos[26].x=cell_pos.x-1; neighbor_pos[26].y=cell_pos.y+1; neighbor_pos[26].z=cell_pos.z+1; if(cell_pos.x == 0) { neighbor_status[1]=0; neighbor_status[2]=0; neighbor_status[8]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[17]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[26]=0; } if(cell_pos.x == dev_param.row_cell-1) { neighbor_status[4]=0; neighbor_status[5]=0; neighbor_status[7]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[16]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[25]=0; } if(cell_pos.y == 0) { neighbor_status[1]=0; neighbor_status[3]=0; neighbor_status[7]=0; neighbor_status[10]=0; neighbor_status[12]=0; neighbor_status[16]=0; neighbor_status[19]=0; neighbor_status[21]=0; neighbor_status[25]=0; } if(cell_pos.y == dev_param.col_cell-1) { neighbor_status[4]=0; neighbor_status[6]=0; neighbor_status[8]=0; neighbor_status[13]=0; neighbor_status[15]=0; neighbor_status[17]=0; neighbor_status[22]=0; neighbor_status[24]=0; neighbor_status[26]=0; } if(cell_pos.z == 0) { neighbor_status[9]=0; neighbor_status[10]=0; neighbor_status[11]=0; neighbor_status[12]=0; neighbor_status[13]=0; neighbor_status[14]=0; neighbor_status[15]=0; neighbor_status[16]=0; neighbor_status[17]=0; } if(cell_pos.z == dev_param.len_cell-1) { neighbor_status[18]=0; neighbor_status[19]=0; neighbor_status[20]=0; neighbor_status[21]=0; neighbor_status[22]=0; neighbor_status[23]=0; neighbor_status[24]=0; neighbor_status[25]=0; neighbor_status[26]=0; } for(count=0; count<27; count++) { if(neighbor_status[count] == 0) { continue; } integrate_cell_dens(dens_pos, index, neighbor_pos[count], dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, row_dens, col_dens, len_dens, tot_dens, den_size, dev_dens); } dev_dens[index]=dev_dens[index]; /*if(dev_dens[index] > 0.0f) { dev_dens[index]=1.0f; }*/ } void integrate_dens(Particle *dev_mem, uint *dev_hash, uint *dev_index, uint *cell_start, uint *cell_end, uint num_particle, uint total_cell, uint row_dens, uint col_dens, uint len_dens, uint tot_dens, float den_size, float *dev_dens, float3 *dev_dens_pos) { uint num_thread; uint num_block; compute_grid_size(tot_dens, 256, num_block, num_thread); integrate_dens_kernel<<< num_block, num_thread >>>(dev_mem, dev_hash, dev_index, cell_start, cell_end, num_particle, total_cell, row_dens, col_dens, len_dens, tot_dens, den_size, dev_dens, dev_dens_pos); } __global__ void integrate_normall(float *dev_dens, float3 *dev_dens_pos, float3 *dev_dens_normal, uint tot_dens) { uint index=blockIdx.x*blockDim.x+threadIdx.x; if(index >= tot_dens) { return; } uint vox_x=index % (dev_param.row_dens*dev_param.col_dens) % dev_param.row_dens; uint vox_y=index % (dev_param.row_dens*dev_param.col_dens) / dev_param.row_dens; uint vox_z=index / (dev_param.row_dens*dev_param.col_dens); if(vox_x == 0) { uint x_b=vox_z*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x+1; float dens_a=0.0f; float dens_b=dev_dens[x_b]; dev_dens_normal[index].x=(dens_a-dens_b)/dev_param.den_size; } else if(vox_x == dev_param.row_dens-1) { uint x_a=vox_z*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x-1; float dens_a=dev_dens[x_a]; float dens_b=0.0f; dev_dens_normal[index].x=(dens_a-dens_b)/dev_param.den_size; } else { uint 
x_a=vox_z*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x-1; uint x_b=vox_z*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x+1; float dens_a=dev_dens[x_a]; float dens_b=dev_dens[x_b]; dev_dens_normal[index].x=(dens_a-dens_b)/dev_param.den_size; } if(vox_y == 0) { uint x_b=vox_z*dev_param.row_dens*dev_param.col_dens+(vox_y+1)*dev_param.row_dens+vox_x; float dens_a=0.0f; float dens_b=dev_dens[x_b];; dev_dens_normal[index].y=(dens_a-dens_b)/dev_param.den_size; } else if(vox_y == dev_param.col_dens-1) { uint x_a=vox_z*dev_param.row_dens*dev_param.col_dens+(vox_y-1)*dev_param.row_dens+vox_x; float dens_a=dev_dens[x_a];; float dens_b=0.0f; dev_dens_normal[index].y=(dens_a-dens_b)/dev_param.den_size; } else { uint x_a=vox_z*dev_param.row_dens*dev_param.col_dens+(vox_y-1)*dev_param.row_dens+vox_x; uint x_b=vox_z*dev_param.row_dens*dev_param.col_dens+(vox_y+1)*dev_param.row_dens+vox_x; float dens_a=dev_dens[x_a]; float dens_b=dev_dens[x_b]; dev_dens_normal[index].y=(dens_a-dens_b)/dev_param.den_size; } if(vox_z == 0) { uint x_b=(vox_z+1)*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x; float dens_a=0.0f; float dens_b=dev_dens[x_b]; dev_dens_normal[index].z=(dens_a-dens_b)/dev_param.den_size; } else if(vox_z == dev_param.len_dens-1) { uint x_a=(vox_z-1)*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x; float dens_a=dev_dens[x_a]; float dens_b=0.0f; dev_dens_normal[index].z=(dens_a-dens_b)/dev_param.den_size; } else { uint x_a=(vox_z-1)*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x; uint x_b=(vox_z+1)*dev_param.row_dens*dev_param.col_dens+vox_y*dev_param.row_dens+vox_x; float dens_a=dev_dens[x_a]; float dens_b=dev_dens[x_b]; dev_dens_normal[index].z=(dens_a-dens_b)/dev_param.den_size; } float normal=sqrt(dev_dens_normal[index].x*dev_dens_normal[index].x+dev_dens_normal[index].y*dev_dens_normal[index].y+dev_dens_normal[index].z*dev_dens_normal[index].z); if(normal == 0.0f) { dev_dens_normal[index]=make_float3(0.0f); } else { dev_dens_normal[index]=dev_dens_normal[index]/normal; } } void integrate_normal(float *dev_dens, float3 *dev_dens_pos, float3 *dev_dens_normal, uint tot_dens) { uint num_thread; uint num_block; compute_grid_size(tot_dens, 256, num_block, num_thread); integrate_normall<<< num_block, num_thread >>>(dev_dens, dev_dens_pos, dev_dens_normal, tot_dens); }
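The density, force, energy, and voxel-density kernels in the file above all enumerate the same 3x3x3 block of neighboring grid cells by writing out the 27 offsets and the boundary cases by hand. The sketch below, which is illustrative and not part of the dataset record, shows the same enumeration as a clamped triple loop; the names GridDims and collect_neighbor_cells are hypothetical, and the boundary convention (skip cells outside row_cell/col_cell/len_cell, mirroring neighbor_status[k] == 0) is assumed from the code above.

#include <cuda_runtime.h>

// Hypothetical stand-in for the grid dimensions held in dev_param above.
struct GridDims {
    unsigned int row_cell;
    unsigned int col_cell;
    unsigned int len_cell;
};

// Collects the (at most) 27 in-range neighbor cells of `center` into
// `neighbors` and returns how many were kept. Out-of-range cells are
// skipped, which has the same effect as marking neighbor_status[k] = 0.
__device__ int collect_neighbor_cells(uint3 center, GridDims dims, uint3 *neighbors)
{
    int count = 0;
    for (int dz = -1; dz <= 1; ++dz) {
        for (int dy = -1; dy <= 1; ++dy) {
            for (int dx = -1; dx <= 1; ++dx) {
                int x = (int)center.x + dx;
                int y = (int)center.y + dy;
                int z = (int)center.z + dz;
                if (x < 0 || y < 0 || z < 0 ||
                    x >= (int)dims.row_cell ||
                    y >= (int)dims.col_cell ||
                    z >= (int)dims.len_cell) {
                    continue;  // neighbor lies outside the simulation grid
                }
                neighbors[count++] = make_uint3((unsigned)x, (unsigned)y, (unsigned)z);
            }
        }
    }
    return count;
}

A caller would then loop over the returned count instead of iterating count<27 and testing a status array, but the set of cells visited is the same as in the unrolled code above.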
ca1db359ace27cfcb882c4c2d42842565c4ca240.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void device_api_kernel(hiprandState_t *states, float *out, int N)
{
    int i;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int nthreads = gridDim.x * blockDim.x;

    hiprandState_t *state = states + tid;
    hiprand_init(9384, tid, 0, state);

    for (i = tid; i < N; i += nthreads)
    {
        float rand = hiprand_uniform(state);
        rand = rand * 2;
        out[i] = rand;
    }
}
ca1db359ace27cfcb882c4c2d42842565c4ca240.cu
#include "includes.h" __global__ void device_api_kernel(curandState *states, float *out, int N) { int i; int tid = blockIdx.x * blockDim.x + threadIdx.x; int nthreads = gridDim.x * blockDim.x; curandState *state = states + tid; curand_init(9384, tid, 0, state); for (i = tid; i < N; i += nthreads) { float rand = curand_uniform(state); rand = rand * 2; out[i] = rand; } }
32d6cf42e6b37d3280ef05844eb277d1bed81cc2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu_common.h" #include <math.h> namespace seq2seq { #define SEQ2SEQ_TANH(x) (__fdividef(2.0f, (1.0f + __expf(-2.0f*(x)))) - 1.0f) #define SEQ2SEQ_TANH_D(x) (1.0f - (x) * (x)) #define SEQ2SEQ_SIGMOID(x) (__fdividef(1.0f, 1.0f + __expf(-(x)))) #define SEQ2SEQ_SIGMOID_D(x) ((x) * (1.0f - (x))) __global__ void initGPUData_ker(float *data, int numElements, float value) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < numElements) { data[tid] = value; } } void initGPUData(float *data, int numElements, float value) { dim3 gridDim; dim3 blockDim; blockDim.x = 1024; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; hipLaunchKernelGGL(( initGPUData_ker) , dim3(gridDim), dim3(blockDim) , 0, 0, data, numElements, value); } #define EMB_BATCH_THREADS_X 32 #define EMB_BATCH_BLOCKS_X 4 #define EMB_BATCH_BLOCKS_Y 128 __global__ void emb_ff_kernel(const float* w, const float* input, float* output, int batch_size, int seq_length, int emb_size) { int itx = threadIdx.y * blockDim.x + threadIdx.x; int ity = blockIdx.x; for (int batchid = blockIdx.y; batchid < batch_size; batchid += gridDim.y) { //DTYPE *dst_t = output + top_offset[batchid] * len; float *dst_t = output + batchid * seq_length * emb_size; //const DTYPE *index_t = word + offset[batchid]; const float* index_t = input + batchid * seq_length; for (int j = ity; j < seq_length; j += gridDim.x) { const float* emb_t = w + static_cast<unsigned int>(index_t[j]) * emb_size; float *dst_x = dst_t + j * emb_size; for (int i = itx; i < emb_size; i += blockDim.x * blockDim.y) { dst_x[i] = emb_t[i]; } } } } void emb_ff(const float* w, const float* input, float* output, int batch_size, int seq_length, int emb_size) { dim3 blocks(EMB_BATCH_BLOCKS_X, EMB_BATCH_BLOCKS_Y); dim3 threads(EMB_BATCH_THREADS_X, 1); hipLaunchKernelGGL(( emb_ff_kernel), dim3(blocks), dim3(threads) , 0, 0, w, input, output, batch_size, seq_length, emb_size); } __global__ void emb_bp_kernel(float* w, const float* input, const float* grad_output, int batch_size, int seq_length, int emb_size, const float mlr) { int itx = threadIdx.y * blockDim.x + threadIdx.x; for (int batchid = blockIdx.y; batchid < batch_size; batchid += gridDim.y) { const float* word = input + batchid * seq_length; for (int ity = blockIdx.x; ity < seq_length; ity += gridDim.x) { const float* grad_t = grad_output + (batchid * seq_length + ity) * emb_size; float* dst_t = w + static_cast<unsigned int>(word[ity]) * emb_size; for (int i = itx; i < emb_size; i += blockDim.x * blockDim.y) { atomicAdd(dst_t + i, mlr * grad_t[i]); } } } } void emb_bp(float* w, const float* input, const float* grad_output, int batch_size, int seq_length, int emb_size, const float mlr) { dim3 blocks(EMB_BATCH_BLOCKS_X, EMB_BATCH_BLOCKS_Y); dim3 threads(EMB_BATCH_THREADS_X, 1); hipLaunchKernelGGL(( emb_bp_kernel), dim3(blocks), dim3(threads) , 0, 0, w, input, grad_output, batch_size, seq_length, emb_size, mlr); } ////////////////////////////////////////////////////// // embedding ff/bp for feeding to rnn compute // ff result shape is seq_length * batch * emb_size ///////////////////////////////////////////////////// __global__ void emb_ff_for_rnn_kernel(const float* w, const float* input, float* output, int batch_size, int seq_length, int emb_size) { int total = seq_length * batch_size * emb_size; CUDA_KERNEL_LOOP(i, total) { int row = i / emb_size; int column = i % emb_size; const float* emb_t = w + static_cast<unsigned int>(input[row]) * 
emb_size; output[i] = emb_t[column]; } } void emb_ff_for_rnn(const float* w, const float* input, float* output, int batch_size, int seq_length, int emb_size) { int total = seq_length * batch_size * emb_size; const dim3 blockSize(CUDA_NUM_THREADS, 1, 1); const dim3 gridSize(GET_BLOCKS(total), 1, 1); hipLaunchKernelGGL(( emb_ff_for_rnn_kernel), dim3(gridSize), dim3(blockSize) , 0, 0, w, input, output, batch_size, seq_length, emb_size); } __global__ void emb_bp_for_rnn_kernel(float* w, const float* input, const float* grad_output, int batch_size, int seq_length, int emb_size, const float mlr) { int total = seq_length * batch_size * emb_size; CUDA_KERNEL_LOOP(i, total) { int row = i / emb_size; int column = i % emb_size; float* emb_t = w + static_cast<unsigned int>(input[row]) * emb_size; atomicAdd(emb_t + column, mlr * grad_output[i]); } } void emb_bp_for_rnn(float* w, const float* input, const float* grad_output, int batch_size, int seq_length, int emb_size, const float mlr) { int total = seq_length * batch_size * emb_size; hipLaunchKernelGGL(( emb_bp_for_rnn_kernel), dim3(GET_BLOCKS(total)), dim3(CUDA_NUM_THREADS), 0, 0, w, input, grad_output, batch_size, seq_length, emb_size, mlr); } __global__ void negative_loss_ff_kernel( const float* input, const float* labels, float* output, int batch, int num_labels, int pad_id) { CUDA_KERNEL_LOOP(i, batch) { unsigned int true_label = static_cast<unsigned int>(labels[i]); if (true_label != pad_id) { output[i] = -input[i * num_labels + true_label]; } else { output[i] = 0.0; } } } // TODO: return the result of real examples (not pad_id) void negative_loss_ff( const float* input, const float* labels, float* output, int batch, int num_labels, int pad_id) { hipLaunchKernelGGL(( negative_loss_ff_kernel), dim3(GET_BLOCKS(batch)), dim3(CUDA_NUM_THREADS), 0, 0, input, labels, output, batch, num_labels, pad_id); } __global__ void negative_loss_bp_kernel( const float* input, const float* labels, float* output, int batch, int num_labels, float loss_factor, int pad_id) { CUDA_KERNEL_LOOP(i, batch * num_labels) { unsigned int batch_id = i / num_labels; unsigned int this_label = i % num_labels; unsigned int true_label = static_cast<unsigned int>(labels[batch_id]); if (true_label == pad_id || this_label != true_label) { output[i] = 0.0; } else { output[i] = -loss_factor; } } } void negative_loss_bp( const float* input, const float* labels, float* output, int batch, int num_labels, float loss_factor, int pad_id) { hipLaunchKernelGGL(( negative_loss_bp_kernel), dim3(GET_BLOCKS(batch * num_labels)), dim3(CUDA_NUM_THREADS), 0, 0, input, labels, output, batch, num_labels, loss_factor, pad_id); } __global__ void add_at_w_and_u_terms_and_nonlinear_kernel( const float* w_terms, const float* u_terms, float* alignment_feats, int seq_len, int batch_size, int alignment_model_size) { CUDA_KERNEL_LOOP(i, seq_len * batch_size * alignment_model_size) { unsigned int col_id = i % (batch_size * alignment_model_size); alignment_feats[i] = SEQ2SEQ_TANH(w_terms[col_id] + u_terms[i]); } } void add_at_w_and_u_terms_and_nonlinear( const float* w_terms, const float* u_terms, float* alignment_feats, int seq_len, int batch_size, int alignment_model_size) { hipLaunchKernelGGL(( add_at_w_and_u_terms_and_nonlinear_kernel), dim3(GET_BLOCKS(seq_len * batch_size * alignment_model_size)), dim3(CUDA_NUM_THREADS), 0, 0, w_terms, u_terms, alignment_feats, seq_len, batch_size, alignment_model_size); } __global__ void add_at_w_and_u_terms_and_nonlinear_bp_kernel( const float* alignment_feats, const float* 
alignment_feats_diff, float* w_terms_diff, float* u_terms_diff, int seq_len, int batch_size, int alignment_model_size) { CUDA_KERNEL_LOOP(i, seq_len * batch_size * alignment_model_size) { unsigned int col_id = i % (batch_size * alignment_model_size); float tanhd = SEQ2SEQ_TANH_D(alignment_feats[i]) * alignment_feats_diff[i]; u_terms_diff[i] += tanhd; atomicAdd(w_terms_diff + col_id, tanhd); } // TODO: avoid atomicAdd } void add_at_w_and_u_terms_and_nonlinear_bp( const float* alignment_feats, const float* alignment_feats_diff, float* w_terms_diff, float* u_terms_diff, int seq_len, int batch_size, int alignment_model_size) { hipLaunchKernelGGL(( add_at_w_and_u_terms_and_nonlinear_bp_kernel), dim3(GET_BLOCKS(seq_len * batch_size * alignment_model_size)), dim3(CUDA_NUM_THREADS), 0, 0, alignment_feats, alignment_feats_diff, w_terms_diff, u_terms_diff, seq_len, batch_size, alignment_model_size); } __global__ void compute_context_kernel( const float* attention_weights, const float* encoder_hidden, float* context, int seq_len, int batch_size, int hidden_size) { CUDA_KERNEL_LOOP(i, batch_size * 2 * hidden_size) { int batch_id = i / (2 * hidden_size); context[i] = 0.0; for (int k = 0; k < seq_len; ++k) { context[i] += encoder_hidden[k * batch_size * 2 * hidden_size + i] \ * attention_weights[k * batch_size + batch_id]; } } } void compute_context( const float* attention_weights, const float* encoder_hidden, float* context, int seq_len, int batch_size, int hidden_size) { hipLaunchKernelGGL(( compute_context_kernel), dim3(GET_BLOCKS(batch_size * 2 * hidden_size)), dim3(CUDA_NUM_THREADS), 0, 0, attention_weights, encoder_hidden, context, seq_len, batch_size, hidden_size); } __global__ void bp_compute_context_kernel( const float* context_diff, const float* attention_weights, const float* encoder_hidden, float* attention_weights_diff, float* encoder_hidden_diff, int seq_len, int batch_size, int hidden_size) { CUDA_KERNEL_LOOP(i, seq_len * batch_size * 2 * hidden_size) { int j = i / (2 * hidden_size); int k = i % (batch_size * 2 * hidden_size); atomicAdd(attention_weights_diff + j, encoder_hidden[i] * context_diff[k]); // NOTICE, use += here, since every step in decoder has this diff encoder_hidden_diff[i] += attention_weights[j] * context_diff[k]; //atomicAdd(encoder_hidden_diff + i, attention_weights[j] * context_diff[k]); } // TODO: use a reduce paradigm to avoid atomicAdd } void bp_compute_context( const float* context_diff, const float* attention_weights, const float* encoder_hidden, float* attention_weights_diff, float* encoder_hidden_diff, int seq_len, int batch_size, int hidden_size) { // CAUTION HERE: only memset attention weights diff, dont memset hidden_diff // hipMemset(encoder_hidden_diff, 0.0, sizeof(float) * seq_len * batch_size * 2 * hidden_size); hipMemset(attention_weights_diff, 0.0, sizeof(float) * seq_len * batch_size); hipLaunchKernelGGL(( bp_compute_context_kernel), dim3(GET_BLOCKS(seq_len * batch_size * 2 * hidden_size)), dim3(CUDA_NUM_THREADS), 0, 0, context_diff, attention_weights, encoder_hidden, attention_weights_diff, encoder_hidden_diff, seq_len, batch_size, hidden_size); } __global__ void attention_decoder_ff_nonlinear_kernel( const float* h_data_tm1, const float* pre_gate_data_w_t, const float* pre_gate_data_u_t, const float* pre_gate_data_c_t, float* gate_data_t, float* h_data_t, const int batch_size, const int hidden_size) { CUDA_KERNEL_LOOP(i, batch_size * hidden_size) { int batch_id = i / hidden_size; int k = i % hidden_size; // reset gate int r_idx = batch_id * 3 * 
hidden_size + k; gate_data_t[r_idx] = SEQ2SEQ_SIGMOID(pre_gate_data_w_t[r_idx] \ + pre_gate_data_u_t[r_idx] + pre_gate_data_c_t[r_idx]); // update gate int z_idx = (batch_id * 3 + 1) * hidden_size + k; gate_data_t[z_idx] = SEQ2SEQ_SIGMOID(pre_gate_data_w_t[z_idx] \ + pre_gate_data_u_t[z_idx] \ + pre_gate_data_c_t[z_idx]); // new gate int n_idx = (batch_id * 3 + 2 ) * hidden_size + k; gate_data_t[n_idx] = SEQ2SEQ_TANH(pre_gate_data_w_t[n_idx] \ + gate_data_t[r_idx] * pre_gate_data_u_t[n_idx] \ + pre_gate_data_c_t[n_idx]); // output h_data_t[i] = (1.0 - gate_data_t[z_idx]) * h_data_tm1[i] + gate_data_t[z_idx] * gate_data_t[n_idx]; } } void attention_decoder_ff_nonlinear( const float* h_data_tm1, const float* pre_gate_data_w_t, const float* pre_gate_data_u_t, const float* pre_gate_data_c_t, float* gate_data_t, float* h_data_t, const int batch_size, const int hidden_size) { hipLaunchKernelGGL(( attention_decoder_ff_nonlinear_kernel), dim3(GET_BLOCKS(batch_size * hidden_size)), dim3(CUDA_NUM_THREADS), 0, 0, h_data_tm1, pre_gate_data_w_t, pre_gate_data_u_t, pre_gate_data_c_t, gate_data_t, h_data_t, batch_size, hidden_size); } __global__ void attention_decoder_bp_nonlinear_kernel( const float* h_data_tm1, const float* h_diff_t, const float* gate_data_t, const float* pre_gate_data_u_t, float* h_diff_tm1, float* pre_gate_diff_w_t, float* pre_gate_diff_u_t, float* pre_gate_diff_c_t, float* gate_diff_t, const int batch_size, const int hidden_size) { CUDA_KERNEL_LOOP(i, batch_size * hidden_size) { int batch_id = i / hidden_size; int k = i % hidden_size; // reset gate index int r_idx = batch_id * 3 * hidden_size + k; // update gate index int z_idx = (batch_id * 3 + 1) * hidden_size + k; // new gate index int n_idx = (batch_id * 3 + 2 ) * hidden_size + k; // grads wrt h_tm1, using += since it already has diff from upper computation h_diff_tm1[i] += (1.0 - gate_data_t[z_idx]) * h_diff_t[i]; // grads wrt new gate gate_diff_t[n_idx] = gate_data_t[z_idx] * h_diff_t[i]; // nonlinear grads float n_grad = gate_diff_t[n_idx] * SEQ2SEQ_TANH_D(gate_data_t[n_idx]); pre_gate_diff_w_t[n_idx] = n_grad; pre_gate_diff_u_t[n_idx] = n_grad * gate_data_t[r_idx]; pre_gate_diff_c_t[n_idx] = n_grad; // grads wrt update gate gate_diff_t[z_idx] = (gate_data_t[n_idx] - h_data_tm1[i]) * h_diff_t[i]; // nonlinear grads float z_grad = gate_diff_t[z_idx] * SEQ2SEQ_SIGMOID_D(gate_data_t[z_idx]); pre_gate_diff_w_t[z_idx] = z_grad; pre_gate_diff_u_t[z_idx] = z_grad; pre_gate_diff_c_t[z_idx] = z_grad; // grads wrt reset gate gate_diff_t[r_idx] = n_grad * pre_gate_data_u_t[n_idx]; float r_grad = gate_diff_t[r_idx] * SEQ2SEQ_SIGMOID_D(gate_data_t[r_idx]); pre_gate_diff_w_t[r_idx] = r_grad; pre_gate_diff_u_t[r_idx] = r_grad; pre_gate_diff_c_t[r_idx] = r_grad; } } void attention_decoder_bp_nonlinear( const float* h_data_tm1, const float* h_diff_t, const float* gate_data_t, const float* pre_gate_data_u_t, float* h_diff_tm1, float* pre_gate_diff_w_t, float* pre_gate_diff_u_t, float* pre_gate_diff_c_t, float* gate_diff_t, const int batch_size, const int hidden_size) { hipLaunchKernelGGL(( attention_decoder_bp_nonlinear_kernel), dim3(GET_BLOCKS(batch_size * hidden_size)), dim3(CUDA_NUM_THREADS), 0, 0, h_data_tm1, h_diff_t, gate_data_t, pre_gate_data_u_t, h_diff_tm1, pre_gate_diff_w_t, pre_gate_diff_u_t, pre_gate_diff_c_t, gate_diff_t, batch_size, hidden_size); } __global__ void copy_for_decoder_h0_data_kernel( const float* encoder_hidden_data, float* h0_data, int batch_size, int hidden_size) { CUDA_KERNEL_LOOP(i, batch_size * hidden_size) { 
int batch_id = i / hidden_size; int k = i % hidden_size; h0_data[i] = encoder_hidden_data[2 * hidden_size * batch_id + hidden_size + k]; } } void copy_for_decoder_h0_data( const float* encoder_hidden_data, float* h0_data, int batch_size, int hidden_size) { hipLaunchKernelGGL(( copy_for_decoder_h0_data_kernel), dim3(GET_BLOCKS(batch_size * hidden_size)), dim3(CUDA_NUM_THREADS), 0, 0, encoder_hidden_data, h0_data, batch_size, hidden_size); } __global__ void copy_for_decoder_h0_diff_kernel( const float* h0_diff, float* encoder_hidden_diff, int batch_size, int hidden_size) { CUDA_KERNEL_LOOP(i, batch_size * hidden_size) { int batch_id = i / hidden_size; int k = i % hidden_size; // use += here, since it already has diff encoder_hidden_diff[2 * hidden_size * batch_id + hidden_size + k] += h0_diff[i]; } } void copy_for_decoder_h0_diff( const float* h0_diff, float* encoder_hidden_diff, int batch_size, int hidden_size) { hipLaunchKernelGGL(( copy_for_decoder_h0_diff_kernel), dim3(GET_BLOCKS(batch_size * hidden_size)), dim3(CUDA_NUM_THREADS), 0, 0, h0_diff, encoder_hidden_diff, batch_size, hidden_size); } __global__ void maxout_ff_kernel( const float* pre_maxout_data, float* maxout_data, float* maxout_ele_idx, int total_output_size) { CUDA_KERNEL_LOOP(i, total_output_size) { int k = 2 * i; int kp1 = 2 * i + 1; if (pre_maxout_data[k] > pre_maxout_data[kp1]) { maxout_data[i] = pre_maxout_data[k]; maxout_ele_idx[i] = k; } else { maxout_data[i] = pre_maxout_data[kp1]; maxout_ele_idx[i] = kp1; } } } void maxout_ff( const float* pre_maxout_data, float* maxout_data, float* maxout_ele_idx, int total_output_size) { hipLaunchKernelGGL(( maxout_ff_kernel), dim3(GET_BLOCKS(total_output_size)), dim3(CUDA_NUM_THREADS), 0, 0, pre_maxout_data, maxout_data, maxout_ele_idx, total_output_size); } __global__ void maxout_bp_kernel( float* pre_maxout_diff, const float* maxout_diff, const float* maxout_ele_idx, int total_output_size) { CUDA_KERNEL_LOOP(i, total_output_size) { int idx = static_cast<int>(maxout_ele_idx[i]); pre_maxout_diff[idx] = maxout_diff[i]; } } void maxout_bp( float* pre_maxout_diff, const float* maxout_diff, const float* maxout_ele_idx, int total_output_size) { hipLaunchKernelGGL(( maxout_bp_kernel), dim3(GET_BLOCKS(total_output_size)), dim3(CUDA_NUM_THREADS), 0, 0, pre_maxout_diff, maxout_diff, maxout_ele_idx, total_output_size); } } // namespace seq2seq
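Compared side by side with the .cu file that follows, the systematic change hipify makes in this translation unit is the launch syntax: every kernel<<<grid, block, shmem, stream>>>(args...) becomes hipLaunchKernelGGL(kernel, grid, block, shmem, stream, args...), alongside runtime renames such as cudaMemset -> hipMemset. The snippet below is a minimal illustration of that correspondence using a hypothetical scale_kernel; it is not part of the dataset record.

#include <cuda_runtime.h>

// Hypothetical kernel used only to illustrate the launch-syntax mapping.
__global__ void scale_kernel(float *data, int n, float factor)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        data[i] *= factor;
    }
}

void scale(float *d_data, int n, float factor)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA launch syntax (as in the .cu file below):
    scale_kernel<<<grid, block, 0, 0>>>(d_data, n, factor);

    // Equivalent HIP launch emitted by hipify (as in the .hip file above):
    //   hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, d_data, n, factor);
}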
32d6cf42e6b37d3280ef05844eb277d1bed81cc2.cu
#include "gpu_common.h" #include <math.h> namespace seq2seq { #define SEQ2SEQ_TANH(x) (__fdividef(2.0f, (1.0f + __expf(-2.0f*(x)))) - 1.0f) #define SEQ2SEQ_TANH_D(x) (1.0f - (x) * (x)) #define SEQ2SEQ_SIGMOID(x) (__fdividef(1.0f, 1.0f + __expf(-(x)))) #define SEQ2SEQ_SIGMOID_D(x) ((x) * (1.0f - (x))) __global__ void initGPUData_ker(float *data, int numElements, float value) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < numElements) { data[tid] = value; } } void initGPUData(float *data, int numElements, float value) { dim3 gridDim; dim3 blockDim; blockDim.x = 1024; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; initGPUData_ker <<< gridDim, blockDim >>> (data, numElements, value); } #define EMB_BATCH_THREADS_X 32 #define EMB_BATCH_BLOCKS_X 4 #define EMB_BATCH_BLOCKS_Y 128 __global__ void emb_ff_kernel(const float* w, const float* input, float* output, int batch_size, int seq_length, int emb_size) { int itx = threadIdx.y * blockDim.x + threadIdx.x; int ity = blockIdx.x; for (int batchid = blockIdx.y; batchid < batch_size; batchid += gridDim.y) { //DTYPE *dst_t = output + top_offset[batchid] * len; float *dst_t = output + batchid * seq_length * emb_size; //const DTYPE *index_t = word + offset[batchid]; const float* index_t = input + batchid * seq_length; for (int j = ity; j < seq_length; j += gridDim.x) { const float* emb_t = w + static_cast<unsigned int>(index_t[j]) * emb_size; float *dst_x = dst_t + j * emb_size; for (int i = itx; i < emb_size; i += blockDim.x * blockDim.y) { dst_x[i] = emb_t[i]; } } } } void emb_ff(const float* w, const float* input, float* output, int batch_size, int seq_length, int emb_size) { dim3 blocks(EMB_BATCH_BLOCKS_X, EMB_BATCH_BLOCKS_Y); dim3 threads(EMB_BATCH_THREADS_X, 1); emb_ff_kernel<<< blocks, threads >>> (w, input, output, batch_size, seq_length, emb_size); } __global__ void emb_bp_kernel(float* w, const float* input, const float* grad_output, int batch_size, int seq_length, int emb_size, const float mlr) { int itx = threadIdx.y * blockDim.x + threadIdx.x; for (int batchid = blockIdx.y; batchid < batch_size; batchid += gridDim.y) { const float* word = input + batchid * seq_length; for (int ity = blockIdx.x; ity < seq_length; ity += gridDim.x) { const float* grad_t = grad_output + (batchid * seq_length + ity) * emb_size; float* dst_t = w + static_cast<unsigned int>(word[ity]) * emb_size; for (int i = itx; i < emb_size; i += blockDim.x * blockDim.y) { atomicAdd(dst_t + i, mlr * grad_t[i]); } } } } void emb_bp(float* w, const float* input, const float* grad_output, int batch_size, int seq_length, int emb_size, const float mlr) { dim3 blocks(EMB_BATCH_BLOCKS_X, EMB_BATCH_BLOCKS_Y); dim3 threads(EMB_BATCH_THREADS_X, 1); emb_bp_kernel<<< blocks, threads >>> (w, input, grad_output, batch_size, seq_length, emb_size, mlr); } ////////////////////////////////////////////////////// // embedding ff/bp for feeding to rnn compute // ff result shape is seq_length * batch * emb_size ///////////////////////////////////////////////////// __global__ void emb_ff_for_rnn_kernel(const float* w, const float* input, float* output, int batch_size, int seq_length, int emb_size) { int total = seq_length * batch_size * emb_size; CUDA_KERNEL_LOOP(i, total) { int row = i / emb_size; int column = i % emb_size; const float* emb_t = w + static_cast<unsigned int>(input[row]) * emb_size; output[i] = emb_t[column]; } } void emb_ff_for_rnn(const float* w, const float* input, float* output, int batch_size, int seq_length, int emb_size) { int total = seq_length * 
batch_size * emb_size; const dim3 blockSize(CUDA_NUM_THREADS, 1, 1); const dim3 gridSize(GET_BLOCKS(total), 1, 1); emb_ff_for_rnn_kernel<<< gridSize, blockSize >>> (w, input, output, batch_size, seq_length, emb_size); } __global__ void emb_bp_for_rnn_kernel(float* w, const float* input, const float* grad_output, int batch_size, int seq_length, int emb_size, const float mlr) { int total = seq_length * batch_size * emb_size; CUDA_KERNEL_LOOP(i, total) { int row = i / emb_size; int column = i % emb_size; float* emb_t = w + static_cast<unsigned int>(input[row]) * emb_size; atomicAdd(emb_t + column, mlr * grad_output[i]); } } void emb_bp_for_rnn(float* w, const float* input, const float* grad_output, int batch_size, int seq_length, int emb_size, const float mlr) { int total = seq_length * batch_size * emb_size; emb_bp_for_rnn_kernel<<<GET_BLOCKS(total), CUDA_NUM_THREADS>>>( w, input, grad_output, batch_size, seq_length, emb_size, mlr); } __global__ void negative_loss_ff_kernel( const float* input, const float* labels, float* output, int batch, int num_labels, int pad_id) { CUDA_KERNEL_LOOP(i, batch) { unsigned int true_label = static_cast<unsigned int>(labels[i]); if (true_label != pad_id) { output[i] = -input[i * num_labels + true_label]; } else { output[i] = 0.0; } } } // TODO: return the result of real examples (not pad_id) void negative_loss_ff( const float* input, const float* labels, float* output, int batch, int num_labels, int pad_id) { negative_loss_ff_kernel<<<GET_BLOCKS(batch), CUDA_NUM_THREADS>>>( input, labels, output, batch, num_labels, pad_id); } __global__ void negative_loss_bp_kernel( const float* input, const float* labels, float* output, int batch, int num_labels, float loss_factor, int pad_id) { CUDA_KERNEL_LOOP(i, batch * num_labels) { unsigned int batch_id = i / num_labels; unsigned int this_label = i % num_labels; unsigned int true_label = static_cast<unsigned int>(labels[batch_id]); if (true_label == pad_id || this_label != true_label) { output[i] = 0.0; } else { output[i] = -loss_factor; } } } void negative_loss_bp( const float* input, const float* labels, float* output, int batch, int num_labels, float loss_factor, int pad_id) { negative_loss_bp_kernel<<<GET_BLOCKS(batch * num_labels), CUDA_NUM_THREADS>>>( input, labels, output, batch, num_labels, loss_factor, pad_id); } __global__ void add_at_w_and_u_terms_and_nonlinear_kernel( const float* w_terms, const float* u_terms, float* alignment_feats, int seq_len, int batch_size, int alignment_model_size) { CUDA_KERNEL_LOOP(i, seq_len * batch_size * alignment_model_size) { unsigned int col_id = i % (batch_size * alignment_model_size); alignment_feats[i] = SEQ2SEQ_TANH(w_terms[col_id] + u_terms[i]); } } void add_at_w_and_u_terms_and_nonlinear( const float* w_terms, const float* u_terms, float* alignment_feats, int seq_len, int batch_size, int alignment_model_size) { add_at_w_and_u_terms_and_nonlinear_kernel<<<GET_BLOCKS(seq_len * batch_size * alignment_model_size), CUDA_NUM_THREADS>>>( w_terms, u_terms, alignment_feats, seq_len, batch_size, alignment_model_size); } __global__ void add_at_w_and_u_terms_and_nonlinear_bp_kernel( const float* alignment_feats, const float* alignment_feats_diff, float* w_terms_diff, float* u_terms_diff, int seq_len, int batch_size, int alignment_model_size) { CUDA_KERNEL_LOOP(i, seq_len * batch_size * alignment_model_size) { unsigned int col_id = i % (batch_size * alignment_model_size); float tanhd = SEQ2SEQ_TANH_D(alignment_feats[i]) * alignment_feats_diff[i]; u_terms_diff[i] += tanhd; 
atomicAdd(w_terms_diff + col_id, tanhd); } // TODO: avoid atomicAdd } void add_at_w_and_u_terms_and_nonlinear_bp( const float* alignment_feats, const float* alignment_feats_diff, float* w_terms_diff, float* u_terms_diff, int seq_len, int batch_size, int alignment_model_size) { add_at_w_and_u_terms_and_nonlinear_bp_kernel<<<GET_BLOCKS(seq_len * batch_size * alignment_model_size), CUDA_NUM_THREADS>>>( alignment_feats, alignment_feats_diff, w_terms_diff, u_terms_diff, seq_len, batch_size, alignment_model_size); } __global__ void compute_context_kernel( const float* attention_weights, const float* encoder_hidden, float* context, int seq_len, int batch_size, int hidden_size) { CUDA_KERNEL_LOOP(i, batch_size * 2 * hidden_size) { int batch_id = i / (2 * hidden_size); context[i] = 0.0; for (int k = 0; k < seq_len; ++k) { context[i] += encoder_hidden[k * batch_size * 2 * hidden_size + i] \ * attention_weights[k * batch_size + batch_id]; } } } void compute_context( const float* attention_weights, const float* encoder_hidden, float* context, int seq_len, int batch_size, int hidden_size) { compute_context_kernel<<<GET_BLOCKS(batch_size * 2 * hidden_size), CUDA_NUM_THREADS>>>( attention_weights, encoder_hidden, context, seq_len, batch_size, hidden_size); } __global__ void bp_compute_context_kernel( const float* context_diff, const float* attention_weights, const float* encoder_hidden, float* attention_weights_diff, float* encoder_hidden_diff, int seq_len, int batch_size, int hidden_size) { CUDA_KERNEL_LOOP(i, seq_len * batch_size * 2 * hidden_size) { int j = i / (2 * hidden_size); int k = i % (batch_size * 2 * hidden_size); atomicAdd(attention_weights_diff + j, encoder_hidden[i] * context_diff[k]); // NOTICE, use += here, since every step in decoder has this diff encoder_hidden_diff[i] += attention_weights[j] * context_diff[k]; //atomicAdd(encoder_hidden_diff + i, attention_weights[j] * context_diff[k]); } // TODO: use a reduce paradigm to avoid atomicAdd } void bp_compute_context( const float* context_diff, const float* attention_weights, const float* encoder_hidden, float* attention_weights_diff, float* encoder_hidden_diff, int seq_len, int batch_size, int hidden_size) { // CAUTION HERE: only memset attention weights diff, dont memset hidden_diff // cudaMemset(encoder_hidden_diff, 0.0, sizeof(float) * seq_len * batch_size * 2 * hidden_size); cudaMemset(attention_weights_diff, 0.0, sizeof(float) * seq_len * batch_size); bp_compute_context_kernel<<<GET_BLOCKS(seq_len * batch_size * 2 * hidden_size), CUDA_NUM_THREADS>>>( context_diff, attention_weights, encoder_hidden, attention_weights_diff, encoder_hidden_diff, seq_len, batch_size, hidden_size); } __global__ void attention_decoder_ff_nonlinear_kernel( const float* h_data_tm1, const float* pre_gate_data_w_t, const float* pre_gate_data_u_t, const float* pre_gate_data_c_t, float* gate_data_t, float* h_data_t, const int batch_size, const int hidden_size) { CUDA_KERNEL_LOOP(i, batch_size * hidden_size) { int batch_id = i / hidden_size; int k = i % hidden_size; // reset gate int r_idx = batch_id * 3 * hidden_size + k; gate_data_t[r_idx] = SEQ2SEQ_SIGMOID(pre_gate_data_w_t[r_idx] \ + pre_gate_data_u_t[r_idx] + pre_gate_data_c_t[r_idx]); // update gate int z_idx = (batch_id * 3 + 1) * hidden_size + k; gate_data_t[z_idx] = SEQ2SEQ_SIGMOID(pre_gate_data_w_t[z_idx] \ + pre_gate_data_u_t[z_idx] \ + pre_gate_data_c_t[z_idx]); // new gate int n_idx = (batch_id * 3 + 2 ) * hidden_size + k; gate_data_t[n_idx] = SEQ2SEQ_TANH(pre_gate_data_w_t[n_idx] \ + 
gate_data_t[r_idx] * pre_gate_data_u_t[n_idx] \ + pre_gate_data_c_t[n_idx]); // output h_data_t[i] = (1.0 - gate_data_t[z_idx]) * h_data_tm1[i] + gate_data_t[z_idx] * gate_data_t[n_idx]; } } void attention_decoder_ff_nonlinear( const float* h_data_tm1, const float* pre_gate_data_w_t, const float* pre_gate_data_u_t, const float* pre_gate_data_c_t, float* gate_data_t, float* h_data_t, const int batch_size, const int hidden_size) { attention_decoder_ff_nonlinear_kernel<<<GET_BLOCKS(batch_size * hidden_size), CUDA_NUM_THREADS>>>( h_data_tm1, pre_gate_data_w_t, pre_gate_data_u_t, pre_gate_data_c_t, gate_data_t, h_data_t, batch_size, hidden_size); } __global__ void attention_decoder_bp_nonlinear_kernel( const float* h_data_tm1, const float* h_diff_t, const float* gate_data_t, const float* pre_gate_data_u_t, float* h_diff_tm1, float* pre_gate_diff_w_t, float* pre_gate_diff_u_t, float* pre_gate_diff_c_t, float* gate_diff_t, const int batch_size, const int hidden_size) { CUDA_KERNEL_LOOP(i, batch_size * hidden_size) { int batch_id = i / hidden_size; int k = i % hidden_size; // reset gate index int r_idx = batch_id * 3 * hidden_size + k; // update gate index int z_idx = (batch_id * 3 + 1) * hidden_size + k; // new gate index int n_idx = (batch_id * 3 + 2 ) * hidden_size + k; // grads wrt h_tm1, using += since it already has diff from upper computation h_diff_tm1[i] += (1.0 - gate_data_t[z_idx]) * h_diff_t[i]; // grads wrt new gate gate_diff_t[n_idx] = gate_data_t[z_idx] * h_diff_t[i]; // nonlinear grads float n_grad = gate_diff_t[n_idx] * SEQ2SEQ_TANH_D(gate_data_t[n_idx]); pre_gate_diff_w_t[n_idx] = n_grad; pre_gate_diff_u_t[n_idx] = n_grad * gate_data_t[r_idx]; pre_gate_diff_c_t[n_idx] = n_grad; // grads wrt update gate gate_diff_t[z_idx] = (gate_data_t[n_idx] - h_data_tm1[i]) * h_diff_t[i]; // nonlinear grads float z_grad = gate_diff_t[z_idx] * SEQ2SEQ_SIGMOID_D(gate_data_t[z_idx]); pre_gate_diff_w_t[z_idx] = z_grad; pre_gate_diff_u_t[z_idx] = z_grad; pre_gate_diff_c_t[z_idx] = z_grad; // grads wrt reset gate gate_diff_t[r_idx] = n_grad * pre_gate_data_u_t[n_idx]; float r_grad = gate_diff_t[r_idx] * SEQ2SEQ_SIGMOID_D(gate_data_t[r_idx]); pre_gate_diff_w_t[r_idx] = r_grad; pre_gate_diff_u_t[r_idx] = r_grad; pre_gate_diff_c_t[r_idx] = r_grad; } } void attention_decoder_bp_nonlinear( const float* h_data_tm1, const float* h_diff_t, const float* gate_data_t, const float* pre_gate_data_u_t, float* h_diff_tm1, float* pre_gate_diff_w_t, float* pre_gate_diff_u_t, float* pre_gate_diff_c_t, float* gate_diff_t, const int batch_size, const int hidden_size) { attention_decoder_bp_nonlinear_kernel<<<GET_BLOCKS(batch_size * hidden_size), CUDA_NUM_THREADS>>>( h_data_tm1, h_diff_t, gate_data_t, pre_gate_data_u_t, h_diff_tm1, pre_gate_diff_w_t, pre_gate_diff_u_t, pre_gate_diff_c_t, gate_diff_t, batch_size, hidden_size); } __global__ void copy_for_decoder_h0_data_kernel( const float* encoder_hidden_data, float* h0_data, int batch_size, int hidden_size) { CUDA_KERNEL_LOOP(i, batch_size * hidden_size) { int batch_id = i / hidden_size; int k = i % hidden_size; h0_data[i] = encoder_hidden_data[2 * hidden_size * batch_id + hidden_size + k]; } } void copy_for_decoder_h0_data( const float* encoder_hidden_data, float* h0_data, int batch_size, int hidden_size) { copy_for_decoder_h0_data_kernel<<<GET_BLOCKS(batch_size * hidden_size), CUDA_NUM_THREADS>>>( encoder_hidden_data, h0_data, batch_size, hidden_size); } __global__ void copy_for_decoder_h0_diff_kernel( const float* h0_diff, float* encoder_hidden_diff, int batch_size, 
int hidden_size) { CUDA_KERNEL_LOOP(i, batch_size * hidden_size) { int batch_id = i / hidden_size; int k = i % hidden_size; // use += here, since it already has diff encoder_hidden_diff[2 * hidden_size * batch_id + hidden_size + k] += h0_diff[i]; } } void copy_for_decoder_h0_diff( const float* h0_diff, float* encoder_hidden_diff, int batch_size, int hidden_size) { copy_for_decoder_h0_diff_kernel<<<GET_BLOCKS(batch_size * hidden_size), CUDA_NUM_THREADS>>>( h0_diff, encoder_hidden_diff, batch_size, hidden_size); } __global__ void maxout_ff_kernel( const float* pre_maxout_data, float* maxout_data, float* maxout_ele_idx, int total_output_size) { CUDA_KERNEL_LOOP(i, total_output_size) { int k = 2 * i; int kp1 = 2 * i + 1; if (pre_maxout_data[k] > pre_maxout_data[kp1]) { maxout_data[i] = pre_maxout_data[k]; maxout_ele_idx[i] = k; } else { maxout_data[i] = pre_maxout_data[kp1]; maxout_ele_idx[i] = kp1; } } } void maxout_ff( const float* pre_maxout_data, float* maxout_data, float* maxout_ele_idx, int total_output_size) { maxout_ff_kernel<<<GET_BLOCKS(total_output_size), CUDA_NUM_THREADS>>>( pre_maxout_data, maxout_data, maxout_ele_idx, total_output_size); } __global__ void maxout_bp_kernel( float* pre_maxout_diff, const float* maxout_diff, const float* maxout_ele_idx, int total_output_size) { CUDA_KERNEL_LOOP(i, total_output_size) { int idx = static_cast<int>(maxout_ele_idx[i]); pre_maxout_diff[idx] = maxout_diff[i]; } } void maxout_bp( float* pre_maxout_diff, const float* maxout_diff, const float* maxout_ele_idx, int total_output_size) { maxout_bp_kernel<<<GET_BLOCKS(total_output_size), CUDA_NUM_THREADS>>>( pre_maxout_diff, maxout_diff, maxout_ele_idx, total_output_size); } } // namespace seq2seq
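// The maxout_ff/maxout_bp kernels above pick the larger of each pair of adjacent
// pre-maxout elements and record the winner's index so the backward pass can route the
// gradient. The host-side sketch below mirrors that semantics for reference; the cpu_*
// names are hypothetical and the functions are illustrative, not part of the seq2seq
// sources.
static void cpu_maxout_ff(const float* pre_maxout_data, float* maxout_data,
                          float* maxout_ele_idx, int total_output_size) {
    for (int i = 0; i < total_output_size; ++i) {
        int k = 2 * i;         // first candidate of the pair
        int kp1 = 2 * i + 1;   // second candidate of the pair
        if (pre_maxout_data[k] > pre_maxout_data[kp1]) {
            maxout_data[i] = pre_maxout_data[k];
            maxout_ele_idx[i] = (float)k;
        } else {
            maxout_data[i] = pre_maxout_data[kp1];
            maxout_ele_idx[i] = (float)kp1;
        }
    }
}

static void cpu_maxout_bp(float* pre_maxout_diff, const float* maxout_diff,
                          const float* maxout_ele_idx, int total_output_size) {
    // Assumes pre_maxout_diff was zero-initialized by the caller; only the winning element
    // of each pair receives the incoming gradient.
    for (int i = 0; i < total_output_size; ++i) {
        int idx = (int)maxout_ele_idx[i];
        pre_maxout_diff[idx] = maxout_diff[i];
    }
}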
b72bf09bf4123fc2da4ef7e7dc0188ad67dea816.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace testing { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto* _pp_var_cai __attribute__((unused)) = params_.state_vars[0];\ auto& _pp_var_ion_ca __attribute__((unused)) = params_.ion_states[0];\ auto* _pp_var_ion_ca_index __attribute__((unused)) = params_.ion_states[0].index;\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __global__ void advance_state(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { _pp_var_cai[tid_] = 0.00051999999999999995; } } __global__ void write_ions(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_ca_indexi_ = _pp_var_ion_ca_index[tid_]; arb_value_type cai_shadowed_ = 0; cai_shadowed_ = _pp_var_cai[tid_]; _pp_var_ion_ca.internal_concentration[ion_ca_indexi_] = fma(_pp_var_weight[tid_], cai_shadowed_, _pp_var_ion_ca.internal_concentration[ion_ca_indexi_]); } } } // namespace void mechanism_write_cai_breakpoint_gpu_init_(arb_mechanism_ppack* p) {} void mechanism_write_cai_breakpoint_gpu_compute_currents_(arb_mechanism_ppack* p) {} void mechanism_write_cai_breakpoint_gpu_advance_state_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p); } void mechanism_write_cai_breakpoint_gpu_write_ions_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( write_ions), dim3(grid_dim), dim3(block_dim), 0, 0, *p); } void mechanism_write_cai_breakpoint_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_write_cai_breakpoint_gpu_apply_events_(arb_mechanism_ppack* p, 
arb_deliverable_event_stream* events) {} } // namespace testing
b72bf09bf4123fc2da4ef7e7dc0188ad67dea816.cu
#include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace testing { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto* _pp_var_cai __attribute__((unused)) = params_.state_vars[0];\ auto& _pp_var_ion_ca __attribute__((unused)) = params_.ion_states[0];\ auto* _pp_var_ion_ca_index __attribute__((unused)) = params_.ion_states[0].index;\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __global__ void advance_state(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { _pp_var_cai[tid_] = 0.00051999999999999995; } } __global__ void write_ions(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_ca_indexi_ = _pp_var_ion_ca_index[tid_]; arb_value_type cai_shadowed_ = 0; cai_shadowed_ = _pp_var_cai[tid_]; _pp_var_ion_ca.internal_concentration[ion_ca_indexi_] = fma(_pp_var_weight[tid_], cai_shadowed_, _pp_var_ion_ca.internal_concentration[ion_ca_indexi_]); } } } // namespace void mechanism_write_cai_breakpoint_gpu_init_(arb_mechanism_ppack* p) {} void mechanism_write_cai_breakpoint_gpu_compute_currents_(arb_mechanism_ppack* p) {} void mechanism_write_cai_breakpoint_gpu_advance_state_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); advance_state<<<grid_dim, block_dim>>>(*p); } void mechanism_write_cai_breakpoint_gpu_write_ions_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); write_ions<<<grid_dim, block_dim>>>(*p); } void mechanism_write_cai_breakpoint_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_write_cai_breakpoint_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {} } // namespace testing
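// Both GPU entry points above compute grid_dim with ::arb::gpu::impl::block_count. The
// helper below is a generic stand-in written for illustration (an assumption about the
// convention, not Arbor's implementation): it rounds the element count up to whole blocks
// so that widths that are not a multiple of block_dim still get one extra, partially
// filled block, which the tid_ < n_ guard inside the kernels then masks off.
static inline unsigned ceil_block_count(unsigned n, unsigned block_dim) {
    // e.g. n = 130, block_dim = 128 -> (130 + 127) / 128 = 2 blocks
    return (n + block_dim - 1) / block_dim;
}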
add434b1034d75c3dce60b3e26ab9833df9816af.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <algorithm> #include <cmath> #include <random> #include <vector> #include <raft/mr/device/allocator.hpp> #include <raft/cudart_utils.h> #include <test_utils.h> #include <linalg/batched/matrix.cuh> #include <raft/linalg/add.cuh> #include "../linalg_naive.h" #include "../test_utils.h" namespace MLCommon { namespace LinAlg { namespace Batched { enum MatrixOperation { AB_op, // Matrix-matrix product (with GEMM) AZT_op, // Matrix-vector product (with GEMM) ZA_op, // Vector-matrix product (with GEMM) ApB_op, // Addition AmB_op, // Substraction AkB_op, // Kronecker product AsolveZ_op, // Linear equation solver Ax=b LaggedZ_op, // Lag matrix CopyA2D_op, // 2D copy DiffA_op, // Vector first difference Hessenberg_op, // Hessenberg decomposition A=UHU' Schur_op, // Schur decomposition A=USU' Lyapunov_op, // Lyapunov equation solver AXA'-X+B=0 }; template <typename T> struct MatrixInputs { MatrixOperation operation; int batch_size; int m; // Usually the dimensions of A and/or Z int n; int p; // Usually the dimensions of B or other parameters int q; int s; // Additional parameters for operations that need more than 4 int t; T tolerance; }; template <typename T> class MatrixTest : public ::testing::TestWithParam<MatrixInputs<T>> { protected: void SetUp() override { using std::vector; params = ::testing::TestWithParam<MatrixInputs<T>>::GetParam(); // Find out whether A, B and Z will be used (depending on the operation) bool use_A = (params.operation != LaggedZ_op); bool use_B = (params.operation == AB_op) || (params.operation == ApB_op) || (params.operation == AmB_op) || (params.operation == AkB_op) || (params.operation == Lyapunov_op); bool use_Z = (params.operation == AZT_op) || (params.operation == ZA_op) || (params.operation == AsolveZ_op) || (params.operation == LaggedZ_op); bool Z_col = (params.operation == AsolveZ_op); int r = params.operation == AZT_op ? 
params.n : params.m; // Check if the dimensions are valid and compute the output dimensions int m_r, n_r; switch (params.operation) { case AB_op: ASSERT_TRUE(params.n == params.p); m_r = params.m; n_r = params.q; break; case ApB_op: case AmB_op: ASSERT_TRUE(params.m == params.p && params.n == params.q); m_r = params.m; n_r = params.n; break; case AkB_op: m_r = params.m * params.p; n_r = params.n * params.q; break; case AZT_op: m_r = params.m; n_r = 1; break; case ZA_op: m_r = 1; n_r = params.n; break; case AsolveZ_op: ASSERT_TRUE(params.n == params.m); // For this test we multiply A by the solution and check against Z m_r = params.m; n_r = 1; break; case LaggedZ_op: // For this operation params.n holds the number of lags m_r = params.m - params.n; n_r = params.n; break; case CopyA2D_op: // For this operation p and q are the dimensions of the copy window m_r = params.p; n_r = params.q; break; case DiffA_op: // Note: A can represent either a row or column vector ASSERT_TRUE(params.m == 1 || params.n == 1); m_r = ::max(1, params.m - 1); n_r = ::max(1, params.n - 1); break; case Hessenberg_op: case Schur_op: case Lyapunov_op: ASSERT_TRUE(params.m == params.n && params.m == params.p && params.m == params.q); m_r = params.m; n_r = params.m; break; } // Create test matrices and vector std::vector<T> A; std::vector<T> B; std::vector<T> Z; if (use_A) A.resize(params.batch_size * params.m * params.n); if (use_B) B.resize(params.batch_size * params.p * params.q); if (use_Z) Z.resize(params.batch_size * r); // Generate random data std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> udis(-1.0, 3.0); for (int i = 0; i < A.size(); i++) A[i] = udis(gen); for (int i = 0; i < B.size(); i++) B[i] = udis(gen); for (int i = 0; i < Z.size(); i++) Z[i] = udis(gen); // Create handles, stream, allocator CUBLAS_CHECK(hipblasCreate(&handle)); CUDA_CHECK(hipStreamCreate(&stream)); auto allocator = std::make_shared<raft::mr::device::default_allocator>(); // Created batched matrices Matrix<T> AbM(params.m, params.n, params.batch_size, handle, allocator, stream); Matrix<T> BbM(params.p, params.q, params.batch_size, handle, allocator, stream); Matrix<T> ZbM(Z_col ? r : 1, Z_col ? 1 : r, params.batch_size, handle, allocator, stream); // Copy the data to the device if (use_A) raft::update_device(AbM.raw_data(), A.data(), A.size(), stream); if (use_B) raft::update_device(BbM.raw_data(), B.data(), B.size(), stream); if (use_Z) raft::update_device(ZbM.raw_data(), Z.data(), Z.size(), stream); // Create fake batched matrices to be overwritten by results res_bM = new Matrix<T>(1, 1, 1, handle, allocator, stream); // Compute the tested results switch (params.operation) { case AB_op: *res_bM = AbM * BbM; break; case ApB_op: *res_bM = AbM + BbM; break; case AmB_op: *res_bM = AbM - BbM; break; case AkB_op: *res_bM = b_kron(AbM, BbM); break; case AZT_op: *res_bM = b_gemm(AbM, ZbM, false, true); break; case ZA_op: *res_bM = ZbM * AbM; break; case AsolveZ_op: // A * A\Z -> should be Z *res_bM = AbM * b_solve(AbM, ZbM); break; case LaggedZ_op: *res_bM = b_lagged_mat(ZbM, params.n); break; case CopyA2D_op: *res_bM = b_2dcopy(AbM, params.s, params.t, params.p, params.q); break; case DiffA_op: *res_bM = AbM.difference(); break; case Hessenberg_op: { constexpr T zero_tolerance = std::is_same<T, double>::value ? 
1e-7 : 1e-3f; int n = params.m; Matrix<T> HbM(n, n, params.batch_size, handle, allocator, stream); Matrix<T> UbM(n, n, params.batch_size, handle, allocator, stream); b_hessenberg(AbM, UbM, HbM); // Check that H is in Hessenberg form std::vector<T> H = std::vector<T>(n * n * params.batch_size); raft::update_host(H.data(), HbM.raw_data(), H.size(), stream); CUDA_CHECK(hipStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int j = 0; j < n - 2; j++) { for (int i = j + 2; i < n; i++) { ASSERT_TRUE(raft::abs(H[n * n * ib + n * j + i]) < zero_tolerance); } } } // Check that U is unitary (UU'=I) std::vector<T> UUt = std::vector<T>(n * n * params.batch_size); raft::update_host(UUt.data(), b_gemm(UbM, UbM, false, true).raw_data(), UUt.size(), stream); CUDA_CHECK(hipStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { ASSERT_TRUE(raft::abs(UUt[n * n * ib + n * j + i] - (i == j ? (T)1 : (T)0)) < zero_tolerance); } } } // Write UHU' in the result (will be compared against A) *res_bM = UbM * b_gemm(HbM, UbM, false, true); break; } case Schur_op: { constexpr T zero_tolerance = std::is_same<T, double>::value ? 1e-7 : 1e-3f; int n = params.m; Matrix<T> SbM(n, n, params.batch_size, handle, allocator, stream); Matrix<T> UbM(n, n, params.batch_size, handle, allocator, stream); b_schur(AbM, UbM, SbM); // Check that S is in Schur form std::vector<T> S = std::vector<T>(n * n * params.batch_size); raft::update_host(S.data(), SbM.raw_data(), S.size(), stream); CUDA_CHECK(hipStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int j = 0; j < n - 2; j++) { for (int i = j + 2; i < n; i++) { ASSERT_TRUE(raft::abs(S[n * n * ib + n * j + i]) < zero_tolerance); } } } for (int ib = 0; ib < params.batch_size; ib++) { for (int k = 0; k < n - 3; k++) { ASSERT_FALSE( raft::abs(S[n * n * ib + n * k + k + 1]) > zero_tolerance && raft::abs(S[n * n * ib + n * (k + 1) + k + 2]) > zero_tolerance && raft::abs(S[n * n * ib + n * (k + 2) + k + 3]) > zero_tolerance); } } // Check that U is unitary (UU'=I) std::vector<T> UUt = std::vector<T>(n * n * params.batch_size); raft::update_host(UUt.data(), b_gemm(UbM, UbM, false, true).raw_data(), UUt.size(), stream); CUDA_CHECK(hipStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { ASSERT_TRUE(raft::abs(UUt[n * n * ib + n * j + i] - (i == j ? 
(T)1 : (T)0)) < zero_tolerance); } } } // Write USU' in the result (will be compared against A) *res_bM = UbM * b_gemm(SbM, UbM, false, true); break; } case Lyapunov_op: { Matrix<T> XbM = b_lyapunov(AbM, BbM); // Write AXA'-X in the result (will be compared against -B) *res_bM = AbM * b_gemm(XbM, AbM, false, true) - XbM; break; } } // Compute the expected results res_h.resize(params.batch_size * m_r * n_r); switch (params.operation) { case AB_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::matMul(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, B.data() + bid * params.p * params.q, params.m, params.n, params.q); } break; case ApB_op: Naive::add(res_h.data(), A.data(), B.data(), A.size()); break; case AmB_op: Naive::add(res_h.data(), A.data(), B.data(), A.size(), T(-1.0)); break; case AkB_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::kronecker(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, B.data() + bid * params.p * params.q, params.m, params.n, params.p, params.q); } break; case AZT_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::matMul(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, Z.data() + bid * r, params.m, params.n, 1); } break; case ZA_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::matMul(res_h.data() + bid * m_r * n_r, Z.data() + bid * r, A.data() + bid * params.m * params.n, 1, params.m, params.n); } break; case AsolveZ_op: // Simply copy Z in the result memcpy(res_h.data(), Z.data(), r * params.batch_size * sizeof(T)); break; case LaggedZ_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::laggedMat(res_h.data() + bid * m_r * n_r, Z.data() + bid * params.m, params.m, params.n); } break; case CopyA2D_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::copy2D(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, params.s, params.t, params.m, m_r, n_r); } break; case DiffA_op: { int len = params.m * params.n; for (int bid = 0; bid < params.batch_size; bid++) { Naive::diff(res_h.data() + bid * (len - 1), A.data() + bid * len, len); } break; } case Hessenberg_op: case Schur_op: // Simply copy A (will be compared against UHU') memcpy(res_h.data(), A.data(), params.m * params.m * params.batch_size * sizeof(T)); break; case Lyapunov_op: // Simply copy -B (will be compared against AXA'-X) for (int i = 0; i < params.m * params.m * params.batch_size; i++) { res_h[i] = -B[i]; } break; } CUDA_CHECK(hipStreamSynchronize(stream)); } void TearDown() override { delete res_bM; CUBLAS_CHECK(hipblasDestroy(handle)); CUDA_CHECK(hipStreamDestroy(stream)); } protected: MatrixInputs<T> params; Matrix<T> *res_bM; std::vector<T> res_h; hipblasHandle_t handle; hipStream_t stream; }; // Test parameters (op, batch_size, m, n, p, q, s, t, tolerance) const std::vector<MatrixInputs<double>> inputsd = { {AB_op, 7, 15, 37, 37, 11, 0, 0, 1e-6}, {AZT_op, 5, 33, 65, 1, 1, 0, 0, 1e-6}, {ZA_op, 8, 12, 41, 1, 1, 0, 0, 1e-6}, {ApB_op, 4, 16, 48, 16, 48, 0, 0, 1e-6}, {AmB_op, 17, 9, 3, 9, 3, 0, 0, 1e-6}, {AkB_op, 5, 3, 13, 31, 8, 0, 0, 1e-6}, {AkB_op, 3, 7, 12, 31, 15, 0, 0, 1e-6}, {AkB_op, 2, 11, 2, 8, 46, 0, 0, 1e-6}, {AsolveZ_op, 6, 17, 17, 1, 1, 0, 0, 1e-6}, {LaggedZ_op, 5, 31, 9, 1, 1, 0, 0, 1e-6}, {LaggedZ_op, 7, 129, 3, 1, 1, 0, 0, 1e-6}, {CopyA2D_op, 11, 31, 63, 17, 14, 5, 9, 1e-6}, {CopyA2D_op, 4, 33, 7, 30, 4, 3, 0, 1e-6}, {DiffA_op, 5, 11, 1, 1, 1, 0, 0, 1e-6}, {DiffA_op, 15, 1, 37, 1, 1, 0, 0, 1e-6}, {Hessenberg_op, 10, 15, 15, 15, 15, 0, 0, 
1e-6}, {Hessenberg_op, 30, 61, 61, 61, 61, 0, 0, 1e-6}, // {Schur_op, 7, 12, 12, 12, 12, 0, 0, 1e-3}, // {Schur_op, 17, 77, 77, 77, 77, 0, 0, 1e-3}, // {Lyapunov_op, 5, 14, 14, 14, 14, 0, 0, 1e-2}, // {Lyapunov_op, 13, 100, 100, 100, 100, 0, 0, 1e-2} }; // Note: Schur and Lyapunov tests have had stability issues on CI so // they are disabled temporarily. See issue: // https://github.com/rapidsai/cuml/issues/1949 // Test parameters (op, batch_size, m, n, p, q, s, t, tolerance) const std::vector<MatrixInputs<float>> inputsf = { {AB_op, 7, 15, 37, 37, 11, 0, 0, 1e-2}, {AZT_op, 5, 33, 65, 1, 1, 0, 0, 1e-2}, {ZA_op, 8, 12, 41, 1, 1, 0, 0, 1e-2}, {ApB_op, 4, 16, 48, 16, 48, 0, 0, 1e-2}, {AmB_op, 17, 9, 3, 9, 3, 0, 0, 1e-2}, {AkB_op, 5, 3, 13, 31, 8, 0, 0, 1e-2}, {AkB_op, 3, 7, 12, 31, 15, 0, 0, 1e-2}, {AkB_op, 2, 11, 2, 8, 46, 0, 0, 1e-2}, {AsolveZ_op, 6, 17, 17, 1, 1, 0, 0, 1e-2}, {LaggedZ_op, 5, 31, 9, 1, 1, 0, 0, 1e-5}, {LaggedZ_op, 7, 129, 3, 1, 1, 0, 0, 1e-5}, {CopyA2D_op, 11, 31, 63, 17, 14, 5, 9, 1e-5}, {CopyA2D_op, 4, 33, 7, 30, 4, 3, 0, 1e-5}, {DiffA_op, 5, 11, 1, 1, 1, 0, 0, 1e-2}, {DiffA_op, 15, 1, 37, 1, 1, 0, 0, 1e-2}, {Hessenberg_op, 10, 15, 15, 15, 15, 0, 0, 1e-2}, {Hessenberg_op, 30, 61, 61, 61, 61, 0, 0, 1e-2}, // {Schur_op, 7, 12, 12, 12, 12, 0, 0, 1e-2}, // {Schur_op, 17, 77, 77, 77, 77, 0, 0, 1e-2}, // {Lyapunov_op, 5, 14, 14, 14, 14, 0, 0, 1e-2}, // {Lyapunov_op, 13, 100, 100, 100, 100, 0, 0, 1e-2} }; // Note: Schur and Lyapunov operations don't give good precision for // single-precision floating-point numbers yet... using BatchedMatrixTestD = MatrixTest<double>; using BatchedMatrixTestF = MatrixTest<float>; TEST_P(BatchedMatrixTestD, Result) { ASSERT_TRUE(raft::devArrMatchHost( res_h.data(), res_bM->raw_data(), res_h.size(), raft::CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedMatrixTestF, Result) { ASSERT_TRUE(raft::devArrMatchHost( res_h.data(), res_bM->raw_data(), res_h.size(), raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedMatrixTests, BatchedMatrixTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedMatrixTests, BatchedMatrixTestF, ::testing::ValuesIn(inputsf)); } // namespace Batched } // namespace LinAlg } // namespace MLCommon
add434b1034d75c3dce60b3e26ab9833df9816af.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <algorithm> #include <cmath> #include <random> #include <vector> #include <raft/mr/device/allocator.hpp> #include <raft/cudart_utils.h> #include <test_utils.h> #include <linalg/batched/matrix.cuh> #include <raft/linalg/add.cuh> #include "../linalg_naive.h" #include "../test_utils.h" namespace MLCommon { namespace LinAlg { namespace Batched { enum MatrixOperation { AB_op, // Matrix-matrix product (with GEMM) AZT_op, // Matrix-vector product (with GEMM) ZA_op, // Vector-matrix product (with GEMM) ApB_op, // Addition AmB_op, // Substraction AkB_op, // Kronecker product AsolveZ_op, // Linear equation solver Ax=b LaggedZ_op, // Lag matrix CopyA2D_op, // 2D copy DiffA_op, // Vector first difference Hessenberg_op, // Hessenberg decomposition A=UHU' Schur_op, // Schur decomposition A=USU' Lyapunov_op, // Lyapunov equation solver AXA'-X+B=0 }; template <typename T> struct MatrixInputs { MatrixOperation operation; int batch_size; int m; // Usually the dimensions of A and/or Z int n; int p; // Usually the dimensions of B or other parameters int q; int s; // Additional parameters for operations that need more than 4 int t; T tolerance; }; template <typename T> class MatrixTest : public ::testing::TestWithParam<MatrixInputs<T>> { protected: void SetUp() override { using std::vector; params = ::testing::TestWithParam<MatrixInputs<T>>::GetParam(); // Find out whether A, B and Z will be used (depending on the operation) bool use_A = (params.operation != LaggedZ_op); bool use_B = (params.operation == AB_op) || (params.operation == ApB_op) || (params.operation == AmB_op) || (params.operation == AkB_op) || (params.operation == Lyapunov_op); bool use_Z = (params.operation == AZT_op) || (params.operation == ZA_op) || (params.operation == AsolveZ_op) || (params.operation == LaggedZ_op); bool Z_col = (params.operation == AsolveZ_op); int r = params.operation == AZT_op ? 
params.n : params.m; // Check if the dimensions are valid and compute the output dimensions int m_r, n_r; switch (params.operation) { case AB_op: ASSERT_TRUE(params.n == params.p); m_r = params.m; n_r = params.q; break; case ApB_op: case AmB_op: ASSERT_TRUE(params.m == params.p && params.n == params.q); m_r = params.m; n_r = params.n; break; case AkB_op: m_r = params.m * params.p; n_r = params.n * params.q; break; case AZT_op: m_r = params.m; n_r = 1; break; case ZA_op: m_r = 1; n_r = params.n; break; case AsolveZ_op: ASSERT_TRUE(params.n == params.m); // For this test we multiply A by the solution and check against Z m_r = params.m; n_r = 1; break; case LaggedZ_op: // For this operation params.n holds the number of lags m_r = params.m - params.n; n_r = params.n; break; case CopyA2D_op: // For this operation p and q are the dimensions of the copy window m_r = params.p; n_r = params.q; break; case DiffA_op: // Note: A can represent either a row or column vector ASSERT_TRUE(params.m == 1 || params.n == 1); m_r = std::max(1, params.m - 1); n_r = std::max(1, params.n - 1); break; case Hessenberg_op: case Schur_op: case Lyapunov_op: ASSERT_TRUE(params.m == params.n && params.m == params.p && params.m == params.q); m_r = params.m; n_r = params.m; break; } // Create test matrices and vector std::vector<T> A; std::vector<T> B; std::vector<T> Z; if (use_A) A.resize(params.batch_size * params.m * params.n); if (use_B) B.resize(params.batch_size * params.p * params.q); if (use_Z) Z.resize(params.batch_size * r); // Generate random data std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> udis(-1.0, 3.0); for (int i = 0; i < A.size(); i++) A[i] = udis(gen); for (int i = 0; i < B.size(); i++) B[i] = udis(gen); for (int i = 0; i < Z.size(); i++) Z[i] = udis(gen); // Create handles, stream, allocator CUBLAS_CHECK(cublasCreate(&handle)); CUDA_CHECK(cudaStreamCreate(&stream)); auto allocator = std::make_shared<raft::mr::device::default_allocator>(); // Created batched matrices Matrix<T> AbM(params.m, params.n, params.batch_size, handle, allocator, stream); Matrix<T> BbM(params.p, params.q, params.batch_size, handle, allocator, stream); Matrix<T> ZbM(Z_col ? r : 1, Z_col ? 1 : r, params.batch_size, handle, allocator, stream); // Copy the data to the device if (use_A) raft::update_device(AbM.raw_data(), A.data(), A.size(), stream); if (use_B) raft::update_device(BbM.raw_data(), B.data(), B.size(), stream); if (use_Z) raft::update_device(ZbM.raw_data(), Z.data(), Z.size(), stream); // Create fake batched matrices to be overwritten by results res_bM = new Matrix<T>(1, 1, 1, handle, allocator, stream); // Compute the tested results switch (params.operation) { case AB_op: *res_bM = AbM * BbM; break; case ApB_op: *res_bM = AbM + BbM; break; case AmB_op: *res_bM = AbM - BbM; break; case AkB_op: *res_bM = b_kron(AbM, BbM); break; case AZT_op: *res_bM = b_gemm(AbM, ZbM, false, true); break; case ZA_op: *res_bM = ZbM * AbM; break; case AsolveZ_op: // A * A\Z -> should be Z *res_bM = AbM * b_solve(AbM, ZbM); break; case LaggedZ_op: *res_bM = b_lagged_mat(ZbM, params.n); break; case CopyA2D_op: *res_bM = b_2dcopy(AbM, params.s, params.t, params.p, params.q); break; case DiffA_op: *res_bM = AbM.difference(); break; case Hessenberg_op: { constexpr T zero_tolerance = std::is_same<T, double>::value ? 
1e-7 : 1e-3f; int n = params.m; Matrix<T> HbM(n, n, params.batch_size, handle, allocator, stream); Matrix<T> UbM(n, n, params.batch_size, handle, allocator, stream); b_hessenberg(AbM, UbM, HbM); // Check that H is in Hessenberg form std::vector<T> H = std::vector<T>(n * n * params.batch_size); raft::update_host(H.data(), HbM.raw_data(), H.size(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int j = 0; j < n - 2; j++) { for (int i = j + 2; i < n; i++) { ASSERT_TRUE(raft::abs(H[n * n * ib + n * j + i]) < zero_tolerance); } } } // Check that U is unitary (UU'=I) std::vector<T> UUt = std::vector<T>(n * n * params.batch_size); raft::update_host(UUt.data(), b_gemm(UbM, UbM, false, true).raw_data(), UUt.size(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { ASSERT_TRUE(raft::abs(UUt[n * n * ib + n * j + i] - (i == j ? (T)1 : (T)0)) < zero_tolerance); } } } // Write UHU' in the result (will be compared against A) *res_bM = UbM * b_gemm(HbM, UbM, false, true); break; } case Schur_op: { constexpr T zero_tolerance = std::is_same<T, double>::value ? 1e-7 : 1e-3f; int n = params.m; Matrix<T> SbM(n, n, params.batch_size, handle, allocator, stream); Matrix<T> UbM(n, n, params.batch_size, handle, allocator, stream); b_schur(AbM, UbM, SbM); // Check that S is in Schur form std::vector<T> S = std::vector<T>(n * n * params.batch_size); raft::update_host(S.data(), SbM.raw_data(), S.size(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int j = 0; j < n - 2; j++) { for (int i = j + 2; i < n; i++) { ASSERT_TRUE(raft::abs(S[n * n * ib + n * j + i]) < zero_tolerance); } } } for (int ib = 0; ib < params.batch_size; ib++) { for (int k = 0; k < n - 3; k++) { ASSERT_FALSE( raft::abs(S[n * n * ib + n * k + k + 1]) > zero_tolerance && raft::abs(S[n * n * ib + n * (k + 1) + k + 2]) > zero_tolerance && raft::abs(S[n * n * ib + n * (k + 2) + k + 3]) > zero_tolerance); } } // Check that U is unitary (UU'=I) std::vector<T> UUt = std::vector<T>(n * n * params.batch_size); raft::update_host(UUt.data(), b_gemm(UbM, UbM, false, true).raw_data(), UUt.size(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { ASSERT_TRUE(raft::abs(UUt[n * n * ib + n * j + i] - (i == j ? 
(T)1 : (T)0)) < zero_tolerance); } } } // Write USU' in the result (will be compared against A) *res_bM = UbM * b_gemm(SbM, UbM, false, true); break; } case Lyapunov_op: { Matrix<T> XbM = b_lyapunov(AbM, BbM); // Write AXA'-X in the result (will be compared against -B) *res_bM = AbM * b_gemm(XbM, AbM, false, true) - XbM; break; } } // Compute the expected results res_h.resize(params.batch_size * m_r * n_r); switch (params.operation) { case AB_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::matMul(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, B.data() + bid * params.p * params.q, params.m, params.n, params.q); } break; case ApB_op: Naive::add(res_h.data(), A.data(), B.data(), A.size()); break; case AmB_op: Naive::add(res_h.data(), A.data(), B.data(), A.size(), T(-1.0)); break; case AkB_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::kronecker(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, B.data() + bid * params.p * params.q, params.m, params.n, params.p, params.q); } break; case AZT_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::matMul(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, Z.data() + bid * r, params.m, params.n, 1); } break; case ZA_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::matMul(res_h.data() + bid * m_r * n_r, Z.data() + bid * r, A.data() + bid * params.m * params.n, 1, params.m, params.n); } break; case AsolveZ_op: // Simply copy Z in the result memcpy(res_h.data(), Z.data(), r * params.batch_size * sizeof(T)); break; case LaggedZ_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::laggedMat(res_h.data() + bid * m_r * n_r, Z.data() + bid * params.m, params.m, params.n); } break; case CopyA2D_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::copy2D(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, params.s, params.t, params.m, m_r, n_r); } break; case DiffA_op: { int len = params.m * params.n; for (int bid = 0; bid < params.batch_size; bid++) { Naive::diff(res_h.data() + bid * (len - 1), A.data() + bid * len, len); } break; } case Hessenberg_op: case Schur_op: // Simply copy A (will be compared against UHU') memcpy(res_h.data(), A.data(), params.m * params.m * params.batch_size * sizeof(T)); break; case Lyapunov_op: // Simply copy -B (will be compared against AXA'-X) for (int i = 0; i < params.m * params.m * params.batch_size; i++) { res_h[i] = -B[i]; } break; } CUDA_CHECK(cudaStreamSynchronize(stream)); } void TearDown() override { delete res_bM; CUBLAS_CHECK(cublasDestroy(handle)); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: MatrixInputs<T> params; Matrix<T> *res_bM; std::vector<T> res_h; cublasHandle_t handle; cudaStream_t stream; }; // Test parameters (op, batch_size, m, n, p, q, s, t, tolerance) const std::vector<MatrixInputs<double>> inputsd = { {AB_op, 7, 15, 37, 37, 11, 0, 0, 1e-6}, {AZT_op, 5, 33, 65, 1, 1, 0, 0, 1e-6}, {ZA_op, 8, 12, 41, 1, 1, 0, 0, 1e-6}, {ApB_op, 4, 16, 48, 16, 48, 0, 0, 1e-6}, {AmB_op, 17, 9, 3, 9, 3, 0, 0, 1e-6}, {AkB_op, 5, 3, 13, 31, 8, 0, 0, 1e-6}, {AkB_op, 3, 7, 12, 31, 15, 0, 0, 1e-6}, {AkB_op, 2, 11, 2, 8, 46, 0, 0, 1e-6}, {AsolveZ_op, 6, 17, 17, 1, 1, 0, 0, 1e-6}, {LaggedZ_op, 5, 31, 9, 1, 1, 0, 0, 1e-6}, {LaggedZ_op, 7, 129, 3, 1, 1, 0, 0, 1e-6}, {CopyA2D_op, 11, 31, 63, 17, 14, 5, 9, 1e-6}, {CopyA2D_op, 4, 33, 7, 30, 4, 3, 0, 1e-6}, {DiffA_op, 5, 11, 1, 1, 1, 0, 0, 1e-6}, {DiffA_op, 15, 1, 37, 1, 1, 0, 0, 1e-6}, {Hessenberg_op, 10, 15, 15, 15, 15, 0, 0, 
1e-6}, {Hessenberg_op, 30, 61, 61, 61, 61, 0, 0, 1e-6}, // {Schur_op, 7, 12, 12, 12, 12, 0, 0, 1e-3}, // {Schur_op, 17, 77, 77, 77, 77, 0, 0, 1e-3}, // {Lyapunov_op, 5, 14, 14, 14, 14, 0, 0, 1e-2}, // {Lyapunov_op, 13, 100, 100, 100, 100, 0, 0, 1e-2} }; // Note: Schur and Lyapunov tests have had stability issues on CI so // they are disabled temporarily. See issue: // https://github.com/rapidsai/cuml/issues/1949 // Test parameters (op, batch_size, m, n, p, q, s, t, tolerance) const std::vector<MatrixInputs<float>> inputsf = { {AB_op, 7, 15, 37, 37, 11, 0, 0, 1e-2}, {AZT_op, 5, 33, 65, 1, 1, 0, 0, 1e-2}, {ZA_op, 8, 12, 41, 1, 1, 0, 0, 1e-2}, {ApB_op, 4, 16, 48, 16, 48, 0, 0, 1e-2}, {AmB_op, 17, 9, 3, 9, 3, 0, 0, 1e-2}, {AkB_op, 5, 3, 13, 31, 8, 0, 0, 1e-2}, {AkB_op, 3, 7, 12, 31, 15, 0, 0, 1e-2}, {AkB_op, 2, 11, 2, 8, 46, 0, 0, 1e-2}, {AsolveZ_op, 6, 17, 17, 1, 1, 0, 0, 1e-2}, {LaggedZ_op, 5, 31, 9, 1, 1, 0, 0, 1e-5}, {LaggedZ_op, 7, 129, 3, 1, 1, 0, 0, 1e-5}, {CopyA2D_op, 11, 31, 63, 17, 14, 5, 9, 1e-5}, {CopyA2D_op, 4, 33, 7, 30, 4, 3, 0, 1e-5}, {DiffA_op, 5, 11, 1, 1, 1, 0, 0, 1e-2}, {DiffA_op, 15, 1, 37, 1, 1, 0, 0, 1e-2}, {Hessenberg_op, 10, 15, 15, 15, 15, 0, 0, 1e-2}, {Hessenberg_op, 30, 61, 61, 61, 61, 0, 0, 1e-2}, // {Schur_op, 7, 12, 12, 12, 12, 0, 0, 1e-2}, // {Schur_op, 17, 77, 77, 77, 77, 0, 0, 1e-2}, // {Lyapunov_op, 5, 14, 14, 14, 14, 0, 0, 1e-2}, // {Lyapunov_op, 13, 100, 100, 100, 100, 0, 0, 1e-2} }; // Note: Schur and Lyapunov operations don't give good precision for // single-precision floating-point numbers yet... using BatchedMatrixTestD = MatrixTest<double>; using BatchedMatrixTestF = MatrixTest<float>; TEST_P(BatchedMatrixTestD, Result) { ASSERT_TRUE(raft::devArrMatchHost( res_h.data(), res_bM->raw_data(), res_h.size(), raft::CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedMatrixTestF, Result) { ASSERT_TRUE(raft::devArrMatchHost( res_h.data(), res_bM->raw_data(), res_h.size(), raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedMatrixTests, BatchedMatrixTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedMatrixTests, BatchedMatrixTestF, ::testing::ValuesIn(inputsf)); } // namespace Batched } // namespace LinAlg } // namespace MLCommon
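// The AkB_op cases above check b_kron against Naive::kronecker. As a reference for the
// operation itself, the sketch below computes a single (non-batched) Kronecker product
// A (m x n) kron B (p x q) -> C (m*p x n*q). It is a hypothetical helper that assumes
// row-major storage; it is not the Naive::kronecker implementation used by the test.
template <typename T>
void kronecker_ref(const T* A, const T* B, T* C, int m, int n, int p, int q) {
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j)
            for (int k = 0; k < p; ++k)
                for (int l = 0; l < q; ++l)
                    // C(i*p + k, j*q + l) = A(i, j) * B(k, l)
                    C[(i * p + k) * (n * q) + (j * q + l)] = A[i * n + j] * B[k * q + l];
}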
257d02c16faa2f3f77bcff5ae68bff3fd8e54ffb.hip
// !!! This is a file automatically generated by hipify!!! #include "util.h" #include "kernel.h" #include "matrix.h" #include <hip/hip_runtime_api.h> __global__ void mat_mul_nn_kernel(const Matrix A, const Matrix B, Matrix C, const float alpha, const float beta) { float Cvalue = 0; int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; #pragma unroll for (int k = 0; k < A.cols; ++k) Cvalue += A.getValue(r, k) * B.getValue(k, c); Cvalue = C.getValue(r, c) * beta + Cvalue * alpha; C.setValue(r, c, Cvalue); } blas_status sgemm_nn(int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc) { const int Mtile = 16; const int Ntile = 16; const int Ktile = 16; const Matrix a(m, k, lda, const_cast<float*>(A)); const Matrix b(k, n, ldb, const_cast<float*>(B)); Matrix c(m, n, ldc, C); dim3 dimBlock(Mtile, Ntile); dim3 dimGrid(m / Mtile, n / Ntile); hipLaunchKernelGGL(( mat_mul_nn_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, a, b, c, *alpha, *beta); }
257d02c16faa2f3f77bcff5ae68bff3fd8e54ffb.cu
#include "util.h" #include "kernel.h" #include "matrix.h" #include <cuda_runtime_api.h> __global__ void mat_mul_nn_kernel(const Matrix A, const Matrix B, Matrix C, const float alpha, const float beta) { float Cvalue = 0; int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; #pragma unroll for (int k = 0; k < A.cols; ++k) Cvalue += A.getValue(r, k) * B.getValue(k, c); Cvalue = C.getValue(r, c) * beta + Cvalue * alpha; C.setValue(r, c, Cvalue); } blas_status sgemm_nn(int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc) { const int Mtile = 16; const int Ntile = 16; const int Ktile = 16; const Matrix a(m, k, lda, const_cast<float*>(A)); const Matrix b(k, n, ldb, const_cast<float*>(B)); Matrix c(m, n, ldc, C); dim3 dimBlock(Mtile, Ntile); dim3 dimGrid(m / Mtile, n / Ntile); mat_mul_nn_kernel<<<dimGrid, dimBlock>>>(a, b, c, *alpha, *beta); }
0a142004fff004de44c5884afa2c1d161bd7ff0a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// LabelIslandSortArea.cu
// Implements the labeled-region area sorting algorithm.

#include "LabelIslandSortArea.h"
#include "Histogram.h"
#include <iostream>
using namespace std;

// Macro: MAX_PAIRS_NUM
// Number of (area value, label value) key-value pairs.
#ifndef MAX_PAIRS_NUM
#define MAX_PAIRS_NUM 256
#endif

// Macro: SORT_ARRAY_TYPE_ASC
// Sort flag: ascending order.
#ifndef SORT_ARRAY_TYPE_ASC
#define SORT_ARRAY_TYPE_ASC 2
#endif

// Macro: SORT_ARRAY_TYPE_DESC
// Sort flag: descending order.
#ifndef SORT_ARRAY_TYPE_DESC
#define SORT_ARRAY_TYPE_DESC 1
#endif

// Kernel function: _findAreasByMinMaxKer (filter areas)
// Selects the labeled regions whose area lies between the minimum and maximum area.
static __global__ void _findAreasByMinMaxKer(
        unsigned int *histogram,   // Region areas taken from the histogram.
        unsigned int minArea,      // Minimum area.
        unsigned int maxArea       // Maximum area.
);

// Kernel function: _bitonicSortPairsByAscendKer (sort region areas in ascending order)
// Parallel bitonic sort of the region areas in ascending order.
static __global__ void _bitonicSortPairsByAscendKer(
        unsigned int *devarray,    // Array of region areas.
        unsigned int *devareaRank  // Output (area value, label value) key-value pairs.
);

// Kernel function: _bitonicSortPairsByDescendKer (sort region areas in descending order)
// Parallel bitonic sort of the region areas in descending order.
static __global__ void _bitonicSortPairsByDescendKer(
        unsigned int *devarray,    // Array of region areas.
        unsigned int *devareaRank  // Output (area value, label value) key-value pairs.
);

// Kernel function: _bitonicSortPairsByDescendKer (sort region areas in descending order)
static __global__ void _bitonicSortPairsByDescendKer(
        unsigned int *devarray, unsigned int *devareaRank)
{
    // Read the thread index.
    int tid = threadIdx.x;
    int k, ixj, j;
    unsigned int tempArea, tempIndex;

    // Declare shared memory to speed up data access.
    __shared__ unsigned int area[MAX_PAIRS_NUM];
    __shared__ unsigned int index[MAX_PAIRS_NUM];

    // Copy the area values into shared memory.
    area[tid] = devarray[tid];
    // Copy the label values into shared memory.
    index[tid] = tid;
    __syncthreads();

    // Parallel bitonic sort, descending order.
    for (k = 2; k <= MAX_PAIRS_NUM; k = k << 1) {
        // Bitonic merge.
        for (j = k >> 1; j > 0; j = j >> 1) {
            // ixj is the position compared and swapped with the current position tid.
            ixj = tid ^ j;
            if (ixj > tid) {
                // If (tid & k) == 0, swap the two items into descending order.
                if ((tid & k) == 0 && (area[tid] < area[ixj])) {
                    // Swap the area values.
                    tempArea = area[tid];
                    area[tid] = area[ixj];
                    area[ixj] = tempArea;
                    // Swap the index values.
                    tempIndex = index[tid];
                    index[tid] = index[ixj];
                    index[ixj] = tempIndex;
                // Otherwise ((tid & k) != 0), swap the two items into ascending order.
                } else if ((tid & k) != 0 && area[tid] > area[ixj]) {
                    // Swap the area values.
                    tempArea = area[tid];
                    area[tid] = area[ixj];
                    area[ixj] = tempArea;
                    // Swap the index values.
                    tempIndex = index[tid];
                    index[tid] = index[ixj];
                    index[ixj] = tempIndex;
                }
            }
            __syncthreads();
        }
    }

    // Copy the area values from shared memory back to global memory.
    devareaRank[2 * tid] = area[tid];
    // Copy the index values from shared memory back to global memory.
    devareaRank[2 * tid + 1] = index[tid];
}

// Kernel function: _bitonicSortPairsByAscendKer (sort region areas in ascending order)
static __global__ void _bitonicSortPairsByAscendKer(
        unsigned int *devarray, unsigned int *devareaRank)
{
    // Read the thread index.
    int tid = threadIdx.x;
    int k, ixj, j;
    unsigned int tempArea, tempIndex;

    // Declare shared memory to speed up data access.
    __shared__ unsigned int area[MAX_PAIRS_NUM];
    __shared__ unsigned int index[MAX_PAIRS_NUM];

    // Copy the area values into shared memory.
    area[tid] = devarray[tid];
    // Copy the label values into shared memory.
    index[tid] = tid;
    __syncthreads();

    // Parallel bitonic sort, ascending order.
    for (k = 2; k <= MAX_PAIRS_NUM; k = k << 1) {
        // Bitonic merge.
        for (j = k >> 1; j > 0; j = j >> 1) {
            // ixj is the position compared and swapped with the current position tid.
            ixj = tid ^ j;
            if (ixj > tid) {
                // If (tid & k) == 0, swap the two items into ascending order.
                if ((tid & k) == 0 && (area[tid] > area[ixj])) {
                    // Swap the area values.
                    tempArea = area[tid];
                    area[tid] = area[ixj];
                    area[ixj] = tempArea;
                    // Swap the index values.
                    tempIndex = index[tid];
                    index[tid] = index[ixj];
                    index[ixj] = tempIndex;
                // Otherwise ((tid & k) != 0), swap the two items into descending order.
                } else if ((tid & k) != 0 && area[tid] < area[ixj]) {
                    // Swap the area values.
                    tempArea = area[tid];
                    area[tid] = area[ixj];
                    area[ixj] = tempArea;
                    // Swap the index values.
                    tempIndex = index[tid];
                    index[tid] = index[ixj];
                    index[ixj] = tempIndex;
                }
            }
            __syncthreads();
        }
    }

    // Copy the area values from shared memory back to global memory.
    devareaRank[2 * tid] = area[tid];
    // Copy the index values from shared memory back to global memory.
    devareaRank[2 * tid + 1] = index[tid];
}

// Host member method: bitonicSortPairs (sort the region areas)
__host__ int LabelIslandSortArea::bitonicSortPairs(
        unsigned int *inarray, unsigned int *areaRank)
{
    // Check whether inarray is null.
    if (inarray == NULL)
        return NULL_POINTER;

    // Check whether areaRank is null.
    if (areaRank == NULL)
        return NULL_POINTER;

    if (this->sortflag == SORT_ARRAY_TYPE_ASC)
        // Sort the region areas in ascending order.
        hipLaunchKernelGGL(( _bitonicSortPairsByAscendKer), dim3(1), dim3(MAX_PAIRS_NUM), 0, 0, inarray, areaRank);
    else if (this->sortflag == SORT_ARRAY_TYPE_DESC)
        // Sort the region areas in descending order.
        hipLaunchKernelGGL(( _bitonicSortPairsByDescendKer), dim3(1), dim3(MAX_PAIRS_NUM), 0, 0, inarray, areaRank);

    // If the CUDA call failed, return an error code.
    if (hipGetLastError() != hipSuccess)
        return CUDA_ERROR;

    return NO_ERROR;
}

// Kernel function: _findAreasByMinMaxKer (filter areas)
static __global__ void _findAreasByMinMaxKer(
        unsigned int *histogram, unsigned int minArea, unsigned int maxArea)
{
    // Get the thread index.
    int tid = threadIdx.x;
    histogram[0] = 0;
    // If the area is outside the [minArea, maxArea] range, clear the corresponding bin to 0.
    if (histogram[tid] < minArea || histogram[tid] > maxArea)
        histogram[tid] = 0;
}

// Host member method: labelIslandSortArea (sort all labeled regions by area)
__host__ int LabelIslandSortArea::labelIslandSortArea(
        Image *inimg, unsigned int *areaRank)
{
    // Check whether the input image is NULL.
    if (inimg == NULL)
        return NULL_POINTER;

    // Check whether areaRank is null.
    if (areaRank == NULL)
        return NULL_POINTER;

    // Check whether the parameters are valid.
    if (minarea < 0 || maxarea < 0 ||
        (sortflag != SORT_ARRAY_TYPE_ASC && sortflag != SORT_ARRAY_TYPE_DESC))
        return INVALID_DATA;

    // This section performs the image preprocessing work, which mainly prepares Device
    // memory for the input and output images so they can hold the data.
    int errcode;  // Local variable: error code.

    // Copy the input image into Device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Allocate temporary space on the Device. All space is requested in one call and the
    // individual arrays are addressed through offsets.
    hipError_t cudaerrcode;
    unsigned int *alldevicedata;
    unsigned int *devhistogram, *devareaRank;
    cudaerrcode = hipMalloc((void**)&alldevicedata,
                            3 * MAX_PAIRS_NUM * sizeof (unsigned int));
    if (cudaerrcode != hipSuccess)
        return cudaerrcode;

    // Initialize the Device memory.
    cudaerrcode = hipMemset(alldevicedata, 0,
                            3 * MAX_PAIRS_NUM * sizeof (unsigned int));
    if (cudaerrcode != hipSuccess)
        return cudaerrcode;

    // Locate the devhistogram buffer through its offset.
    devhistogram = alldevicedata;

    // Compute the region areas through the histogram.
    Histogram hist;
    errcode = hist.histogram(inimg, devhistogram, 0);
    if (errcode != NO_ERROR)
        return errcode;

    // Select the labeled regions whose area lies between the minimum and maximum area.
    hipLaunchKernelGGL(( _findAreasByMinMaxKer), dim3(1), dim3(MAX_PAIRS_NUM), 0, 0, devhistogram, minarea, maxarea);

    // If the CUDA call failed, return an error code.
    if (hipGetLastError() != hipSuccess) {
        hipFree(alldevicedata);
        return CUDA_ERROR;
    }

    // areaRank resides on the Host side.
    if (this->ishost == 1) {
        // Locate the devareaRank buffer through its offset.
        devareaRank = alldevicedata + MAX_PAIRS_NUM;

        // Call the parallel bitonic sort to sort the selected areas.
        bitonicSortPairs(devhistogram, devareaRank);

        // Copy devareaRank from the Device to the Host.
        cudaerrcode = hipMemcpy(areaRank, devareaRank,
                                MAX_PAIRS_NUM * 2 * sizeof(unsigned int),
                                hipMemcpyDeviceToHost);
        if (cudaerrcode != hipSuccess)
            return cudaerrcode;

        // Count the number of distinct regions that satisfy the area condition.
        int k;
        this->length = 0;
        for (k = 0; k < MAX_PAIRS_NUM; k++) {
            if (areaRank[2 * k] > 0)
                this->length++;
        }

        // When areaRank is sorted in ascending order there may be many invalid 0 entries at
        // the front of the array, because the number of regions can be smaller than
        // MAX_PAIRS_NUM. The array is therefore compacted so the valid non-zero data sits at
        // the front. For example, if before processing
        // areaRank = [0, 0, 0, 0, ......, 50000, 8, 60000, 3, 70000, 6], then afterwards
        // areaRank = [50000, 8, 60000, 3, 70000, 6, 0, 0, 0, 0, ......].
        int i, j;
        if (sortflag == 2) {
            if (areaRank[0] == 0) {
                j = 0;
                for (i = 0; i < MAX_PAIRS_NUM; i++) {
                    // If the area is greater than 0, move the pair forward.
                    if (areaRank[2 * i] > 0) {
                        areaRank[2 * j] = areaRank[2 * i];
                        areaRank[2 * j + 1] = areaRank[2 * i + 1];
                        areaRank[2 * i] = 0;
                        areaRank[2 * i + 1] = 0;
                        j++;
                    }
                }
            }
        }
    // areaRank resides on the Device side.
    } else if (this->ishost == 0) {
        // Declare a Host-side array to prepare for the later processing.
        unsigned int hostareaRank[MAX_PAIRS_NUM * 2];

        // Locate the devareaRank buffer through its offset.
        devareaRank = alldevicedata + MAX_PAIRS_NUM;

        // Call the parallel bitonic sort to sort the selected areas.
        bitonicSortPairs(devhistogram, areaRank);

        // Copy areaRank from the Device to the Host.
        cudaerrcode = hipMemcpy(hostareaRank, areaRank,
                                MAX_PAIRS_NUM * 2 * sizeof(unsigned int),
                                hipMemcpyDeviceToHost);
        if (cudaerrcode != hipSuccess)
            return cudaerrcode;

        // Count the number of distinct regions that satisfy the area condition.
        int k;
        this->length = 0;
        for (k = 0; k < MAX_PAIRS_NUM; k++) {
            if (hostareaRank[2 * k] > 0)
                this->length++;
        }

        // When hostareaRank is sorted in ascending order there may be many invalid 0 entries
        // at the front of the array, because the number of regions can be smaller than
        // MAX_PAIRS_NUM. The array is therefore compacted so the valid non-zero data sits at
        // the front. For example, if before processing
        // hostareaRank = [0, 0, 0, 0, ......, 50000, 8, 60000, 3, 70000, 6], then afterwards
        // hostareaRank = [50000, 8, 60000, 3, 70000, 6, 0, 0, 0, 0, ......].
        int i, j;
        if (sortflag == 2) {
            if (hostareaRank[0] == 0) {
                j = 0;
                for (i = 0; i < MAX_PAIRS_NUM; i++) {
                    // If the area is greater than 0, move the pair forward.
                    if (hostareaRank[2 * i] > 0) {
                        hostareaRank[2 * j] = hostareaRank[2 * i];
                        hostareaRank[2 * j + 1] = hostareaRank[2 * i + 1];
                        hostareaRank[2 * i] = 0;
                        hostareaRank[2 * i + 1] = 0;
                        j++;
                    }
                }
            }
        }
    }

    // Free the temporary device memory.
    hipFree(alldevicedata);

    return NO_ERROR;
}
0a142004fff004de44c5884afa2c1d161bd7ff0a.cu
// LabelIslandSortArea.cu
// Implements the labeled-region area sorting algorithm.

#include "LabelIslandSortArea.h"
#include "Histogram.h"
#include <iostream>
using namespace std;

// Macro: MAX_PAIRS_NUM
// Number of (area value, label value) key-value pairs.
#ifndef MAX_PAIRS_NUM
#define MAX_PAIRS_NUM 256
#endif

// Macro: SORT_ARRAY_TYPE_ASC
// Sort flag: ascending order.
#ifndef SORT_ARRAY_TYPE_ASC
#define SORT_ARRAY_TYPE_ASC 2
#endif

// Macro: SORT_ARRAY_TYPE_DESC
// Sort flag: descending order.
#ifndef SORT_ARRAY_TYPE_DESC
#define SORT_ARRAY_TYPE_DESC 1
#endif

// Kernel function: _findAreasByMinMaxKer (filter areas)
// Selects the labeled regions whose area lies between the minimum and maximum area.
static __global__ void _findAreasByMinMaxKer(
        unsigned int *histogram,   // Region areas taken from the histogram.
        unsigned int minArea,      // Minimum area.
        unsigned int maxArea       // Maximum area.
);

// Kernel function: _bitonicSortPairsByAscendKer (sort region areas in ascending order)
// Parallel bitonic sort of the region areas in ascending order.
static __global__ void _bitonicSortPairsByAscendKer(
        unsigned int *devarray,    // Array of region areas.
        unsigned int *devareaRank  // Output (area value, label value) key-value pairs.
);

// Kernel function: _bitonicSortPairsByDescendKer (sort region areas in descending order)
// Parallel bitonic sort of the region areas in descending order.
static __global__ void _bitonicSortPairsByDescendKer(
        unsigned int *devarray,    // Array of region areas.
        unsigned int *devareaRank  // Output (area value, label value) key-value pairs.
);

// Kernel function: _bitonicSortPairsByDescendKer (sort region areas in descending order)
static __global__ void _bitonicSortPairsByDescendKer(
        unsigned int *devarray, unsigned int *devareaRank)
{
    // Read the thread index.
    int tid = threadIdx.x;
    int k, ixj, j;
    unsigned int tempArea, tempIndex;

    // Declare shared memory to speed up data access.
    __shared__ unsigned int area[MAX_PAIRS_NUM];
    __shared__ unsigned int index[MAX_PAIRS_NUM];

    // Copy the area values into shared memory.
    area[tid] = devarray[tid];
    // Copy the label values into shared memory.
    index[tid] = tid;
    __syncthreads();

    // Parallel bitonic sort, descending order.
    for (k = 2; k <= MAX_PAIRS_NUM; k = k << 1) {
        // Bitonic merge.
        for (j = k >> 1; j > 0; j = j >> 1) {
            // ixj is the position compared and swapped with the current position tid.
            ixj = tid ^ j;
            if (ixj > tid) {
                // If (tid & k) == 0, swap the two items into descending order.
                if ((tid & k) == 0 && (area[tid] < area[ixj])) {
                    // Swap the area values.
                    tempArea = area[tid];
                    area[tid] = area[ixj];
                    area[ixj] = tempArea;
                    // Swap the index values.
                    tempIndex = index[tid];
                    index[tid] = index[ixj];
                    index[ixj] = tempIndex;
                // Otherwise ((tid & k) != 0), swap the two items into ascending order.
                } else if ((tid & k) != 0 && area[tid] > area[ixj]) {
                    // Swap the area values.
                    tempArea = area[tid];
                    area[tid] = area[ixj];
                    area[ixj] = tempArea;
                    // Swap the index values.
                    tempIndex = index[tid];
                    index[tid] = index[ixj];
                    index[ixj] = tempIndex;
                }
            }
            __syncthreads();
        }
    }

    // Copy the area values from shared memory back to global memory.
    devareaRank[2 * tid] = area[tid];
    // Copy the index values from shared memory back to global memory.
    devareaRank[2 * tid + 1] = index[tid];
}

// Kernel function: _bitonicSortPairsByAscendKer (sort region areas in ascending order)
static __global__ void _bitonicSortPairsByAscendKer(
        unsigned int *devarray, unsigned int *devareaRank)
{
    // Read the thread index.
    int tid = threadIdx.x;
    int k, ixj, j;
    unsigned int tempArea, tempIndex;

    // Declare shared memory to speed up data access.
    __shared__ unsigned int area[MAX_PAIRS_NUM];
    __shared__ unsigned int index[MAX_PAIRS_NUM];

    // Copy the area values into shared memory.
    area[tid] = devarray[tid];
    // Copy the label values into shared memory.
    index[tid] = tid;
    __syncthreads();

    // Parallel bitonic sort, ascending order.
    for (k = 2; k <= MAX_PAIRS_NUM; k = k << 1) {
        // Bitonic merge.
        for (j = k >> 1; j > 0; j = j >> 1) {
            // ixj is the position compared and swapped with the current position tid.
            ixj = tid ^ j;
            if (ixj > tid) {
                // If (tid & k) == 0, swap the two items into ascending order.
                if ((tid & k) == 0 && (area[tid] > area[ixj])) {
                    // Swap the area values.
                    tempArea = area[tid];
                    area[tid] = area[ixj];
                    area[ixj] = tempArea;
                    // Swap the index values.
                    tempIndex = index[tid];
                    index[tid] = index[ixj];
                    index[ixj] = tempIndex;
                // Otherwise ((tid & k) != 0), swap the two items into descending order.
                } else if ((tid & k) != 0 && area[tid] < area[ixj]) {
                    // Swap the area values.
                    tempArea = area[tid];
                    area[tid] = area[ixj];
                    area[ixj] = tempArea;
                    // Swap the index values.
                    tempIndex = index[tid];
                    index[tid] = index[ixj];
                    index[ixj] = tempIndex;
                }
            }
            __syncthreads();
        }
    }

    // Copy the area values from shared memory back to global memory.
    devareaRank[2 * tid] = area[tid];
    // Copy the index values from shared memory back to global memory.
    devareaRank[2 * tid + 1] = index[tid];
}

// Host member method: bitonicSortPairs (sort the region areas)
__host__ int LabelIslandSortArea::bitonicSortPairs(
        unsigned int *inarray, unsigned int *areaRank)
{
    // Check whether inarray is null.
    if (inarray == NULL)
        return NULL_POINTER;

    // Check whether areaRank is null.
    if (areaRank == NULL)
        return NULL_POINTER;

    if (this->sortflag == SORT_ARRAY_TYPE_ASC)
        // Sort the region areas in ascending order.
        _bitonicSortPairsByAscendKer<<<1, MAX_PAIRS_NUM>>>(inarray, areaRank);
    else if (this->sortflag == SORT_ARRAY_TYPE_DESC)
        // Sort the region areas in descending order.
        _bitonicSortPairsByDescendKer<<<1, MAX_PAIRS_NUM>>>(inarray, areaRank);

    // If the CUDA call failed, return an error code.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;

    return NO_ERROR;
}

// Kernel function: _findAreasByMinMaxKer (filter areas)
static __global__ void _findAreasByMinMaxKer(
        unsigned int *histogram, unsigned int minArea, unsigned int maxArea)
{
    // Get the thread index.
    int tid = threadIdx.x;
    histogram[0] = 0;
    // If the area is outside the [minArea, maxArea] range, clear the corresponding bin to 0.
    if (histogram[tid] < minArea || histogram[tid] > maxArea)
        histogram[tid] = 0;
}

// Host member method: labelIslandSortArea (sort all labeled regions by area)
__host__ int LabelIslandSortArea::labelIslandSortArea(
        Image *inimg, unsigned int *areaRank)
{
    // Check whether the input image is NULL.
    if (inimg == NULL)
        return NULL_POINTER;

    // Check whether areaRank is null.
    if (areaRank == NULL)
        return NULL_POINTER;

    // Check whether the parameters are valid.
    if (minarea < 0 || maxarea < 0 ||
        (sortflag != SORT_ARRAY_TYPE_ASC && sortflag != SORT_ARRAY_TYPE_DESC))
        return INVALID_DATA;

    // This section performs the image preprocessing work, which mainly prepares Device
    // memory for the input and output images so they can hold the data.
    int errcode;  // Local variable: error code.

    // Copy the input image into Device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Allocate temporary space on the Device. All space is requested in one call and the
    // individual arrays are addressed through offsets.
    cudaError_t cudaerrcode;
    unsigned int *alldevicedata;
    unsigned int *devhistogram, *devareaRank;
    cudaerrcode = cudaMalloc((void**)&alldevicedata,
                             3 * MAX_PAIRS_NUM * sizeof (unsigned int));
    if (cudaerrcode != cudaSuccess)
        return cudaerrcode;

    // Initialize the Device memory.
    cudaerrcode = cudaMemset(alldevicedata, 0,
                             3 * MAX_PAIRS_NUM * sizeof (unsigned int));
    if (cudaerrcode != cudaSuccess)
        return cudaerrcode;

    // Locate the devhistogram buffer through its offset.
    devhistogram = alldevicedata;

    // Compute the region areas through the histogram.
    Histogram hist;
    errcode = hist.histogram(inimg, devhistogram, 0);
    if (errcode != NO_ERROR)
        return errcode;

    // Select the labeled regions whose area lies between the minimum and maximum area.
    _findAreasByMinMaxKer<<<1, MAX_PAIRS_NUM>>>(devhistogram, minarea, maxarea);

    // If the CUDA call failed, return an error code.
    if (cudaGetLastError() != cudaSuccess) {
        cudaFree(alldevicedata);
        return CUDA_ERROR;
    }

    // areaRank resides on the Host side.
    if (this->ishost == 1) {
        // Locate the devareaRank buffer through its offset.
        devareaRank = alldevicedata + MAX_PAIRS_NUM;

        // Call the parallel bitonic sort to sort the selected areas.
        bitonicSortPairs(devhistogram, devareaRank);

        // Copy devareaRank from the Device to the Host.
        cudaerrcode = cudaMemcpy(areaRank, devareaRank,
                                 MAX_PAIRS_NUM * 2 * sizeof(unsigned int),
                                 cudaMemcpyDeviceToHost);
        if (cudaerrcode != cudaSuccess)
            return cudaerrcode;

        // Count the number of distinct regions that satisfy the area condition.
        int k;
        this->length = 0;
        for (k = 0; k < MAX_PAIRS_NUM; k++) {
            if (areaRank[2 * k] > 0)
                this->length++;
        }

        // When areaRank is sorted in ascending order there may be many invalid 0 entries at
        // the front of the array, because the number of regions can be smaller than
        // MAX_PAIRS_NUM. The array is therefore compacted so the valid non-zero data sits at
        // the front. For example, if before processing
        // areaRank = [0, 0, 0, 0, ......, 50000, 8, 60000, 3, 70000, 6], then afterwards
        // areaRank = [50000, 8, 60000, 3, 70000, 6, 0, 0, 0, 0, ......].
        int i, j;
        if (sortflag == 2) {
            if (areaRank[0] == 0) {
                j = 0;
                for (i = 0; i < MAX_PAIRS_NUM; i++) {
                    // If the area is greater than 0, move the pair forward.
                    if (areaRank[2 * i] > 0) {
                        areaRank[2 * j] = areaRank[2 * i];
                        areaRank[2 * j + 1] = areaRank[2 * i + 1];
                        areaRank[2 * i] = 0;
                        areaRank[2 * i + 1] = 0;
                        j++;
                    }
                }
            }
        }
    // areaRank resides on the Device side.
    } else if (this->ishost == 0) {
        // Declare a Host-side array to prepare for the later processing.
        unsigned int hostareaRank[MAX_PAIRS_NUM * 2];

        // Locate the devareaRank buffer through its offset.
        devareaRank = alldevicedata + MAX_PAIRS_NUM;

        // Call the parallel bitonic sort to sort the selected areas.
        bitonicSortPairs(devhistogram, areaRank);

        // Copy areaRank from the Device to the Host.
        cudaerrcode = cudaMemcpy(hostareaRank, areaRank,
                                 MAX_PAIRS_NUM * 2 * sizeof(unsigned int),
                                 cudaMemcpyDeviceToHost);
        if (cudaerrcode != cudaSuccess)
            return cudaerrcode;

        // Count the number of distinct regions that satisfy the area condition.
        int k;
        this->length = 0;
        for (k = 0; k < MAX_PAIRS_NUM; k++) {
            if (hostareaRank[2 * k] > 0)
                this->length++;
        }

        // When hostareaRank is sorted in ascending order there may be many invalid 0 entries
        // at the front of the array, because the number of regions can be smaller than
        // MAX_PAIRS_NUM. The array is therefore compacted so the valid non-zero data sits at
        // the front. For example, if before processing
        // hostareaRank = [0, 0, 0, 0, ......, 50000, 8, 60000, 3, 70000, 6], then afterwards
        // hostareaRank = [50000, 8, 60000, 3, 70000, 6, 0, 0, 0, 0, ......].
        int i, j;
        if (sortflag == 2) {
            if (hostareaRank[0] == 0) {
                j = 0;
                for (i = 0; i < MAX_PAIRS_NUM; i++) {
                    // If the area is greater than 0, move the pair forward.
                    if (hostareaRank[2 * i] > 0) {
                        hostareaRank[2 * j] = hostareaRank[2 * i];
                        hostareaRank[2 * j + 1] = hostareaRank[2 * i + 1];
                        hostareaRank[2 * i] = 0;
                        hostareaRank[2 * i + 1] = 0;
                        j++;
                    }
                }
            }
        }
    }

    // Free the temporary device memory.
    cudaFree(alldevicedata);

    return NO_ERROR;
}
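// The bitonic sort kernels above rely on MAX_PAIRS_NUM being a power of two and launch one
// thread per histogram bin. For cross-checking the (area, label) ordering they produce in
// the descending case, a plain host-side reference such as the hypothetical helper below
// can be used; it is illustrative only and is not part of LabelIslandSortArea.
#include <algorithm>
#include <utility>
#include <vector>

static void sort_areas_desc_ref(const unsigned int* histogram, int num_bins,
                                std::vector<std::pair<unsigned int, unsigned int> >* out) {
    out->clear();
    for (int label = 0; label < num_bins; ++label)
        out->push_back(std::make_pair(histogram[label], (unsigned int)label));
    // Sort by area, largest first; stable_sort keeps equal areas in label order, which the
    // GPU bitonic sort does not guarantee.
    std::stable_sort(out->begin(), out->end(),
                     [](const std::pair<unsigned int, unsigned int>& a,
                        const std::pair<unsigned int, unsigned int>& b) {
                         return a.first > b.first;
                     });
}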
57a986f7a3c805c347a61205d62a774cf4c0869f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define Width 31
#define TITE_WIDTH 16

__global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Block ID X : %d and Block ID Y: %d\n", blockIdx.x, blockIdx.y);

    float Pvalue = 0;
    // Guard with && (not ||) and keep the store inside the guard so threads
    // outside the 31x31 matrix never read or write out of bounds.
    if (row < Width && col < Width) {
        for (int k = 0; k < ncols; k++) {
            float Melement = Md[row*ncols+k];
            float Nelement = Nd[k*ncols+col];
            Pvalue += Melement * Nelement;
        }
        Pd[row*ncols+col] = Pvalue;
    }
}

int main (int argc, char *argv[])
{
    int i, j;
    int size = Width * Width * sizeof(float);
    float M[Width][Width], N[Width][Width], P[Width][Width];
    float *Md, *Nd, *Pd;

    for (i = 0; i < Width; i++) {
        for (j = 0; j < Width; j++) {
            M[i][j] = 1;
            N[i][j] = 2;
        }
    }

    hipMalloc( (void**)&Md, size);
    hipMalloc( (void**)&Nd, size);
    hipMalloc( (void**)&Pd, size);

    hipMemcpy( Md, M, size, hipMemcpyHostToDevice);
    hipMemcpy( Nd, N, size, hipMemcpyHostToDevice);

    dim3 dimBlock(TITE_WIDTH, TITE_WIDTH);
    dim3 dimGrid((Width+TITE_WIDTH-1)/TITE_WIDTH, (Width+TITE_WIDTH-1)/TITE_WIDTH);

    hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, Width);

    hipMemcpy(P, Pd, size, hipMemcpyDeviceToHost);

    hipFree(Md);
    hipFree(Nd);
    hipFree(Pd);

    printf("\n================================\n");
    for (i = 0; i < Width; i++) {
        for (j = 0; j < Width; j++) {
            printf("%.2f ", P[i][j]);
        }
    }
}
57a986f7a3c805c347a61205d62a774cf4c0869f.cu
#include <stdio.h>

#define Width 31
#define TITE_WIDTH 16

__global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Block ID X : %d and Block ID Y: %d\n", blockIdx.x, blockIdx.y);

    float Pvalue = 0;
    // Guard with && (not ||) and keep the store inside the guard so threads
    // outside the 31x31 matrix never read or write out of bounds.
    if (row < Width && col < Width) {
        for (int k = 0; k < ncols; k++) {
            float Melement = Md[row*ncols+k];
            float Nelement = Nd[k*ncols+col];
            Pvalue += Melement * Nelement;
        }
        Pd[row*ncols+col] = Pvalue;
    }
}

int main (int argc, char *argv[])
{
    int i, j;
    int size = Width * Width * sizeof(float);
    float M[Width][Width], N[Width][Width], P[Width][Width];
    float *Md, *Nd, *Pd;

    for (i = 0; i < Width; i++) {
        for (j = 0; j < Width; j++) {
            M[i][j] = 1;
            N[i][j] = 2;
        }
    }

    cudaMalloc( (void**)&Md, size);
    cudaMalloc( (void**)&Nd, size);
    cudaMalloc( (void**)&Pd, size);

    cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice);
    cudaMemcpy( Nd, N, size, cudaMemcpyHostToDevice);

    dim3 dimBlock(TITE_WIDTH, TITE_WIDTH);
    dim3 dimGrid((Width+TITE_WIDTH-1)/TITE_WIDTH, (Width+TITE_WIDTH-1)/TITE_WIDTH);

    MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);

    cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);

    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);

    printf("\n================================\n");
    for (i = 0; i < Width; i++) {
        for (j = 0; j < Width; j++) {
            printf("%.2f ", P[i][j]);
        }
    }
}
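// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): since every element of M
// is initialized to 1 and every element of N to 2, each entry of the product
// should equal 2 * Width. A minimal host-side check along those lines could be:
bool verify_product(const float *P, int width)
{
    const float expected = 2.0f * width;   // sum over k of 1 * 2
    for (int i = 0; i < width * width; i++) {
        if (P[i] != expected)
            return false;                  // mismatch found
    }
    return true;
}
// It could be called after the device-to-host copy, e.g.
//   printf("%s\n", verify_product(&P[0][0], Width) ? "PASS" : "FAIL");
// ---------------------------------------------------------------------------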
8e06f88a3d194013848ca872de142d337d0a5400.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/local_response_normalization_op.h" namespace caffe2 { namespace { template <typename T> __global__ void LRNFillScaleNCHW(const int nthreads, const T* in, const int num, const int channels, const int height, const int width, const int size, const T alpha_over_size, const T bias, T* scale) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local offset int w = index % width; int h = (index / width) % height; int n = index / width / height; int offset = (n * channels * height + h) * width + w; int step = height * width; in += offset; scale += offset; int head = 0; int pre_pad = (size - 1) / 2; int post_pad = size - pre_pad - 1; T accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad) { accum_scale += in[head * step] * in[head * step]; ++head; } // until we reach size, nothing needs to be subtracted while (head < size) { accum_scale += in[head * step] * in[head * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // both add and subtract while (head < channels) { accum_scale += in[head * step] * in[head * step]; accum_scale -= in[(head - size) * step] * in[(head - size) * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { accum_scale -= in[(head - size) * step] * in[(head - size) * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // recover the pointers for the next loop. in -= offset; scale -= offset; } } template <typename T> __global__ void LRNFillScaleNHWC(const int nthreads, const T* in, const int num, const int height, const int width, const int channels, const int size, const T alpha_over_size, const T bias, T* scale) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int pre_pad = (size - 1) / 2; scale[index] = 0; for (int i = 0; i < size; ++i) { int raw_idx = c + i - pre_pad; if (raw_idx >= 0 && raw_idx < channels) { scale[index] += in[index + i - pre_pad] * in[index + i - pre_pad]; } } scale[index] = bias + scale[index] * alpha_over_size; } } // TODO(Yangqing): check if it would be faster to just put it into the previous // kernel. 
template <typename T> __global__ void LRNComputeOutput(const int nthreads, const T* in, const T* scale, const T negative_beta, T* out) { CUDA_1D_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } template <typename T> __global__ void LRNComputeDiffNCHW(const int nthreads, const T* bottom_data, const T* top_data, const T* scale, const T* top_diff, const int num, const int channels, const int height, const int width, const int size, const T negative_beta, const T cache_ratio, T* bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local offset int w = index % width; int h = (index / width) % height; int n = index / width / height; int offset = (n * channels * height + h) * width + w; int step = height * width; bottom_data += offset; top_data += offset; scale += offset; top_diff += offset; bottom_diff += offset; int head = 0; int pre_pad = size - (size + 1) / 2; int post_pad = size - pre_pad - 1; T accum_ratio = 0; // accumulate values while (head < post_pad) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; ++head; } // until we reach size, nothing needs to be subtracted while (head < size) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; accum_ratio -= top_diff[(head - size) * step] * top_data[(head - size) * step] / scale[(head - size) * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { accum_ratio -= top_diff[(head - size) * step] * top_data[(head - size) * step] / scale[(head - size) * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // recover pointer for next iteration. bottom_data -= offset; top_data -= offset; scale -= offset; top_diff -= offset; bottom_diff -= offset; } } // This local response normalization gradient does one sum per output location // and does not use the running trick for 1-d convolution: thus it might not be // the fastest implementation. 
template <typename T> __global__ void LRNComputeDiffNHWC(const int nthreads, const T* bottom_data, const T* top_data, const T* scale, const T* top_diff, const int num, const int height, const int width, const int channels, const int size, const T negative_beta, const T cache_ratio, T* bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local channel offset int c = index % channels; int pre_pad = size / 2; T accum_ratio = 0; for (int i = -pre_pad; i < size - pre_pad; ++i) { if (c + i >= 0 && c + i < channels) { accum_ratio += top_diff[index + i] * top_data[index + i] / scale[index + i]; } } bottom_diff[index] = top_diff[index] * pow(scale[index], negative_beta) - cache_ratio * bottom_data[index] * accum_ratio; } } } // namespace template<> bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.dim32(3); const float* Xdata = X.data<float>(); auto* Y = Output(0, X.sizes(), at::dtype<float>()); float* Ydata = Y->template mutable_data<float>(); if (OutputSize() > 1) { scale_ = Output(1); } else { if (!scale_) { scale_ = &local_scale_tensor_; } } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = N * H * W; hipLaunchKernelGGL(( LRNFillScaleNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data); n_threads = X.size(); hipLaunchKernelGGL(( LRNComputeOutput<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, scale_data, -beta_, Ydata); return true; } template<> bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int H = X.dim32(1); const int W = X.dim32(2); const int C = X.dim32(3); const float* Xdata = X.data<float>(); auto* Y = Output(0, X.sizes(), at::dtype<float>()); float* Ydata = Y->template mutable_data<float>(); if (OutputSize() > 1) { scale_ = Output(1); } else { if (!scale_) { scale_ = &local_scale_tensor_; } } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = X.size(); hipLaunchKernelGGL(( LRNFillScaleNHWC<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data); hipLaunchKernelGGL(( LRNComputeOutput<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, scale_data, -beta_, Ydata); return true; } template <> bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.dim32(3); // Loosely checking the size, assuming that the shapes will be the same as // long as the sizes check out. 
DCHECK_EQ(X.size(), Y.size()); DCHECK_EQ(X.size(), dY.size()); auto* dX = Output(0, X.sizes(), at::dtype<float>()); const float* Xdata = X.data<float>(); const float* Ydata = Y.data<float>(); if (!scale_) { scale_ = &local_scale_tensor_; } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = N * H * W; hipLaunchKernelGGL(( LRNFillScaleNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data); const float* dYdata = dY.data<float>(); float* dXdata = dX->template mutable_data<float>(); hipLaunchKernelGGL(( LRNComputeDiffNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, Ydata, scale_data, dYdata, N, C, H, W, size_, -beta_, 2.f * alpha_ * beta_ / size_, dXdata); return true; } template <> bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int H = X.dim32(1); const int W = X.dim32(2); const int C = X.dim32(3); const float* Xdata = X.data<float>(); // Loosely checking the size, assuming that the shapes will be the same as // long as the sizes check out. DCHECK_EQ(X.size(), Y.size()); DCHECK_EQ(X.size(), dY.size()); auto* dX = Output(0, X.sizes(), at::dtype<float>()); if (!scale_) { scale_ = &local_scale_tensor_; } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = X.size(); hipLaunchKernelGGL(( LRNFillScaleNHWC<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data); hipLaunchKernelGGL(( LRNComputeDiffNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), scale_data, dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), size_, -beta_, 2.f * alpha_ * beta_ / size_, dX->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(LRN, LRNOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LRNGradient, LRNGradientOp<float, CUDAContext>); } // namespace caffe2
8e06f88a3d194013848ca872de142d337d0a5400.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/local_response_normalization_op.h" namespace caffe2 { namespace { template <typename T> __global__ void LRNFillScaleNCHW(const int nthreads, const T* in, const int num, const int channels, const int height, const int width, const int size, const T alpha_over_size, const T bias, T* scale) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local offset int w = index % width; int h = (index / width) % height; int n = index / width / height; int offset = (n * channels * height + h) * width + w; int step = height * width; in += offset; scale += offset; int head = 0; int pre_pad = (size - 1) / 2; int post_pad = size - pre_pad - 1; T accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad) { accum_scale += in[head * step] * in[head * step]; ++head; } // until we reach size, nothing needs to be subtracted while (head < size) { accum_scale += in[head * step] * in[head * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // both add and subtract while (head < channels) { accum_scale += in[head * step] * in[head * step]; accum_scale -= in[(head - size) * step] * in[(head - size) * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { accum_scale -= in[(head - size) * step] * in[(head - size) * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // recover the pointers for the next loop. in -= offset; scale -= offset; } } template <typename T> __global__ void LRNFillScaleNHWC(const int nthreads, const T* in, const int num, const int height, const int width, const int channels, const int size, const T alpha_over_size, const T bias, T* scale) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int pre_pad = (size - 1) / 2; scale[index] = 0; for (int i = 0; i < size; ++i) { int raw_idx = c + i - pre_pad; if (raw_idx >= 0 && raw_idx < channels) { scale[index] += in[index + i - pre_pad] * in[index + i - pre_pad]; } } scale[index] = bias + scale[index] * alpha_over_size; } } // TODO(Yangqing): check if it would be faster to just put it into the previous // kernel. 
template <typename T> __global__ void LRNComputeOutput(const int nthreads, const T* in, const T* scale, const T negative_beta, T* out) { CUDA_1D_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } template <typename T> __global__ void LRNComputeDiffNCHW(const int nthreads, const T* bottom_data, const T* top_data, const T* scale, const T* top_diff, const int num, const int channels, const int height, const int width, const int size, const T negative_beta, const T cache_ratio, T* bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local offset int w = index % width; int h = (index / width) % height; int n = index / width / height; int offset = (n * channels * height + h) * width + w; int step = height * width; bottom_data += offset; top_data += offset; scale += offset; top_diff += offset; bottom_diff += offset; int head = 0; int pre_pad = size - (size + 1) / 2; int post_pad = size - pre_pad - 1; T accum_ratio = 0; // accumulate values while (head < post_pad) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; ++head; } // until we reach size, nothing needs to be subtracted while (head < size) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; accum_ratio -= top_diff[(head - size) * step] * top_data[(head - size) * step] / scale[(head - size) * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { accum_ratio -= top_diff[(head - size) * step] * top_data[(head - size) * step] / scale[(head - size) * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // recover pointer for next iteration. bottom_data -= offset; top_data -= offset; scale -= offset; top_diff -= offset; bottom_diff -= offset; } } // This local response normalization gradient does one sum per output location // and does not use the running trick for 1-d convolution: thus it might not be // the fastest implementation. 
template <typename T> __global__ void LRNComputeDiffNHWC(const int nthreads, const T* bottom_data, const T* top_data, const T* scale, const T* top_diff, const int num, const int height, const int width, const int channels, const int size, const T negative_beta, const T cache_ratio, T* bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local channel offset int c = index % channels; int pre_pad = size / 2; T accum_ratio = 0; for (int i = -pre_pad; i < size - pre_pad; ++i) { if (c + i >= 0 && c + i < channels) { accum_ratio += top_diff[index + i] * top_data[index + i] / scale[index + i]; } } bottom_diff[index] = top_diff[index] * pow(scale[index], negative_beta) - cache_ratio * bottom_data[index] * accum_ratio; } } } // namespace template<> bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.dim32(3); const float* Xdata = X.data<float>(); auto* Y = Output(0, X.sizes(), at::dtype<float>()); float* Ydata = Y->template mutable_data<float>(); if (OutputSize() > 1) { scale_ = Output(1); } else { if (!scale_) { scale_ = &local_scale_tensor_; } } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = N * H * W; LRNFillScaleNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data); n_threads = X.size(); LRNComputeOutput<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, scale_data, -beta_, Ydata); return true; } template<> bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int H = X.dim32(1); const int W = X.dim32(2); const int C = X.dim32(3); const float* Xdata = X.data<float>(); auto* Y = Output(0, X.sizes(), at::dtype<float>()); float* Ydata = Y->template mutable_data<float>(); if (OutputSize() > 1) { scale_ = Output(1); } else { if (!scale_) { scale_ = &local_scale_tensor_; } } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = X.size(); LRNFillScaleNHWC<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data); LRNComputeOutput<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, scale_data, -beta_, Ydata); return true; } template <> bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.dim32(3); // Loosely checking the size, assuming that the shapes will be the same as // long as the sizes check out. 
DCHECK_EQ(X.size(), Y.size()); DCHECK_EQ(X.size(), dY.size()); auto* dX = Output(0, X.sizes(), at::dtype<float>()); const float* Xdata = X.data<float>(); const float* Ydata = Y.data<float>(); if (!scale_) { scale_ = &local_scale_tensor_; } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = N * H * W; LRNFillScaleNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data); const float* dYdata = dY.data<float>(); float* dXdata = dX->template mutable_data<float>(); LRNComputeDiffNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, Ydata, scale_data, dYdata, N, C, H, W, size_, -beta_, 2.f * alpha_ * beta_ / size_, dXdata); return true; } template <> bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int H = X.dim32(1); const int W = X.dim32(2); const int C = X.dim32(3); const float* Xdata = X.data<float>(); // Loosely checking the size, assuming that the shapes will be the same as // long as the sizes check out. DCHECK_EQ(X.size(), Y.size()); DCHECK_EQ(X.size(), dY.size()); auto* dX = Output(0, X.sizes(), at::dtype<float>()); if (!scale_) { scale_ = &local_scale_tensor_; } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = X.size(); LRNFillScaleNHWC<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data); LRNComputeDiffNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), scale_data, dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), size_, -beta_, 2.f * alpha_ * beta_ / size_, dX->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(LRN, LRNOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LRNGradient, LRNGradientOp<float, CUDAContext>); } // namespace caffe2
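// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): the NCHW scale-fill
// kernel above walks one (n, h, w) column across the channel dimension with a
// running sum of squares, adding the element that enters the window and
// subtracting the one that leaves it. A host-side sketch of that same
// sliding-window pass for a single column (names here are illustrative, not
// part of the Caffe2 API) is:
void lrn_fill_scale_column(const float *in, float *scale, int channels,
                           int step, int size, float alpha_over_size,
                           float bias)
{
    // Mirrors the kernel's window bookkeeping; assumes channels >= size.
    int pre_pad = (size - 1) / 2;
    int post_pad = size - pre_pad - 1;
    float accum = 0.f;
    int head = 0;
    // Accumulate until the window reaches its first output channel.
    while (head < post_pad) {
        accum += in[head * step] * in[head * step];
        ++head;
    }
    // Grow the window; nothing needs to be subtracted yet.
    while (head < size) {
        accum += in[head * step] * in[head * step];
        scale[(head - post_pad) * step] = bias + accum * alpha_over_size;
        ++head;
    }
    // Slide the window: add the entering element, subtract the leaving one.
    while (head < channels) {
        accum += in[head * step] * in[head * step];
        accum -= in[(head - size) * step] * in[(head - size) * step];
        scale[(head - post_pad) * step] = bias + accum * alpha_over_size;
        ++head;
    }
    // Drain the window for the trailing channels.
    while (head < channels + post_pad) {
        accum -= in[(head - size) * step] * in[(head - size) * step];
        scale[(head - post_pad) * step] = bias + accum * alpha_over_size;
        ++head;
    }
}
// ---------------------------------------------------------------------------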
c53ddcb345d8fb2c216cbef3a6cbef323d078a70.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <cstdlib> #include <cstdio> #include <chrono> #include <hip/hip_runtime.h> __global__ void rotate_matrix_parallel (float *matrix, const int n) { int layer = blockIdx.x * blockDim.x + threadIdx.x; if (layer < n/2) { int first = layer; int last = n - 1 - layer; for(int i = first; i < last; ++i) { int offset = i - first; float top = matrix[first*n+i]; // save top // left -> top matrix[first*n+i] = matrix[(last-offset)*n+first]; // bottom -> left matrix[(last-offset)*n+first] = matrix[last*n+(last-offset)]; // right -> bottom matrix[last*n+(last-offset)] = matrix[i*n+last]; // top -> right matrix[i*n+last] = top; // right <- saved top } } } void rotate_matrix_serial(float *matrix, int n) { for (int layer = 0; layer < n / 2; ++layer) { int first = layer; int last = n - 1 - layer; for(int i = first; i < last; ++i) { int offset = i - first; float top = matrix[first*n+i]; // save top // left -> top matrix[first*n+i] = matrix[(last-offset)*n+first]; // bottom -> left matrix[(last-offset)*n+first] = matrix[last*n+(last-offset)]; // right -> bottom matrix[last*n+(last-offset)] = matrix[i*n+last]; // top -> right matrix[i*n+last] = top; // right <- saved top } } } int main(int argc, char** argv) { if (argc != 3) { printf("Usage: %s <matrix size> <repeat>\n", argv[0]); return 1; } const int n = atoi(argv[1]); const int repeat = atoi(argv[2]); float *serial_res = (float*) aligned_alloc(1024, n*n*sizeof(float)); float *parallel_res = (float*) aligned_alloc(1024, n*n*sizeof(float)); for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) serial_res[i*n+j] = parallel_res[i*n+j] = i*n+j; for (int i = 0; i < repeat; i++) { rotate_matrix_serial(serial_res, n); } float *d_parallel_res; hipMalloc((void**)&d_parallel_res, n*n*sizeof(float)); hipMemcpy(d_parallel_res, parallel_res, n*n*sizeof(float), hipMemcpyHostToDevice); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( rotate_matrix_parallel), dim3((n/2+255)/256), dim3(256), 0, 0, d_parallel_res, n); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat); hipMemcpy(parallel_res, d_parallel_res, n*n*sizeof(float), hipMemcpyDeviceToHost); bool ok = true; for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (serial_res[i*n+j] != parallel_res[i*n+j]) { ok = false; break; } } } printf("%s\n", ok ? "PASS" : "FAIL"); free(serial_res); free(parallel_res); hipFree(d_parallel_res); return 0; }
c53ddcb345d8fb2c216cbef3a6cbef323d078a70.cu
#include <cmath> #include <cstdlib> #include <cstdio> #include <chrono> #include <hip/hip_runtime.h> __global__ void rotate_matrix_parallel (float *matrix, const int n) { int layer = blockIdx.x * blockDim.x + threadIdx.x; if (layer < n/2) { int first = layer; int last = n - 1 - layer; for(int i = first; i < last; ++i) { int offset = i - first; float top = matrix[first*n+i]; // save top // left -> top matrix[first*n+i] = matrix[(last-offset)*n+first]; // bottom -> left matrix[(last-offset)*n+first] = matrix[last*n+(last-offset)]; // right -> bottom matrix[last*n+(last-offset)] = matrix[i*n+last]; // top -> right matrix[i*n+last] = top; // right <- saved top } } } void rotate_matrix_serial(float *matrix, int n) { for (int layer = 0; layer < n / 2; ++layer) { int first = layer; int last = n - 1 - layer; for(int i = first; i < last; ++i) { int offset = i - first; float top = matrix[first*n+i]; // save top // left -> top matrix[first*n+i] = matrix[(last-offset)*n+first]; // bottom -> left matrix[(last-offset)*n+first] = matrix[last*n+(last-offset)]; // right -> bottom matrix[last*n+(last-offset)] = matrix[i*n+last]; // top -> right matrix[i*n+last] = top; // right <- saved top } } } int main(int argc, char** argv) { if (argc != 3) { printf("Usage: %s <matrix size> <repeat>\n", argv[0]); return 1; } const int n = atoi(argv[1]); const int repeat = atoi(argv[2]); float *serial_res = (float*) aligned_alloc(1024, n*n*sizeof(float)); float *parallel_res = (float*) aligned_alloc(1024, n*n*sizeof(float)); for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) serial_res[i*n+j] = parallel_res[i*n+j] = i*n+j; for (int i = 0; i < repeat; i++) { rotate_matrix_serial(serial_res, n); } float *d_parallel_res; hipMalloc((void**)&d_parallel_res, n*n*sizeof(float)); hipMemcpy(d_parallel_res, parallel_res, n*n*sizeof(float), hipMemcpyHostToDevice); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { rotate_matrix_parallel<<<(n/2+255)/256, 256>>>(d_parallel_res, n); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat); hipMemcpy(parallel_res, d_parallel_res, n*n*sizeof(float), hipMemcpyDeviceToHost); bool ok = true; for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (serial_res[i*n+j] != parallel_res[i*n+j]) { ok = false; break; } } } printf("%s\n", ok ? "PASS" : "FAIL"); free(serial_res); free(parallel_res); hipFree(d_parallel_res); return 0; }
381f7e477596228ab68e21728cbc6b4b80d46f74.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hemisphere_implement.h" #include "brdf_common.h" __global__ void hemisphere_kernel(float3* pos, unsigned width) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; float3 pin = pos[y*width+x]; pos[y*width+x] = normalize(pin); } extern "C" void hemisphere(float3 *pos, unsigned numVertices) { dim3 block(8, 8, 1); unsigned width = 128; unsigned height = numVertices / width; dim3 grid(width / block.x, height / block.y, 1); hipLaunchKernelGGL(( hemisphere_kernel), dim3(grid), dim3(block), 0, 0, pos, width); }
381f7e477596228ab68e21728cbc6b4b80d46f74.cu
#include "hemisphere_implement.h" #include "brdf_common.h" __global__ void hemisphere_kernel(float3* pos, unsigned width) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; float3 pin = pos[y*width+x]; pos[y*width+x] = normalize(pin); } extern "C" void hemisphere(float3 *pos, unsigned numVertices) { dim3 block(8, 8, 1); unsigned width = 128; unsigned height = numVertices / width; dim3 grid(width / block.x, height / block.y, 1); hemisphere_kernel<<< grid, block>>>(pos, width); }
e23cdb15af3ed26c2798cd3573c7e5c2c1c9ad85.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/JitLoops.cuh> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> namespace at { namespace native { template<typename scalar_t> struct AbsFunctor { __device__ __forceinline__ scalar_t operator() (const scalar_t a) const { return std::abs(a); } }; const char abs_name[] = "abs_kernel"; void abs_kernel_cuda(TensorIteratorBase& iter) { auto dtype = iter.dtype(); if (at::isComplexType(dtype)) { #if AT_USE_JITERATOR() static const auto abs_string = jiterator_stringify( template <typename T> T abs_kernel(T x) { return std::abs(x); }); AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "abs_cuda", [&]() { jitted_gpu_kernel< /*name=*/abs_name, /*return_dtype=*/scalar_t, /*common_dtype=*/scalar_t, /*arity=*/1>(iter, abs_string); }); #else AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "abs_cuda", [&]() { using opmath_t = at::opmath_type<scalar_t>; gpu_kernel(iter, AbsFunctor<opmath_t>()); }); #endif } else { AT_DISPATCH_ALL_TYPES_AND3( ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, iter.dtype(), "abs_cuda", [&]() { gpu_kernel(iter, AbsFunctor<scalar_t>()); }); } } REGISTER_DISPATCH(abs_stub, &abs_kernel_cuda); }} // namespace at::native
e23cdb15af3ed26c2798cd3573c7e5c2c1c9ad85.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/JitLoops.cuh> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> namespace at { namespace native { template<typename scalar_t> struct AbsFunctor { __device__ __forceinline__ scalar_t operator() (const scalar_t a) const { return std::abs(a); } }; const char abs_name[] = "abs_kernel"; void abs_kernel_cuda(TensorIteratorBase& iter) { auto dtype = iter.dtype(); if (at::isComplexType(dtype)) { #if AT_USE_JITERATOR() static const auto abs_string = jiterator_stringify( template <typename T> T abs_kernel(T x) { return std::abs(x); }); AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "abs_cuda", [&]() { jitted_gpu_kernel< /*name=*/abs_name, /*return_dtype=*/scalar_t, /*common_dtype=*/scalar_t, /*arity=*/1>(iter, abs_string); }); #else AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "abs_cuda", [&]() { using opmath_t = at::opmath_type<scalar_t>; gpu_kernel(iter, AbsFunctor<opmath_t>()); }); #endif } else { AT_DISPATCH_ALL_TYPES_AND3( ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, iter.dtype(), "abs_cuda", [&]() { gpu_kernel(iter, AbsFunctor<scalar_t>()); }); } } REGISTER_DISPATCH(abs_stub, &abs_kernel_cuda); }} // namespace at::native
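// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): outside of ATen's
// gpu_kernel / jiterator machinery, the same "unary functor applied
// elementwise" pattern can be sketched with a plain CUDA kernel. The names
// below are illustrative and this is not the dispatch path used above.
template <typename scalar_t>
struct AbsSketchFunctor {
  __device__ __forceinline__ scalar_t operator()(const scalar_t a) const {
    return a < scalar_t(0) ? -a : a;   // plain abs, no library dependency
  }
};

template <typename scalar_t, typename Functor>
__global__ void apply_unary(const scalar_t *in, scalar_t *out, int n, Functor f)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    out[i] = f(in[i]);
}

// A possible launch, given device buffers d_in / d_out of length n:
//   apply_unary<<<(n + 255) / 256, 256>>>(d_in, d_out, n, AbsSketchFunctor<float>{});
// ---------------------------------------------------------------------------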
e3f9ac7f36279a83fcfea0640371f849d6828607.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* GPU OPTIMIZED MONTE CARLO (GOMC) 2.75 Copyright (C) 2022 GOMC Group A copy of the MIT License can be found in License.txt along with this program, also can be found at <https://opensource.org/licenses/MIT>. ********************************************************************************/ #ifdef GOMC_CUDA #include "TransformParticlesCUDAKernel.cuh" #include "CalculateMinImageCUDAKernel.cuh" #include "CUDAMemoryManager.cuh" #include "Random123/boxmuller.hpp" #define MIN_FORCE 1E-12 #define MAX_FORCE 30 __device__ inline double randomGPU(unsigned int counter, ulong step, ulong seed) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; RNG::ctr_type r = philox4x64(c, k); return r123::u01<double>(r[0]); } __device__ inline double3 randomCoordsGPU(unsigned int counter, unsigned int key, ulong step, ulong seed) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; c[1] = key; RNG::ctr_type r = philox4x64(c, k); double3 r01; r01.x = r123::u01<double>(r[0]); r01.y = r123::u01<double>(r[1]); r01.z = r123::u01<double>(r[2]); return r01; } __device__ inline double randomGaussianGPU(unsigned int counter, ulong step, ulong seed, double mean, double stdDev) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; RNG::ctr_type r = philox4x64(c, k); double2 normal2 = r123::boxmuller(r[0], r[1]); double shiftedVal = mean + normal2.x * stdDev; return shiftedVal; } __device__ inline double3 randomGaussianCoordsGPU(unsigned int counter, unsigned int key, ulong step, ulong seed, double mean, double stdDev) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; c[1] = key; RNG::ctr_type r = philox4x64(c, k); double2 normal1 = r123::boxmuller(r[0], r[1]); double2 normal2 = r123::boxmuller(r[2], r[3]); double3 normals = make_double3(mean + normal1.x * stdDev, mean + normal1.y * stdDev, mean + normal2.x * stdDev); return normals; } __device__ inline double SymRandomGPU(unsigned int counter, unsigned int key, ulong step, ulong seed) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; c[1] = key; RNG::ctr_type r = philox4x64(c, k); double r01 = r123::uneg11<double>(r[0]); return r01; } __device__ inline double3 SymRandomCoordsGPU(unsigned int counter, unsigned int key, ulong step, ulong seed) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; c[1] = key; RNG::ctr_type r = philox4x64(c, k); double3 r01; r01.x = r123::uneg11<double>(r[0]); r01.y = r123::uneg11<double>(r[1]); r01.z = r123::uneg11<double>(r[2]); return r01; } //Returns a uniformly random point on the unit sphere __device__ inline double3 RandomCoordsOnSphereGPU(unsigned int counter, unsigned int key, ulong step, ulong seed) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; c[1] = key; RNG::ctr_type r = philox4x64(c, k); //picking phi uniformly will cluster points at poles //pick u = cos(phi) uniformly instead //start from r[1] because I used r[0] in GetSymRandom when called in multiparticle double u = r123::uneg11<double>(r[1]); // theta must 
be [0, 2pi) ! double theta = 2.0 * M_PI * r123::u01<double>(r[2]); double sintheta, costheta; sincos(theta, &sintheta, &costheta); double rootTerm = sqrt(1.0 - u * u); return make_double3(rootTerm * costheta, rootTerm * sintheta, u); } __device__ inline void ApplyRotation(double &x, double &y, double &z, double comx, double comy, double comz, double theta, double3 rotvec, double axx, double axy, double axz, int gpu_nonOrth, double *gpu_cell_x, double *gpu_cell_y, double *gpu_cell_z, double *gpu_Invcell_x, double *gpu_Invcell_y, double *gpu_Invcell_z) { double matrix[3][3], cross[3][3], tensor[3][3]; // build cross cross[0][0] = 0.0; cross[0][1] = -rotvec.z; cross[0][2] = rotvec.y; cross[1][0] = rotvec.z; cross[1][1] = 0.0; cross[1][2] = -rotvec.x; cross[2][0] = -rotvec.y; cross[2][1] = rotvec.x; cross[2][2] = 0.0; // build tensor for(int i = 0; i < 3; i++) { tensor[0][i] = rotvec.x; tensor[1][i] = rotvec.y; tensor[2][i] = rotvec.z; } for(int i = 0; i < 3; i++) { tensor[i][0] *= rotvec.x; tensor[i][1] *= rotvec.y; tensor[i][2] *= rotvec.z; } // build matrix double s, c; sincos(theta, &s, &c); for(int i = 0; i < 3; i++) { for(int j = 0; j < 3; j++) { matrix[i][j] = 0.0; } matrix[i][i] = c; } for(int i = 0; i < 3; i++) { for(int j = 0; j < 3; j++) { matrix[i][j] += s * cross[i][j] + (1 - c) * tensor[i][j]; } } // unwrap molecule double3 coor = make_double3(x, y, z); double3 com = make_double3(comx, comy, comz); double3 axes = make_double3(axx, axy, axz); double3 halfAx = make_double3(axx * 0.5, axy * 0.5, axz * 0.5); if (gpu_nonOrth) UnwrapPBCNonOrth3(coor, com, axes, halfAx, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); else UnwrapPBC3(coor, com, axes, halfAx); // move particle to zero coor.x -= com.x; coor.y -= com.y; coor.z -= com.z; // rotate double3 newcoor; newcoor.x = matrix[0][0] * coor.x + matrix[0][1] * coor.y + matrix[0][2] * coor.z; newcoor.y = matrix[1][0] * coor.x + matrix[1][1] * coor.y + matrix[1][2] * coor.z; newcoor.z = matrix[2][0] * coor.x + matrix[2][1] * coor.y + matrix[2][2] * coor.z; coor.x = newcoor.x + com.x; coor.y = newcoor.y + com.y; coor.z = newcoor.z + com.z; // wrap again if (gpu_nonOrth) WrapPBCNonOrth3(coor, axes, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); else WrapPBC3(coor, axes); x = coor.x; y = coor.y; z = coor.z; } void CallTranslateParticlesGPU(VariablesCUDA *vars, const std::vector<int8_t> &isMoleculeInvolved, int box, double t_max, double *mForcex, double *mForcey, double *mForcez, std::vector<int> &inForceRange, ulong step, unsigned int key, ulong seed, const std::vector<int> &particleMol, int atomCount, int molCount, double xAxes, double yAxes, double zAxes, XYZArray &newMolPos, XYZArray &newCOMs, double lambdaBETA, XYZArray &t_k, XYZArray &molForceRecRef) { int8_t *gpu_isMoleculeInvolved; int threadsPerBlock = 256; int blocksPerGrid = (int)(atomCount / threadsPerBlock) + 1; int *gpu_particleMol; CUMALLOC((void **) &gpu_isMoleculeInvolved, isMoleculeInvolved.size() * sizeof(int8_t)); CUMALLOC((void**) &gpu_particleMol, particleMol.size() * sizeof(int)); hipMemcpy(vars->gpu_mForcex, mForcex, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mForcey, mForcey, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mForcez, mForcez, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mForceRecx, molForceRecRef.x, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mForceRecy, molForceRecRef.y, molCount * 
sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mForceRecz, molForceRecRef.z, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(gpu_particleMol, &particleMol[0], particleMol.size() * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(gpu_isMoleculeInvolved, &isMoleculeInvolved[0], isMoleculeInvolved.size() * sizeof(int8_t), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_x, newMolPos.x, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_y, newMolPos.y, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_z, newMolPos.z, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comx, newCOMs.x, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comy, newCOMs.y, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comz, newCOMs.z, molCount * sizeof(double), hipMemcpyHostToDevice); checkLastErrorCUDA(__FILE__, __LINE__); hipLaunchKernelGGL(( TranslateParticlesKernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, molCount, t_max, vars->gpu_mForcex, vars->gpu_mForcey, vars->gpu_mForcez, vars->gpu_inForceRange, step, key, seed, vars->gpu_x, vars->gpu_y, vars->gpu_z, gpu_particleMol, atomCount, xAxes, yAxes, zAxes, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_cell_x[box], vars->gpu_cell_y[box], vars->gpu_cell_z[box], vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], vars->gpu_nonOrth, lambdaBETA, vars->gpu_t_k_x, vars->gpu_t_k_y, vars->gpu_t_k_z, gpu_isMoleculeInvolved, vars->gpu_mForceRecx, vars->gpu_mForceRecy, vars->gpu_mForceRecz); hipDeviceSynchronize(); checkLastErrorCUDA(__FILE__, __LINE__); hipMemcpy(newMolPos.x, vars->gpu_x, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newMolPos.y, vars->gpu_y, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newMolPos.z, vars->gpu_z, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newCOMs.x, vars->gpu_comx, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newCOMs.y, vars->gpu_comy, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newCOMs.z, vars->gpu_comz, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(t_k.x, vars->gpu_t_k_x, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(t_k.y, vars->gpu_t_k_y, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(t_k.z, vars->gpu_t_k_z, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(&inForceRange[0], vars->gpu_inForceRange, molCount * sizeof(int), hipMemcpyDeviceToHost); CUFREE(gpu_isMoleculeInvolved); CUFREE(gpu_particleMol); checkLastErrorCUDA(__FILE__, __LINE__); } void CallRotateParticlesGPU(VariablesCUDA *vars, const std::vector<int8_t> &isMoleculeInvolved, int box, double r_max, double *mTorquex, double *mTorquey, double *mTorquez, std::vector<int> &inForceRange, ulong step, unsigned int key, ulong seed, const std::vector<int> &particleMol, int atomCount, int molCount, double xAxes, double yAxes, double zAxes, XYZArray &newMolPos, XYZArray &newCOMs, double lambdaBETA, XYZArray &r_k) { int8_t *gpu_isMoleculeInvolved; int threadsPerBlock = 256; int blocksPerGrid = (int)(atomCount / threadsPerBlock) + 1; int *gpu_particleMol; CUMALLOC((void **) &gpu_isMoleculeInvolved, isMoleculeInvolved.size() * sizeof(int8_t)); CUMALLOC((void**) &gpu_particleMol, particleMol.size() * sizeof(int)); hipMemcpy(vars->gpu_mTorquex, mTorquex, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mTorquey, mTorquey, molCount * sizeof(double), 
hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mTorquez, mTorquez, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(gpu_particleMol, &particleMol[0], particleMol.size() * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_x, newMolPos.x, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_y, newMolPos.y, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_z, newMolPos.z, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comx, newCOMs.x, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comy, newCOMs.y, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comz, newCOMs.z, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(gpu_isMoleculeInvolved, &isMoleculeInvolved[0], isMoleculeInvolved.size() * sizeof(int8_t), hipMemcpyHostToDevice); hipLaunchKernelGGL(( RotateParticlesKernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, molCount, r_max, vars->gpu_mTorquex, vars->gpu_mTorquey, vars->gpu_mTorquez, vars->gpu_inForceRange, step, key, seed, vars->gpu_x, vars->gpu_y, vars->gpu_z, gpu_particleMol, atomCount, xAxes, yAxes, zAxes, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_cell_x[box], vars->gpu_cell_y[box], vars->gpu_cell_z[box], vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], vars->gpu_nonOrth, lambdaBETA, vars->gpu_r_k_x, vars->gpu_r_k_y, vars->gpu_r_k_z, gpu_isMoleculeInvolved); hipDeviceSynchronize(); checkLastErrorCUDA(__FILE__, __LINE__); hipMemcpy(newMolPos.x, vars->gpu_x, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newMolPos.y, vars->gpu_y, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newMolPos.z, vars->gpu_z, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(r_k.x, vars->gpu_r_k_x, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(r_k.y, vars->gpu_r_k_y, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(r_k.z, vars->gpu_r_k_z, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(&inForceRange[0], vars->gpu_inForceRange, molCount * sizeof(int), hipMemcpyDeviceToHost); CUFREE(gpu_isMoleculeInvolved); CUFREE(gpu_particleMol); checkLastErrorCUDA(__FILE__, __LINE__); } __global__ void TranslateParticlesKernel(unsigned int numberOfMolecules, double t_max, double *molForcex, double *molForcey, double *molForcez, int *gpu_inForceRange, ulong step, unsigned int key, ulong seed, double *gpu_x, double *gpu_y, double *gpu_z, int *gpu_particleMol, int atomCount, double xAxes, double yAxes, double zAxes, double *gpu_comx, double *gpu_comy, double *gpu_comz, double *gpu_cell_x, double *gpu_cell_y, double *gpu_cell_z, double *gpu_Invcell_x, double *gpu_Invcell_y, double *gpu_Invcell_z, int *gpu_nonOrth, double lambdaBETA, double *gpu_t_k_x, double *gpu_t_k_y, double *gpu_t_k_z, int8_t *gpu_isMoleculeInvolved, double *gpu_mForceRecx, double *gpu_mForceRecy, double *gpu_mForceRecz) { int atomNumber = blockIdx.x * blockDim.x + threadIdx.x; if(atomNumber >= atomCount) return; int molIndex = gpu_particleMol[atomNumber]; if(!gpu_isMoleculeInvolved[molIndex]) return; bool updateMol = atomNumber == 0 || (gpu_particleMol[atomNumber] != gpu_particleMol[atomNumber - 1]); // This section calculates the amount of shift double lbfx = (molForcex[molIndex] + gpu_mForceRecx[molIndex]) * lambdaBETA; double lbfy = (molForcey[molIndex] + gpu_mForceRecy[molIndex]) * lambdaBETA; double lbfz = (molForcez[molIndex] + gpu_mForceRecz[molIndex]) * lambdaBETA; double lbmaxx = 
lbfx * t_max; double lbmaxy = lbfy * t_max; double lbmaxz = lbfz * t_max; double shiftx, shifty, shiftz; bool forceInRange; forceInRange = (std::abs(lbmaxx) > MIN_FORCE && std::abs(lbmaxx) < MAX_FORCE && std::abs(lbmaxy) > MIN_FORCE && std::abs(lbmaxy) < MAX_FORCE && std::abs(lbmaxz) > MIN_FORCE && std::abs(lbmaxz) < MAX_FORCE); if (forceInRange) { double3 randnums = randomCoordsGPU(molIndex, key, step, seed); shiftx = log(exp(-1.0 * lbmaxx) + 2 * randnums.x * sinh(lbmaxx)) / lbfx; shifty = log(exp(-1.0 * lbmaxy) + 2 * randnums.y * sinh(lbmaxy)) / lbfy; shiftz = log(exp(-1.0 * lbmaxz) + 2 * randnums.z * sinh(lbmaxz)) / lbfz; } else { double3 randnums = SymRandomCoordsGPU(molIndex, key, step, seed); shiftx = t_max * randnums.x; shifty = t_max * randnums.y; shiftz = t_max * randnums.z; } // perform the shift on the coordinates double3 coor = make_double3(gpu_x[atomNumber] + shiftx, gpu_y[atomNumber] + shifty, gpu_z[atomNumber] + shiftz); // rewrapping double3 axes = make_double3(xAxes, yAxes, zAxes); if (gpu_nonOrth[0]) WrapPBCNonOrth3(coor, axes, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); else WrapPBC3(coor, axes); gpu_x[atomNumber] = coor.x; gpu_y[atomNumber] = coor.y; gpu_z[atomNumber] = coor.z; //update the CoM just once per molecule if(updateMol) { double3 com = make_double3(gpu_comx[molIndex] + shiftx, gpu_comy[molIndex] + shifty, gpu_comz[molIndex] + shiftz); if (gpu_nonOrth[0]) WrapPBCNonOrth3(com, axes, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); else WrapPBC3(com, axes); gpu_comx[molIndex] = com.x; gpu_comy[molIndex] = com.y; gpu_comz[molIndex] = com.z; gpu_t_k_x[molIndex] = shiftx; gpu_t_k_y[molIndex] = shifty; gpu_t_k_z[molIndex] = shiftz; gpu_inForceRange[molIndex] = forceInRange; } } __global__ void RotateParticlesKernel(unsigned int numberOfMolecules, double r_max, double *molTorquex, double *molTorquey, double *molTorquez, int *gpu_inForceRange, ulong step, unsigned int key, ulong seed, double *gpu_x, double *gpu_y, double *gpu_z, int *gpu_particleMol, int atomCount, double xAxes, double yAxes, double zAxes, double *gpu_comx, double *gpu_comy, double *gpu_comz, double *gpu_cell_x, double *gpu_cell_y, double *gpu_cell_z, double *gpu_Invcell_x, double *gpu_Invcell_y, double *gpu_Invcell_z, int *gpu_nonOrth, double lambdaBETA, double *gpu_r_k_x, double *gpu_r_k_y, double *gpu_r_k_z, int8_t *gpu_isMoleculeInvolved) { int atomNumber = blockIdx.x * blockDim.x + threadIdx.x; if(atomNumber >= atomCount) return; int molIndex = gpu_particleMol[atomNumber]; if(!gpu_isMoleculeInvolved[molIndex]) return; bool updateMol = atomNumber == 0 || (gpu_particleMol[atomNumber] != gpu_particleMol[atomNumber - 1]); // This section calculates the amount of rotation double lbtx = molTorquex[molIndex] * lambdaBETA; double lbty = molTorquey[molIndex] * lambdaBETA; double lbtz = molTorquez[molIndex] * lambdaBETA; double lbmaxx = lbtx * r_max; double lbmaxy = lbty * r_max; double lbmaxz = lbtz * r_max; double rotx, roty, rotz, theta; double3 rotvec; bool forceInRange; forceInRange = (std::abs(lbmaxx) > MIN_FORCE && std::abs(lbmaxx) < MAX_FORCE && std::abs(lbmaxy) > MIN_FORCE && std::abs(lbmaxy) < MAX_FORCE && std::abs(lbmaxz) > MIN_FORCE && std::abs(lbmaxz) < MAX_FORCE); if (forceInRange) { double3 randnums = randomCoordsGPU(molIndex, key, step, seed); rotx = log(exp(-1.0 * lbmaxx) + 2 * randnums.x * sinh(lbmaxx)) / lbtx; roty = log(exp(-1.0 * lbmaxy) + 2 * randnums.y * sinh(lbmaxy)) / lbty; rotz = log(exp(-1.0 * lbmaxz) + 2 * 
randnums.z * sinh(lbmaxz)) / lbtz; theta = sqrt(rotx * rotx + roty * roty + rotz * rotz); rotvec = make_double3(rotx * (1.0/theta), roty * (1.0/theta), rotz * (1.0/theta)); } else { double3 randnums = RandomCoordsOnSphereGPU(molIndex, key, step, seed); //These values are ignored if !forceInRange so just initialize to zero. rotx = 0.0; roty = 0.0; rotz = 0.0; theta = r_max * SymRandomGPU(molIndex, key, step, seed); rotvec = randnums; } if(updateMol) { gpu_r_k_x[molIndex] = rotx; gpu_r_k_y[molIndex] = roty; gpu_r_k_z[molIndex] = rotz; gpu_inForceRange[molIndex] = forceInRange; } // perform the rotation on the coordinates ApplyRotation(gpu_x[atomNumber], gpu_y[atomNumber], gpu_z[atomNumber], gpu_comx[molIndex], gpu_comy[molIndex], gpu_comz[molIndex], theta, rotvec, xAxes, yAxes, zAxes, *gpu_nonOrth, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); } // CUDA implementation of MultiParticle Brownian transformation void BrownianMotionRotateParticlesGPU( VariablesCUDA *vars, const std::vector<unsigned int> &moleculeInvolved, XYZArray &mTorque, XYZArray &newMolPos, XYZArray &newCOMs, XYZArray &r_k, const XYZ &boxAxes, const double BETA, const double r_max, ulong step, unsigned int key, ulong seed, const int box, const bool isOrthogonal, int *kill) { int atomCount = newMolPos.Count(); int molCount = newCOMs.Count(); int molCountInBox = moleculeInvolved.size(); int *gpu_moleculeInvolved; // Each block would handle one molecule int threadsPerBlock = 32; int blocksPerGrid = molCountInBox; CUMALLOC((void **) &gpu_moleculeInvolved, molCountInBox * sizeof(int)); hipMemcpy(vars->gpu_mTorquex, mTorque.x, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mTorquey, mTorque.y, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mTorquez, mTorque.z, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_x, newMolPos.x, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_y, newMolPos.y, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_z, newMolPos.z, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comx, newCOMs.x, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comy, newCOMs.y, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comz, newCOMs.z, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(gpu_moleculeInvolved, &moleculeInvolved[0], molCountInBox * sizeof(int), hipMemcpyHostToDevice); double3 axis = make_double3(boxAxes.x, boxAxes.y, boxAxes.z); double3 halfAx = make_double3(boxAxes.x * 0.5, boxAxes.y * 0.5, boxAxes.z * 0.5); if (isOrthogonal) hipLaunchKernelGGL(( BrownianMotionRotateKernel<true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, vars->gpu_startAtomIdx, vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_mTorquex, vars->gpu_mTorquey, vars->gpu_mTorquez, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_r_k_x, vars->gpu_r_k_y, vars->gpu_r_k_z, gpu_moleculeInvolved, vars->gpu_cell_x[box], vars->gpu_cell_y[box], vars->gpu_cell_z[box], vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], axis, halfAx, atomCount, r_max, step, key, seed, BETA, kill); else hipLaunchKernelGGL(( BrownianMotionRotateKernel<true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, vars->gpu_startAtomIdx, vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_mTorquex, vars->gpu_mTorquey, vars->gpu_mTorquez, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_r_k_x, vars->gpu_r_k_y, 
vars->gpu_r_k_z, gpu_moleculeInvolved, vars->gpu_cell_x[box], vars->gpu_cell_y[box], vars->gpu_cell_z[box], vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], axis, halfAx, atomCount, r_max, step, key, seed, BETA, kill); hipDeviceSynchronize(); checkLastErrorCUDA(__FILE__, __LINE__); hipMemcpy(newMolPos.x, vars->gpu_x, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newMolPos.y, vars->gpu_y, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newMolPos.z, vars->gpu_z, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(r_k.x, vars->gpu_r_k_x, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(r_k.y, vars->gpu_r_k_y, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(r_k.z, vars->gpu_r_k_z, molCount * sizeof(double), hipMemcpyDeviceToHost); CUFREE(gpu_moleculeInvolved); checkLastErrorCUDA(__FILE__, __LINE__); } template<const bool isOrthogonal> __global__ void BrownianMotionRotateKernel( int *startAtomIdx, double *gpu_x, double *gpu_y, double *gpu_z, double *molTorquex, double *molTorquey, double *molTorquez, double *gpu_comx, double *gpu_comy, double *gpu_comz, double *gpu_r_k_x, double *gpu_r_k_y, double *gpu_r_k_z, int *moleculeInvolved, double *gpu_cell_x, double *gpu_cell_y, double *gpu_cell_z, double *gpu_Invcell_x, double *gpu_Invcell_y, double *gpu_Invcell_z, double3 axis, double3 halfAx, int atomCount, double r_max, ulong step, unsigned int key, ulong seed, double BETA, int *kill) { //Each block takes care of one molecule int molIndex = moleculeInvolved[blockIdx.x]; int startIdx = startAtomIdx[molIndex]; int endIdx = startAtomIdx[molIndex + 1]; int atomIdx; __shared__ double matrix[3][3]; __shared__ double3 com; // thread 0 will set up the matrix and update the gpu_r_k if(threadIdx.x == 0) { com = make_double3(gpu_comx[molIndex], gpu_comy[molIndex], gpu_comz[molIndex]); // This section calculates the amount of rotation double stdDev = sqrt(2.0 * r_max); double btm_x = molTorquex[molIndex] * BETA * r_max; double btm_y = molTorquey[molIndex] * BETA * r_max; double btm_z = molTorquez[molIndex] * BETA * r_max; double3 randnums = randomGaussianCoordsGPU(molIndex, key, step, seed, 0.0, stdDev); double rot_x = btm_x + randnums.x; double rot_y = btm_y + randnums.y; double rot_z = btm_z + randnums.z; // update the trial torque gpu_r_k_x[molIndex] = rot_x; gpu_r_k_y[molIndex] = rot_y; gpu_r_k_z[molIndex] = rot_z; //check for bad configuration if(!isfinite(rot_x + rot_y + rot_z)) { atomicAdd(kill, 1); } // build rotation matrix double cross[3][3], tensor[3][3]; double rotLen = sqrt(rot_x * rot_x + rot_y * rot_y + rot_z * rot_z); double axisx = rot_x * (1.0 / rotLen); double axisy = rot_y * (1.0 / rotLen); double axisz = rot_z * (1.0 / rotLen); // build cross cross[0][0] = 0.0; cross[0][1] = -axisz; cross[0][2] = axisy; cross[1][0] = axisz; cross[1][1] = 0.0; cross[1][2] = -axisx; cross[2][0] = -axisy; cross[2][1] = axisx; cross[2][2] = 0.0; // build tensor int i, j; for(i = 0; i < 3; ++i) { tensor[0][i] = axisx; tensor[1][i] = axisy; tensor[2][i] = axisz; } for(i = 0; i < 3; ++i) { tensor[i][0] *= axisx; tensor[i][1] *= axisy; tensor[i][2] *= axisz; } // build matrix double s, c; sincos(rotLen, &s, &c); for(i = 0; i < 3; ++i) { for(j = 0; j < 3; ++j) { matrix[i][j] = 0.0; } matrix[i][i] = c; } for(i = 0; i < 3; ++i) { for(j = 0; j < 3; ++j) { matrix[i][j] += s * cross[i][j] + (1 - c) * tensor[i][j]; } } } __syncthreads(); // use stride of blockDim.x, which is 32 // each thread handles one atom rotation 
for(atomIdx = startIdx + threadIdx.x; atomIdx < endIdx; atomIdx += blockDim.x) { double3 coor = make_double3(gpu_x[atomIdx], gpu_y[atomIdx], gpu_z[atomIdx]); // unwrap molecule if(isOrthogonal) UnwrapPBC3(coor, com, axis, halfAx); else UnwrapPBCNonOrth3(coor, com, axis, halfAx, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); // move COM of molecule to zero coor.x -= com.x; coor.y -= com.y; coor.z -= com.z; // rotate double newx = matrix[0][0] * coor.x + matrix[0][1] * coor.y + matrix[0][2] * coor.z; double newy = matrix[1][0] * coor.x + matrix[1][1] * coor.y + matrix[1][2] * coor.z; double newz = matrix[2][0] * coor.x + matrix[2][1] * coor.y + matrix[2][2] * coor.z; // move back to com coor.x = newx + com.x; coor.y = newy + com.y; coor.z = newz + com.z; // wrap again if(isOrthogonal) WrapPBC3(coor, axis); else WrapPBCNonOrth3(coor, axis, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); // update the new position gpu_x[atomIdx] = coor.x; gpu_y[atomIdx] = coor.y; gpu_z[atomIdx] = coor.z; } } void BrownianMotionTranslateParticlesGPU( VariablesCUDA *vars, const std::vector<unsigned int> &moleculeInvolved, XYZArray &mForce, XYZArray &mForceRec, XYZArray &newMolPos, XYZArray &newCOMs, XYZArray &t_k, const XYZ &boxAxes, const double BETA, const double t_max, ulong step, unsigned int key, ulong seed, const int box, const bool isOrthogonal, int *kill) { int atomCount = newMolPos.Count(); int molCount = newCOMs.Count(); int molCountInBox = moleculeInvolved.size(); int *gpu_moleculeInvolved; // Each block would handle one molecule int threadsPerBlock = 32; int blocksPerGrid = molCountInBox; CUMALLOC((void **) &gpu_moleculeInvolved, molCountInBox * sizeof(int)); hipMemcpy(vars->gpu_mForcex, mForce.x, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mForcey, mForce.y, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mForcez, mForce.z, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mForceRecx, mForceRec.x, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mForceRecy, mForceRec.y, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_mForceRecz, mForceRec.z, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_x, newMolPos.x, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_y, newMolPos.y, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_z, newMolPos.z, atomCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comx, newCOMs.x, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comy, newCOMs.y, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(vars->gpu_comz, newCOMs.z, molCount * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(gpu_moleculeInvolved, &moleculeInvolved[0], molCountInBox * sizeof(int), hipMemcpyHostToDevice); double3 axis = make_double3(boxAxes.x, boxAxes.y, boxAxes.z); double3 halfAx = make_double3(boxAxes.x * 0.5, boxAxes.y * 0.5, boxAxes.z * 0.5); if (isOrthogonal) hipLaunchKernelGGL(( BrownianMotionTranslateKernel<true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, vars->gpu_startAtomIdx, vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_mForcex, vars->gpu_mForcey, vars->gpu_mForcez, vars->gpu_mForceRecx, vars->gpu_mForceRecy, vars->gpu_mForceRecz, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_t_k_x, vars->gpu_t_k_y, vars->gpu_t_k_z, gpu_moleculeInvolved, vars->gpu_cell_x[box], 
vars->gpu_cell_y[box], vars->gpu_cell_z[box], vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], axis, halfAx, atomCount, t_max, step, key, seed, BETA, kill); else hipLaunchKernelGGL(( BrownianMotionTranslateKernel<false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, vars->gpu_startAtomIdx, vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_mForcex, vars->gpu_mForcey, vars->gpu_mForcez, vars->gpu_mForceRecx, vars->gpu_mForceRecy, vars->gpu_mForceRecz, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_t_k_x, vars->gpu_t_k_y, vars->gpu_t_k_z, gpu_moleculeInvolved, vars->gpu_cell_x[box], vars->gpu_cell_y[box], vars->gpu_cell_z[box], vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], axis, halfAx, atomCount, t_max, step, key, seed, BETA, kill); hipDeviceSynchronize(); checkLastErrorCUDA(__FILE__, __LINE__); hipMemcpy(newMolPos.x, vars->gpu_x, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newMolPos.y, vars->gpu_y, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newMolPos.z, vars->gpu_z, atomCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newCOMs.x, vars->gpu_comx, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newCOMs.y, vars->gpu_comy, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(newCOMs.z, vars->gpu_comz, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(t_k.x, vars->gpu_t_k_x, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(t_k.y, vars->gpu_t_k_y, molCount * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(t_k.z, vars->gpu_t_k_z, molCount * sizeof(double), hipMemcpyDeviceToHost); CUFREE(gpu_moleculeInvolved); checkLastErrorCUDA(__FILE__, __LINE__); } template<const bool isOrthogonal> __global__ void BrownianMotionTranslateKernel( int *startAtomIdx, double *gpu_x, double *gpu_y, double *gpu_z, double *molForcex, double *molForcey, double *molForcez, double *molForceRecx, double *molForceRecy, double *molForceRecz, double *gpu_comx, double *gpu_comy, double *gpu_comz, double *gpu_t_k_x, double *gpu_t_k_y, double *gpu_t_k_z, int *moleculeInvolved, double *gpu_cell_x, double *gpu_cell_y, double *gpu_cell_z, double *gpu_Invcell_x, double *gpu_Invcell_y, double *gpu_Invcell_z, double3 axis, double3 halfAx, int atomCount, double t_max, ulong step, unsigned int key, ulong seed, double BETA, int *kill) { //Each block takes care of one molecule int molIndex = moleculeInvolved[blockIdx.x]; int startIdx = startAtomIdx[molIndex]; int endIdx = startAtomIdx[molIndex + 1]; int atomIdx; __shared__ double3 shift; // thread 0 will calculate the shift vector and update COM and gpu_t_k if(threadIdx.x == 0) { double3 com = make_double3(gpu_comx[molIndex], gpu_comy[molIndex], gpu_comz[molIndex]); // This section calculates the amount of shift double stdDev = sqrt(2.0 * t_max); double bfm_x = (molForcex[molIndex] + molForceRecx[molIndex]) * BETA * t_max; double bfm_y = (molForcey[molIndex] + molForceRecy[molIndex]) * BETA * t_max; double bfm_z = (molForcez[molIndex] + molForceRecz[molIndex]) * BETA * t_max; double3 randnums = randomGaussianCoordsGPU(molIndex, key, step, seed, 0.0, stdDev); shift.x = bfm_x + randnums.x; shift.y = bfm_y + randnums.y; shift.z = bfm_z + randnums.z; // update the trial translate gpu_t_k_x[molIndex] = shift.x; gpu_t_k_y[molIndex] = shift.y; gpu_t_k_z[molIndex] = shift.z; // shift COM com.x += shift.x; com.y += shift.y; com.z += shift.z; // wrap COM if(isOrthogonal) WrapPBC3(com, axis); else WrapPBCNonOrth3(com, axis, gpu_cell_x, 
gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); //update COM gpu_comx[molIndex] = com.x; gpu_comy[molIndex] = com.y; gpu_comz[molIndex] = com.z; //check for bad configuration if(!isfinite(shift.x + shift.y + shift.z)) { atomicAdd(kill, 1); } else if (shift.x > halfAx.x || shift.y > halfAx.y || shift.z > halfAx.z) { atomicAdd(kill, 1); } } __syncthreads(); // use stride of blockDim.x, which is 32 // each thread handles one atom translation for(atomIdx = startIdx + threadIdx.x; atomIdx < endIdx; atomIdx += blockDim.x) { double3 coor = make_double3(gpu_x[atomIdx], gpu_y[atomIdx], gpu_z[atomIdx]); // translate the atom coor.x += shift.x; coor.y += shift.y; coor.z += shift.z; // wrap coordinate if(isOrthogonal) WrapPBC3(coor, axis); else WrapPBCNonOrth3(coor, axis, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); // update the new position gpu_x[atomIdx] = coor.x; gpu_y[atomIdx] = coor.y; gpu_z[atomIdx] = coor.z; } } #endif
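// Illustrative sketch (not part of GOMC): the .hip variant above and the .cu variant that
// follows differ mainly in API spelling. hipify rewrites `kernel<<<grid, block>>>(args...)`
// as `hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...)` and renames
// cudaMemcpy/cudaDeviceSynchronize to hipMemcpy/hipDeviceSynchronize. The kernel and launch
// helper below are hypothetical, written only to make that one-to-one mapping explicit.
#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#else
#include <cuda_runtime.h>
#endif

__global__ void ScaleKernel(double *v, double s, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    v[i] *= s;
}

void LaunchScale(double *d_v, double s, int n)
{
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
#ifdef __HIPCC__
  // HIP spelling, as used throughout the .hip file above
  hipLaunchKernelGGL(ScaleKernel, grid, block, 0, 0, d_v, s, n);
#else
  // CUDA spelling, as used in the .cu file that follows
  ScaleKernel<<<grid, block>>>(d_v, s, n);
#endif
}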
e3f9ac7f36279a83fcfea0640371f849d6828607.cu
/******************************************************************************* GPU OPTIMIZED MONTE CARLO (GOMC) 2.75 Copyright (C) 2022 GOMC Group A copy of the MIT License can be found in License.txt along with this program, also can be found at <https://opensource.org/licenses/MIT>. ********************************************************************************/ #ifdef GOMC_CUDA #include "TransformParticlesCUDAKernel.cuh" #include "CalculateMinImageCUDAKernel.cuh" #include "CUDAMemoryManager.cuh" #include "Random123/boxmuller.hpp" #define MIN_FORCE 1E-12 #define MAX_FORCE 30 __device__ inline double randomGPU(unsigned int counter, ulong step, ulong seed) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; RNG::ctr_type r = philox4x64(c, k); return r123::u01<double>(r[0]); } __device__ inline double3 randomCoordsGPU(unsigned int counter, unsigned int key, ulong step, ulong seed) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; c[1] = key; RNG::ctr_type r = philox4x64(c, k); double3 r01; r01.x = r123::u01<double>(r[0]); r01.y = r123::u01<double>(r[1]); r01.z = r123::u01<double>(r[2]); return r01; } __device__ inline double randomGaussianGPU(unsigned int counter, ulong step, ulong seed, double mean, double stdDev) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; RNG::ctr_type r = philox4x64(c, k); double2 normal2 = r123::boxmuller(r[0], r[1]); double shiftedVal = mean + normal2.x * stdDev; return shiftedVal; } __device__ inline double3 randomGaussianCoordsGPU(unsigned int counter, unsigned int key, ulong step, ulong seed, double mean, double stdDev) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; c[1] = key; RNG::ctr_type r = philox4x64(c, k); double2 normal1 = r123::boxmuller(r[0], r[1]); double2 normal2 = r123::boxmuller(r[2], r[3]); double3 normals = make_double3(mean + normal1.x * stdDev, mean + normal1.y * stdDev, mean + normal2.x * stdDev); return normals; } __device__ inline double SymRandomGPU(unsigned int counter, unsigned int key, ulong step, ulong seed) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; c[1] = key; RNG::ctr_type r = philox4x64(c, k); double r01 = r123::uneg11<double>(r[0]); return r01; } __device__ inline double3 SymRandomCoordsGPU(unsigned int counter, unsigned int key, ulong step, ulong seed) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; c[1] = key; RNG::ctr_type r = philox4x64(c, k); double3 r01; r01.x = r123::uneg11<double>(r[0]); r01.y = r123::uneg11<double>(r[1]); r01.z = r123::uneg11<double>(r[2]); return r01; } //Returns a uniformly random point on the unit sphere __device__ inline double3 RandomCoordsOnSphereGPU(unsigned int counter, unsigned int key, ulong step, ulong seed) { RNG::ctr_type c = {{}}; RNG::ukey_type uk = {{}}; uk[0] = step; uk[1] = seed; RNG::key_type k = uk; c[0] = counter; c[1] = key; RNG::ctr_type r = philox4x64(c, k); //picking phi uniformly will cluster points at poles //pick u = cos(phi) uniformly instead //start from r[1] because I used r[0] in GetSymRandom when called in multiparticle double u = r123::uneg11<double>(r[1]); // theta must be [0, 2pi) ! 
double theta = 2.0 * M_PI * r123::u01<double>(r[2]); double sintheta, costheta; sincos(theta, &sintheta, &costheta); double rootTerm = sqrt(1.0 - u * u); return make_double3(rootTerm * costheta, rootTerm * sintheta, u); } __device__ inline void ApplyRotation(double &x, double &y, double &z, double comx, double comy, double comz, double theta, double3 rotvec, double axx, double axy, double axz, int gpu_nonOrth, double *gpu_cell_x, double *gpu_cell_y, double *gpu_cell_z, double *gpu_Invcell_x, double *gpu_Invcell_y, double *gpu_Invcell_z) { double matrix[3][3], cross[3][3], tensor[3][3]; // build cross cross[0][0] = 0.0; cross[0][1] = -rotvec.z; cross[0][2] = rotvec.y; cross[1][0] = rotvec.z; cross[1][1] = 0.0; cross[1][2] = -rotvec.x; cross[2][0] = -rotvec.y; cross[2][1] = rotvec.x; cross[2][2] = 0.0; // build tensor for(int i = 0; i < 3; i++) { tensor[0][i] = rotvec.x; tensor[1][i] = rotvec.y; tensor[2][i] = rotvec.z; } for(int i = 0; i < 3; i++) { tensor[i][0] *= rotvec.x; tensor[i][1] *= rotvec.y; tensor[i][2] *= rotvec.z; } // build matrix double s, c; sincos(theta, &s, &c); for(int i = 0; i < 3; i++) { for(int j = 0; j < 3; j++) { matrix[i][j] = 0.0; } matrix[i][i] = c; } for(int i = 0; i < 3; i++) { for(int j = 0; j < 3; j++) { matrix[i][j] += s * cross[i][j] + (1 - c) * tensor[i][j]; } } // unwrap molecule double3 coor = make_double3(x, y, z); double3 com = make_double3(comx, comy, comz); double3 axes = make_double3(axx, axy, axz); double3 halfAx = make_double3(axx * 0.5, axy * 0.5, axz * 0.5); if (gpu_nonOrth) UnwrapPBCNonOrth3(coor, com, axes, halfAx, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); else UnwrapPBC3(coor, com, axes, halfAx); // move particle to zero coor.x -= com.x; coor.y -= com.y; coor.z -= com.z; // rotate double3 newcoor; newcoor.x = matrix[0][0] * coor.x + matrix[0][1] * coor.y + matrix[0][2] * coor.z; newcoor.y = matrix[1][0] * coor.x + matrix[1][1] * coor.y + matrix[1][2] * coor.z; newcoor.z = matrix[2][0] * coor.x + matrix[2][1] * coor.y + matrix[2][2] * coor.z; coor.x = newcoor.x + com.x; coor.y = newcoor.y + com.y; coor.z = newcoor.z + com.z; // wrap again if (gpu_nonOrth) WrapPBCNonOrth3(coor, axes, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); else WrapPBC3(coor, axes); x = coor.x; y = coor.y; z = coor.z; } void CallTranslateParticlesGPU(VariablesCUDA *vars, const std::vector<int8_t> &isMoleculeInvolved, int box, double t_max, double *mForcex, double *mForcey, double *mForcez, std::vector<int> &inForceRange, ulong step, unsigned int key, ulong seed, const std::vector<int> &particleMol, int atomCount, int molCount, double xAxes, double yAxes, double zAxes, XYZArray &newMolPos, XYZArray &newCOMs, double lambdaBETA, XYZArray &t_k, XYZArray &molForceRecRef) { int8_t *gpu_isMoleculeInvolved; int threadsPerBlock = 256; int blocksPerGrid = (int)(atomCount / threadsPerBlock) + 1; int *gpu_particleMol; CUMALLOC((void **) &gpu_isMoleculeInvolved, isMoleculeInvolved.size() * sizeof(int8_t)); CUMALLOC((void**) &gpu_particleMol, particleMol.size() * sizeof(int)); cudaMemcpy(vars->gpu_mForcex, mForcex, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mForcey, mForcey, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mForcez, mForcez, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mForceRecx, molForceRecRef.x, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mForceRecy, molForceRecRef.y, molCount * 
sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mForceRecz, molForceRecRef.z, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(gpu_particleMol, &particleMol[0], particleMol.size() * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(gpu_isMoleculeInvolved, &isMoleculeInvolved[0], isMoleculeInvolved.size() * sizeof(int8_t), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_x, newMolPos.x, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_y, newMolPos.y, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_z, newMolPos.z, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comx, newCOMs.x, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comy, newCOMs.y, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comz, newCOMs.z, molCount * sizeof(double), cudaMemcpyHostToDevice); checkLastErrorCUDA(__FILE__, __LINE__); TranslateParticlesKernel <<< blocksPerGrid, threadsPerBlock>>>(molCount, t_max, vars->gpu_mForcex, vars->gpu_mForcey, vars->gpu_mForcez, vars->gpu_inForceRange, step, key, seed, vars->gpu_x, vars->gpu_y, vars->gpu_z, gpu_particleMol, atomCount, xAxes, yAxes, zAxes, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_cell_x[box], vars->gpu_cell_y[box], vars->gpu_cell_z[box], vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], vars->gpu_nonOrth, lambdaBETA, vars->gpu_t_k_x, vars->gpu_t_k_y, vars->gpu_t_k_z, gpu_isMoleculeInvolved, vars->gpu_mForceRecx, vars->gpu_mForceRecy, vars->gpu_mForceRecz); cudaDeviceSynchronize(); checkLastErrorCUDA(__FILE__, __LINE__); cudaMemcpy(newMolPos.x, vars->gpu_x, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newMolPos.y, vars->gpu_y, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newMolPos.z, vars->gpu_z, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newCOMs.x, vars->gpu_comx, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newCOMs.y, vars->gpu_comy, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newCOMs.z, vars->gpu_comz, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(t_k.x, vars->gpu_t_k_x, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(t_k.y, vars->gpu_t_k_y, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(t_k.z, vars->gpu_t_k_z, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&inForceRange[0], vars->gpu_inForceRange, molCount * sizeof(int), cudaMemcpyDeviceToHost); CUFREE(gpu_isMoleculeInvolved); CUFREE(gpu_particleMol); checkLastErrorCUDA(__FILE__, __LINE__); } void CallRotateParticlesGPU(VariablesCUDA *vars, const std::vector<int8_t> &isMoleculeInvolved, int box, double r_max, double *mTorquex, double *mTorquey, double *mTorquez, std::vector<int> &inForceRange, ulong step, unsigned int key, ulong seed, const std::vector<int> &particleMol, int atomCount, int molCount, double xAxes, double yAxes, double zAxes, XYZArray &newMolPos, XYZArray &newCOMs, double lambdaBETA, XYZArray &r_k) { int8_t *gpu_isMoleculeInvolved; int threadsPerBlock = 256; int blocksPerGrid = (int)(atomCount / threadsPerBlock) + 1; int *gpu_particleMol; CUMALLOC((void **) &gpu_isMoleculeInvolved, isMoleculeInvolved.size() * sizeof(int8_t)); CUMALLOC((void**) &gpu_particleMol, particleMol.size() * sizeof(int)); cudaMemcpy(vars->gpu_mTorquex, mTorquex, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mTorquey, mTorquey, molCount * sizeof(double), 
cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mTorquez, mTorquez, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(gpu_particleMol, &particleMol[0], particleMol.size() * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_x, newMolPos.x, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_y, newMolPos.y, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_z, newMolPos.z, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comx, newCOMs.x, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comy, newCOMs.y, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comz, newCOMs.z, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(gpu_isMoleculeInvolved, &isMoleculeInvolved[0], isMoleculeInvolved.size() * sizeof(int8_t), cudaMemcpyHostToDevice); RotateParticlesKernel <<< blocksPerGrid, threadsPerBlock>>>(molCount, r_max, vars->gpu_mTorquex, vars->gpu_mTorquey, vars->gpu_mTorquez, vars->gpu_inForceRange, step, key, seed, vars->gpu_x, vars->gpu_y, vars->gpu_z, gpu_particleMol, atomCount, xAxes, yAxes, zAxes, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_cell_x[box], vars->gpu_cell_y[box], vars->gpu_cell_z[box], vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], vars->gpu_nonOrth, lambdaBETA, vars->gpu_r_k_x, vars->gpu_r_k_y, vars->gpu_r_k_z, gpu_isMoleculeInvolved); cudaDeviceSynchronize(); checkLastErrorCUDA(__FILE__, __LINE__); cudaMemcpy(newMolPos.x, vars->gpu_x, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newMolPos.y, vars->gpu_y, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newMolPos.z, vars->gpu_z, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(r_k.x, vars->gpu_r_k_x, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(r_k.y, vars->gpu_r_k_y, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(r_k.z, vars->gpu_r_k_z, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&inForceRange[0], vars->gpu_inForceRange, molCount * sizeof(int), cudaMemcpyDeviceToHost); CUFREE(gpu_isMoleculeInvolved); CUFREE(gpu_particleMol); checkLastErrorCUDA(__FILE__, __LINE__); } __global__ void TranslateParticlesKernel(unsigned int numberOfMolecules, double t_max, double *molForcex, double *molForcey, double *molForcez, int *gpu_inForceRange, ulong step, unsigned int key, ulong seed, double *gpu_x, double *gpu_y, double *gpu_z, int *gpu_particleMol, int atomCount, double xAxes, double yAxes, double zAxes, double *gpu_comx, double *gpu_comy, double *gpu_comz, double *gpu_cell_x, double *gpu_cell_y, double *gpu_cell_z, double *gpu_Invcell_x, double *gpu_Invcell_y, double *gpu_Invcell_z, int *gpu_nonOrth, double lambdaBETA, double *gpu_t_k_x, double *gpu_t_k_y, double *gpu_t_k_z, int8_t *gpu_isMoleculeInvolved, double *gpu_mForceRecx, double *gpu_mForceRecy, double *gpu_mForceRecz) { int atomNumber = blockIdx.x * blockDim.x + threadIdx.x; if(atomNumber >= atomCount) return; int molIndex = gpu_particleMol[atomNumber]; if(!gpu_isMoleculeInvolved[molIndex]) return; bool updateMol = atomNumber == 0 || (gpu_particleMol[atomNumber] != gpu_particleMol[atomNumber - 1]); // This section calculates the amount of shift double lbfx = (molForcex[molIndex] + gpu_mForceRecx[molIndex]) * lambdaBETA; double lbfy = (molForcey[molIndex] + gpu_mForceRecy[molIndex]) * lambdaBETA; double lbfz = (molForcez[molIndex] + gpu_mForceRecz[molIndex]) * lambdaBETA; double lbmaxx = lbfx 
* t_max; double lbmaxy = lbfy * t_max; double lbmaxz = lbfz * t_max; double shiftx, shifty, shiftz; bool forceInRange; forceInRange = (std::abs(lbmaxx) > MIN_FORCE && std::abs(lbmaxx) < MAX_FORCE && std::abs(lbmaxy) > MIN_FORCE && std::abs(lbmaxy) < MAX_FORCE && std::abs(lbmaxz) > MIN_FORCE && std::abs(lbmaxz) < MAX_FORCE); if (forceInRange) { double3 randnums = randomCoordsGPU(molIndex, key, step, seed); shiftx = log(exp(-1.0 * lbmaxx) + 2 * randnums.x * sinh(lbmaxx)) / lbfx; shifty = log(exp(-1.0 * lbmaxy) + 2 * randnums.y * sinh(lbmaxy)) / lbfy; shiftz = log(exp(-1.0 * lbmaxz) + 2 * randnums.z * sinh(lbmaxz)) / lbfz; } else { double3 randnums = SymRandomCoordsGPU(molIndex, key, step, seed); shiftx = t_max * randnums.x; shifty = t_max * randnums.y; shiftz = t_max * randnums.z; } // perform the shift on the coordinates double3 coor = make_double3(gpu_x[atomNumber] + shiftx, gpu_y[atomNumber] + shifty, gpu_z[atomNumber] + shiftz); // rewrapping double3 axes = make_double3(xAxes, yAxes, zAxes); if (gpu_nonOrth[0]) WrapPBCNonOrth3(coor, axes, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); else WrapPBC3(coor, axes); gpu_x[atomNumber] = coor.x; gpu_y[atomNumber] = coor.y; gpu_z[atomNumber] = coor.z; //update the CoM just once per molecule if(updateMol) { double3 com = make_double3(gpu_comx[molIndex] + shiftx, gpu_comy[molIndex] + shifty, gpu_comz[molIndex] + shiftz); if (gpu_nonOrth[0]) WrapPBCNonOrth3(com, axes, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); else WrapPBC3(com, axes); gpu_comx[molIndex] = com.x; gpu_comy[molIndex] = com.y; gpu_comz[molIndex] = com.z; gpu_t_k_x[molIndex] = shiftx; gpu_t_k_y[molIndex] = shifty; gpu_t_k_z[molIndex] = shiftz; gpu_inForceRange[molIndex] = forceInRange; } } __global__ void RotateParticlesKernel(unsigned int numberOfMolecules, double r_max, double *molTorquex, double *molTorquey, double *molTorquez, int *gpu_inForceRange, ulong step, unsigned int key, ulong seed, double *gpu_x, double *gpu_y, double *gpu_z, int *gpu_particleMol, int atomCount, double xAxes, double yAxes, double zAxes, double *gpu_comx, double *gpu_comy, double *gpu_comz, double *gpu_cell_x, double *gpu_cell_y, double *gpu_cell_z, double *gpu_Invcell_x, double *gpu_Invcell_y, double *gpu_Invcell_z, int *gpu_nonOrth, double lambdaBETA, double *gpu_r_k_x, double *gpu_r_k_y, double *gpu_r_k_z, int8_t *gpu_isMoleculeInvolved) { int atomNumber = blockIdx.x * blockDim.x + threadIdx.x; if(atomNumber >= atomCount) return; int molIndex = gpu_particleMol[atomNumber]; if(!gpu_isMoleculeInvolved[molIndex]) return; bool updateMol = atomNumber == 0 || (gpu_particleMol[atomNumber] != gpu_particleMol[atomNumber - 1]); // This section calculates the amount of rotation double lbtx = molTorquex[molIndex] * lambdaBETA; double lbty = molTorquey[molIndex] * lambdaBETA; double lbtz = molTorquez[molIndex] * lambdaBETA; double lbmaxx = lbtx * r_max; double lbmaxy = lbty * r_max; double lbmaxz = lbtz * r_max; double rotx, roty, rotz, theta; double3 rotvec; bool forceInRange; forceInRange = (std::abs(lbmaxx) > MIN_FORCE && std::abs(lbmaxx) < MAX_FORCE && std::abs(lbmaxy) > MIN_FORCE && std::abs(lbmaxy) < MAX_FORCE && std::abs(lbmaxz) > MIN_FORCE && std::abs(lbmaxz) < MAX_FORCE); if (forceInRange) { double3 randnums = randomCoordsGPU(molIndex, key, step, seed); rotx = log(exp(-1.0 * lbmaxx) + 2 * randnums.x * sinh(lbmaxx)) / lbtx; roty = log(exp(-1.0 * lbmaxy) + 2 * randnums.y * sinh(lbmaxy)) / lbty; rotz = log(exp(-1.0 * lbmaxz) + 2 * 
randnums.z * sinh(lbmaxz)) / lbtz; theta = sqrt(rotx * rotx + roty * roty + rotz * rotz); rotvec = make_double3(rotx * (1.0/theta), roty * (1.0/theta), rotz * (1.0/theta)); } else { double3 randnums = RandomCoordsOnSphereGPU(molIndex, key, step, seed); //These values are ignored if !forceInRange so just initialize to zero. rotx = 0.0; roty = 0.0; rotz = 0.0; theta = r_max * SymRandomGPU(molIndex, key, step, seed); rotvec = randnums; } if(updateMol) { gpu_r_k_x[molIndex] = rotx; gpu_r_k_y[molIndex] = roty; gpu_r_k_z[molIndex] = rotz; gpu_inForceRange[molIndex] = forceInRange; } // perform the rotation on the coordinates ApplyRotation(gpu_x[atomNumber], gpu_y[atomNumber], gpu_z[atomNumber], gpu_comx[molIndex], gpu_comy[molIndex], gpu_comz[molIndex], theta, rotvec, xAxes, yAxes, zAxes, *gpu_nonOrth, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); } // CUDA implementation of MultiParticle Brownian transformation void BrownianMotionRotateParticlesGPU( VariablesCUDA *vars, const std::vector<unsigned int> &moleculeInvolved, XYZArray &mTorque, XYZArray &newMolPos, XYZArray &newCOMs, XYZArray &r_k, const XYZ &boxAxes, const double BETA, const double r_max, ulong step, unsigned int key, ulong seed, const int box, const bool isOrthogonal, int *kill) { int atomCount = newMolPos.Count(); int molCount = newCOMs.Count(); int molCountInBox = moleculeInvolved.size(); int *gpu_moleculeInvolved; // Each block would handle one molecule int threadsPerBlock = 32; int blocksPerGrid = molCountInBox; CUMALLOC((void **) &gpu_moleculeInvolved, molCountInBox * sizeof(int)); cudaMemcpy(vars->gpu_mTorquex, mTorque.x, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mTorquey, mTorque.y, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mTorquez, mTorque.z, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_x, newMolPos.x, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_y, newMolPos.y, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_z, newMolPos.z, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comx, newCOMs.x, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comy, newCOMs.y, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comz, newCOMs.z, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(gpu_moleculeInvolved, &moleculeInvolved[0], molCountInBox * sizeof(int), cudaMemcpyHostToDevice); double3 axis = make_double3(boxAxes.x, boxAxes.y, boxAxes.z); double3 halfAx = make_double3(boxAxes.x * 0.5, boxAxes.y * 0.5, boxAxes.z * 0.5); if (isOrthogonal) BrownianMotionRotateKernel<true><<< blocksPerGrid, threadsPerBlock>>>( vars->gpu_startAtomIdx, vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_mTorquex, vars->gpu_mTorquey, vars->gpu_mTorquez, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_r_k_x, vars->gpu_r_k_y, vars->gpu_r_k_z, gpu_moleculeInvolved, vars->gpu_cell_x[box], vars->gpu_cell_y[box], vars->gpu_cell_z[box], vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], axis, halfAx, atomCount, r_max, step, key, seed, BETA, kill); else BrownianMotionRotateKernel<true><<< blocksPerGrid, threadsPerBlock>>>( vars->gpu_startAtomIdx, vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_mTorquex, vars->gpu_mTorquey, vars->gpu_mTorquez, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_r_k_x, vars->gpu_r_k_y, vars->gpu_r_k_z, gpu_moleculeInvolved, 
vars->gpu_cell_x[box], vars->gpu_cell_y[box], vars->gpu_cell_z[box], vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], axis, halfAx, atomCount, r_max, step, key, seed, BETA, kill); cudaDeviceSynchronize(); checkLastErrorCUDA(__FILE__, __LINE__); cudaMemcpy(newMolPos.x, vars->gpu_x, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newMolPos.y, vars->gpu_y, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newMolPos.z, vars->gpu_z, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(r_k.x, vars->gpu_r_k_x, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(r_k.y, vars->gpu_r_k_y, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(r_k.z, vars->gpu_r_k_z, molCount * sizeof(double), cudaMemcpyDeviceToHost); CUFREE(gpu_moleculeInvolved); checkLastErrorCUDA(__FILE__, __LINE__); } template<const bool isOrthogonal> __global__ void BrownianMotionRotateKernel( int *startAtomIdx, double *gpu_x, double *gpu_y, double *gpu_z, double *molTorquex, double *molTorquey, double *molTorquez, double *gpu_comx, double *gpu_comy, double *gpu_comz, double *gpu_r_k_x, double *gpu_r_k_y, double *gpu_r_k_z, int *moleculeInvolved, double *gpu_cell_x, double *gpu_cell_y, double *gpu_cell_z, double *gpu_Invcell_x, double *gpu_Invcell_y, double *gpu_Invcell_z, double3 axis, double3 halfAx, int atomCount, double r_max, ulong step, unsigned int key, ulong seed, double BETA, int *kill) { //Each block takes care of one molecule int molIndex = moleculeInvolved[blockIdx.x]; int startIdx = startAtomIdx[molIndex]; int endIdx = startAtomIdx[molIndex + 1]; int atomIdx; __shared__ double matrix[3][3]; __shared__ double3 com; // thread 0 will set up the matrix and update the gpu_r_k if(threadIdx.x == 0) { com = make_double3(gpu_comx[molIndex], gpu_comy[molIndex], gpu_comz[molIndex]); // This section calculates the amount of rotation double stdDev = sqrt(2.0 * r_max); double btm_x = molTorquex[molIndex] * BETA * r_max; double btm_y = molTorquey[molIndex] * BETA * r_max; double btm_z = molTorquez[molIndex] * BETA * r_max; double3 randnums = randomGaussianCoordsGPU(molIndex, key, step, seed, 0.0, stdDev); double rot_x = btm_x + randnums.x; double rot_y = btm_y + randnums.y; double rot_z = btm_z + randnums.z; // update the trial torque gpu_r_k_x[molIndex] = rot_x; gpu_r_k_y[molIndex] = rot_y; gpu_r_k_z[molIndex] = rot_z; //check for bad configuration if(!isfinite(rot_x + rot_y + rot_z)) { atomicAdd(kill, 1); } // build rotation matrix double cross[3][3], tensor[3][3]; double rotLen = sqrt(rot_x * rot_x + rot_y * rot_y + rot_z * rot_z); double axisx = rot_x * (1.0 / rotLen); double axisy = rot_y * (1.0 / rotLen); double axisz = rot_z * (1.0 / rotLen); // build cross cross[0][0] = 0.0; cross[0][1] = -axisz; cross[0][2] = axisy; cross[1][0] = axisz; cross[1][1] = 0.0; cross[1][2] = -axisx; cross[2][0] = -axisy; cross[2][1] = axisx; cross[2][2] = 0.0; // build tensor int i, j; for(i = 0; i < 3; ++i) { tensor[0][i] = axisx; tensor[1][i] = axisy; tensor[2][i] = axisz; } for(i = 0; i < 3; ++i) { tensor[i][0] *= axisx; tensor[i][1] *= axisy; tensor[i][2] *= axisz; } // build matrix double s, c; sincos(rotLen, &s, &c); for(i = 0; i < 3; ++i) { for(j = 0; j < 3; ++j) { matrix[i][j] = 0.0; } matrix[i][i] = c; } for(i = 0; i < 3; ++i) { for(j = 0; j < 3; ++j) { matrix[i][j] += s * cross[i][j] + (1 - c) * tensor[i][j]; } } } __syncthreads(); // use stride of blockDim.x, which is 32 // each thread handles one atom rotation for(atomIdx = startIdx + 
threadIdx.x; atomIdx < endIdx; atomIdx += blockDim.x) { double3 coor = make_double3(gpu_x[atomIdx], gpu_y[atomIdx], gpu_z[atomIdx]); // unwrap molecule if(isOrthogonal) UnwrapPBC3(coor, com, axis, halfAx); else UnwrapPBCNonOrth3(coor, com, axis, halfAx, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); // move COM of molecule to zero coor.x -= com.x; coor.y -= com.y; coor.z -= com.z; // rotate double newx = matrix[0][0] * coor.x + matrix[0][1] * coor.y + matrix[0][2] * coor.z; double newy = matrix[1][0] * coor.x + matrix[1][1] * coor.y + matrix[1][2] * coor.z; double newz = matrix[2][0] * coor.x + matrix[2][1] * coor.y + matrix[2][2] * coor.z; // move back to com coor.x = newx + com.x; coor.y = newy + com.y; coor.z = newz + com.z; // wrap again if(isOrthogonal) WrapPBC3(coor, axis); else WrapPBCNonOrth3(coor, axis, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); // update the new position gpu_x[atomIdx] = coor.x; gpu_y[atomIdx] = coor.y; gpu_z[atomIdx] = coor.z; } } void BrownianMotionTranslateParticlesGPU( VariablesCUDA *vars, const std::vector<unsigned int> &moleculeInvolved, XYZArray &mForce, XYZArray &mForceRec, XYZArray &newMolPos, XYZArray &newCOMs, XYZArray &t_k, const XYZ &boxAxes, const double BETA, const double t_max, ulong step, unsigned int key, ulong seed, const int box, const bool isOrthogonal, int *kill) { int atomCount = newMolPos.Count(); int molCount = newCOMs.Count(); int molCountInBox = moleculeInvolved.size(); int *gpu_moleculeInvolved; // Each block would handle one molecule int threadsPerBlock = 32; int blocksPerGrid = molCountInBox; CUMALLOC((void **) &gpu_moleculeInvolved, molCountInBox * sizeof(int)); cudaMemcpy(vars->gpu_mForcex, mForce.x, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mForcey, mForce.y, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mForcez, mForce.z, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mForceRecx, mForceRec.x, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mForceRecy, mForceRec.y, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_mForceRecz, mForceRec.z, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_x, newMolPos.x, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_y, newMolPos.y, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_z, newMolPos.z, atomCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comx, newCOMs.x, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comy, newCOMs.y, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(vars->gpu_comz, newCOMs.z, molCount * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(gpu_moleculeInvolved, &moleculeInvolved[0], molCountInBox * sizeof(int), cudaMemcpyHostToDevice); double3 axis = make_double3(boxAxes.x, boxAxes.y, boxAxes.z); double3 halfAx = make_double3(boxAxes.x * 0.5, boxAxes.y * 0.5, boxAxes.z * 0.5); if (isOrthogonal) BrownianMotionTranslateKernel<true><<< blocksPerGrid, threadsPerBlock>>>( vars->gpu_startAtomIdx, vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_mForcex, vars->gpu_mForcey, vars->gpu_mForcez, vars->gpu_mForceRecx, vars->gpu_mForceRecy, vars->gpu_mForceRecz, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_t_k_x, vars->gpu_t_k_y, vars->gpu_t_k_z, gpu_moleculeInvolved, vars->gpu_cell_x[box], vars->gpu_cell_y[box], vars->gpu_cell_z[box], 
vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], axis, halfAx, atomCount, t_max, step, key, seed, BETA, kill); else BrownianMotionTranslateKernel<false><<< blocksPerGrid, threadsPerBlock>>>( vars->gpu_startAtomIdx, vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_mForcex, vars->gpu_mForcey, vars->gpu_mForcez, vars->gpu_mForceRecx, vars->gpu_mForceRecy, vars->gpu_mForceRecz, vars->gpu_comx, vars->gpu_comy, vars->gpu_comz, vars->gpu_t_k_x, vars->gpu_t_k_y, vars->gpu_t_k_z, gpu_moleculeInvolved, vars->gpu_cell_x[box], vars->gpu_cell_y[box], vars->gpu_cell_z[box], vars->gpu_Invcell_x[box], vars->gpu_Invcell_y[box], vars->gpu_Invcell_z[box], axis, halfAx, atomCount, t_max, step, key, seed, BETA, kill); cudaDeviceSynchronize(); checkLastErrorCUDA(__FILE__, __LINE__); cudaMemcpy(newMolPos.x, vars->gpu_x, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newMolPos.y, vars->gpu_y, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newMolPos.z, vars->gpu_z, atomCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newCOMs.x, vars->gpu_comx, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newCOMs.y, vars->gpu_comy, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(newCOMs.z, vars->gpu_comz, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(t_k.x, vars->gpu_t_k_x, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(t_k.y, vars->gpu_t_k_y, molCount * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(t_k.z, vars->gpu_t_k_z, molCount * sizeof(double), cudaMemcpyDeviceToHost); CUFREE(gpu_moleculeInvolved); checkLastErrorCUDA(__FILE__, __LINE__); } template<const bool isOrthogonal> __global__ void BrownianMotionTranslateKernel( int *startAtomIdx, double *gpu_x, double *gpu_y, double *gpu_z, double *molForcex, double *molForcey, double *molForcez, double *molForceRecx, double *molForceRecy, double *molForceRecz, double *gpu_comx, double *gpu_comy, double *gpu_comz, double *gpu_t_k_x, double *gpu_t_k_y, double *gpu_t_k_z, int *moleculeInvolved, double *gpu_cell_x, double *gpu_cell_y, double *gpu_cell_z, double *gpu_Invcell_x, double *gpu_Invcell_y, double *gpu_Invcell_z, double3 axis, double3 halfAx, int atomCount, double t_max, ulong step, unsigned int key, ulong seed, double BETA, int *kill) { //Each block takes care of one molecule int molIndex = moleculeInvolved[blockIdx.x]; int startIdx = startAtomIdx[molIndex]; int endIdx = startAtomIdx[molIndex + 1]; int atomIdx; __shared__ double3 shift; // thread 0 will calculate the shift vector and update COM and gpu_t_k if(threadIdx.x == 0) { double3 com = make_double3(gpu_comx[molIndex], gpu_comy[molIndex], gpu_comz[molIndex]); // This section calculates the amount of shift double stdDev = sqrt(2.0 * t_max); double bfm_x = (molForcex[molIndex] + molForceRecx[molIndex]) * BETA * t_max; double bfm_y = (molForcey[molIndex] + molForceRecy[molIndex]) * BETA * t_max; double bfm_z = (molForcez[molIndex] + molForceRecz[molIndex]) * BETA * t_max; double3 randnums = randomGaussianCoordsGPU(molIndex, key, step, seed, 0.0, stdDev); shift.x = bfm_x + randnums.x; shift.y = bfm_y + randnums.y; shift.z = bfm_z + randnums.z; // update the trial translate gpu_t_k_x[molIndex] = shift.x; gpu_t_k_y[molIndex] = shift.y; gpu_t_k_z[molIndex] = shift.z; // shift COM com.x += shift.x; com.y += shift.y; com.z += shift.z; // wrap COM if(isOrthogonal) WrapPBC3(com, axis); else WrapPBCNonOrth3(com, axis, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, 
gpu_Invcell_z); //update COM gpu_comx[molIndex] = com.x; gpu_comy[molIndex] = com.y; gpu_comz[molIndex] = com.z; //check for bad configuration if(!isfinite(shift.x + shift.y + shift.z)) { atomicAdd(kill, 1); } else if (shift.x > halfAx.x || shift.y > halfAx.y || shift.z > halfAx.z) { atomicAdd(kill, 1); } } __syncthreads(); // use stride of blockDim.x, which is 32 // each thread handles one atom translation for(atomIdx = startIdx + threadIdx.x; atomIdx < endIdx; atomIdx += blockDim.x) { double3 coor = make_double3(gpu_x[atomIdx], gpu_y[atomIdx], gpu_z[atomIdx]); // translate the atom coor.x += shift.x; coor.y += shift.y; coor.z += shift.z; // wrap coordinate if(isOrthogonal) WrapPBC3(coor, axis); else WrapPBCNonOrth3(coor, axis, gpu_cell_x, gpu_cell_y, gpu_cell_z, gpu_Invcell_x, gpu_Invcell_y, gpu_Invcell_z); // update the new position gpu_x[atomIdx] = coor.x; gpu_y[atomIdx] = coor.y; gpu_z[atomIdx] = coor.z; } } #endif
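// Host-side sketch (illustration only, not taken from GOMC): the rotation matrix assembled
// by BrownianMotionRotateKernel above is the axis-angle (Rodrigues) form
//   M = cos(theta)*I + sin(theta)*K + (1 - cos(theta)) * a * a^T,
// with a = rot/|rot|, theta = |rot|, and K the skew-symmetric cross-product matrix of a.
// The function name and plain-array layout here are illustrative choices.
#include <cmath>

void BuildRotationMatrix(const double rot[3], double m[3][3])
{
  double theta = std::sqrt(rot[0] * rot[0] + rot[1] * rot[1] + rot[2] * rot[2]);
  double a[3] = {rot[0] / theta, rot[1] / theta, rot[2] / theta};
  double c = std::cos(theta);
  double s = std::sin(theta);
  // K: skew-symmetric cross-product matrix of the unit axis a
  double K[3][3] = {{ 0.0,  -a[2],  a[1]},
                    { a[2],  0.0,  -a[0]},
                    {-a[1],  a[0],  0.0}};
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 3; ++j) {
      double identity = (i == j) ? 1.0 : 0.0;
      m[i][j] = c * identity + s * K[i][j] + (1.0 - c) * a[i] * a[j];
    }
  }
}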
86dd46537d8c6cf856b0536d93607bbc6520ac4a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file sigmoid_internal_device.cu * @author Daniel Nichols * @version 0.0.1 * @date 2019-02-23 * * @copyright Copyright (c) 2019 */ #include "compute/sigmoid/sigmoid_internal.h" namespace magmadnn { namespace internal { template <typename T> __global__ void kernel_fast_sigmoid_full_device(unsigned int size, T *x) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { x[i] = x[i] / (1 + abs(x[i])); } } template <typename T> __global__ void kernel_sigmoid_full_device(unsigned int size, T *x) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { x[i] = 1 / (1 + exp(-x[i])); } } /* exp(INT_TYPE) is not defined in CUDA, so just use 1/(1+|x|) for int. Everything will be zero anyways. TODO: decide what to do with int sigmoid. */ template <> __global__ void kernel_sigmoid_full_device(unsigned int size, int *x) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { x[i] = 1 / (1 + abs(x[i])); } } template <typename T> void sigmoid_full_device(Tensor<T> *x, bool fast) { if (fast) hipLaunchKernelGGL(( kernel_fast_sigmoid_full_device) , dim3(x->get_size()), dim3(1), 0, 0, x->get_size(), x->get_ptr()); else hipLaunchKernelGGL(( kernel_sigmoid_full_device) , dim3(x->get_size()), dim3(1), 0, 0, x->get_size(), x->get_ptr()); } template<> void sigmoid_full_device(Tensor<int> *x, bool fast) { /* sigmoid doesn't make much sense on integer precision */ for (unsigned int i = 0; i < x->get_size(); i++) x->set(i, (int) exp(x->get(i))); } template void sigmoid_full_device(Tensor<float> *x, bool fast); template void sigmoid_full_device(Tensor<double> *x, bool fast); } // namespace internal } // namespace magmadnn
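// Scalar sketch (illustration only): the two device kernels above compute the logistic
// sigmoid 1/(1 + exp(-x)) and, in the "fast" path, the rational form x/(1 + |x|).
// The helper below is hypothetical and simply restates those two formulas for one value.
#include <cmath>

double sigmoid_reference(double x, bool fast)
{
  if (fast)
    return x / (1.0 + std::fabs(x));   // same formula as kernel_fast_sigmoid_full_device
  return 1.0 / (1.0 + std::exp(-x));   // same formula as kernel_sigmoid_full_device
}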
86dd46537d8c6cf856b0536d93607bbc6520ac4a.cu
/** * @file sigmoid_internal_device.cu * @author Daniel Nichols * @version 0.0.1 * @date 2019-02-23 * * @copyright Copyright (c) 2019 */ #include "compute/sigmoid/sigmoid_internal.h" namespace magmadnn { namespace internal { template <typename T> __global__ void kernel_fast_sigmoid_full_device(unsigned int size, T *x) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { x[i] = x[i] / (1 + abs(x[i])); } } template <typename T> __global__ void kernel_sigmoid_full_device(unsigned int size, T *x) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { x[i] = 1 / (1 + exp(-x[i])); } } /* exp(INT_TYPE) is not defined in CUDA, so just use 1/(1+|x|) for int. Everything will be zero anyways. TODO: decide what to do with int sigmoid. */ template <> __global__ void kernel_sigmoid_full_device(unsigned int size, int *x) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { x[i] = 1 / (1 + abs(x[i])); } } template <typename T> void sigmoid_full_device(Tensor<T> *x, bool fast) { if (fast) kernel_fast_sigmoid_full_device <<<x->get_size(), 1>>> (x->get_size(), x->get_ptr()); else kernel_sigmoid_full_device <<<x->get_size(), 1>>> (x->get_size(), x->get_ptr()); } template<> void sigmoid_full_device(Tensor<int> *x, bool fast) { /* sigmoid doesn't make much sense on integer precision */ for (unsigned int i = 0; i < x->get_size(); i++) x->set(i, (int) exp(x->get(i))); } template void sigmoid_full_device(Tensor<float> *x, bool fast); template void sigmoid_full_device(Tensor<double> *x, bool fast); } // namespace internal } // namespace magmadnn
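// Launch sketch (illustration only, not the MagmaDNN API): sigmoid_full_device above launches
// one thread per block (<<<size, 1>>>). Because the kernels use a grid-stride loop, they also
// accept a conventional configuration; the 256-thread block and the grid cap below are assumed
// values chosen for this sketch, and the kernel definitions from the file above are assumed to
// be in scope.
template <typename T>
void sigmoid_full_device_alt(unsigned int size, T *d_x, bool fast)
{
  const unsigned int block = 256;
  unsigned int grid = (size + block - 1) / block;
  if (grid > 1024) grid = 1024;  // the grid-stride loop inside the kernel covers the remainder
  if (fast)
    kernel_fast_sigmoid_full_device<T><<<grid, block>>>(size, d_x);
  else
    kernel_sigmoid_full_device<T><<<grid, block>>>(size, d_x);
}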
a81a8b7bec557202a34f43c81714e48c95f3906f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathBlas.cu" #else THC_API accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THAssert(THCTensor_(checkGPU)(state, 2, self, src)); THArgCheck(THCTensor_(nElement)(state, self) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); self = THCTensor_(newContiguous)(state, self); src = THCTensor_(newContiguous)(state, src); #ifdef THC_REAL_IS_FLOAT accreal result = THCudaBlas_Sdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_DOUBLE) accreal result = THCudaBlas_Ddot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_HALF) accreal result = THCudaBlas_Hdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #endif THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; #else THError("unimplemented data type"); return ScalarConvert<int, accreal>::to(0); #endif } THC_API void THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *mat, THCTensor *vec) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THAssert(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec)); if( (mat->nDimension != 2) || (vec->nDimension != 1) ) THError("matrix and vector expected"); if( mat->size[1] != vec->size[0] ) THError("size mismatch"); if(t->nDimension != 1) THError("size mismatch"); if(t->size[0] != mat->size[0]) THError("size mismatch"); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if(r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(mat->stride[0] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 'n', mat->size[0], mat->size[1], alpha, THCTensor_(data)(state, mat), mat->stride[1], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 'n', mat->size[0], mat->size[1], alpha, THCTensor_(data)(state, mat), mat->stride[1], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #endif } else if(mat->stride[1] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, mat), mat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, mat), mat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #endif } else { THCTensor *cmat = THCTensor_(newContiguous)(state, mat); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, cmat), cmat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, cmat), cmat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); 
#endif THCTensor_(free)(state, cmat); } #elif defined(THC_REAL_IS_HALF) // Currently no Hgemv/SgemvEx in Cublas THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec); THCTensor_(resize2d)(state, vecAsMatrix, vecAsMatrix->size[0], 1); THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t); THCTensor_(resize2d)(state, tAsMatrix, tAsMatrix->size[0], 1); THCTensor_(addmm)(state, r_, beta, tAsMatrix, alpha, mat, vecAsMatrix); // r_ will have answer as matrix, need to return a vecotr THCTensor_(resize1d)(state, r_, r_->size[0]); THCTensor_(free)(state, vecAsMatrix); THCTensor_(free)(state, tAsMatrix); #endif #else THError("unimplemented data type"); #endif } THC_API void THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *vec1, THCTensor *vec2) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THAssert(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2)); if ( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) { THError("vector and vector expected"); } if (t->nDimension != 2) { THError("size mismatch"); } if ( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) { THError("size mismatch"); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if (r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } if(r_->stride[0] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec1->size[0], vec2->size[0], alpha, THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, r_), r_->stride[1]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec1->size[0], vec2->size[0], alpha, THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, r_), r_->stride[1]); #endif } else if(r_->stride[1] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, r_), r_->stride[0]); #endif } else { THCTensor *cr = THCTensor_(newClone)(state, r_); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, cr), cr->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, cr), cr->stride[0]); #endif THCTensor_(freeCopyTo)(state, cr, r_); } #elif defined(THC_REAL_IS_HALF) // currently no Hger/SgerEx in Cublas. 
THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2); THCTensor_(resize2d)(state, vec2T, vec2T->size[0], 1); THCTensor_(transpose)(state, vec2T, NULL, 0, 1); THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1); THCTensor_(resize2d)(state, vec1M, vec1M->size[0], 1); THCTensor_(addmm)(state, r_, beta, t, alpha, vec1M, vec2T); THCTensor_(free)(state, vec2T); THCTensor_(free)(state, vec1M); #endif #else THError("unimplemented data type"); #endif } THC_API void THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *m1, THCTensor *m2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2)); char transpose_r, transpose_m1, transpose_m2; THCTensor *r__, *m1_, *m2_; if( (m1->nDimension != 2) || (m2->nDimension != 2) ) THError("matrix and matrix expected"); if(t->nDimension != 2) THError("size mismatch"); if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) || (m1->size[1] != m2->size[0]) ) THError("size mismatch"); if(t != r_) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride[1] == 1 && r_->stride[0] != 0) { THCTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* m1 */ if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THCTensor_(newContiguous)(state, m1); } /* m2 */ if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); m2_ = THCTensor_(newContiguous)(state, m2); } #ifdef THC_REAL_IS_HALF THCudaBlas_Hgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); #elif defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 
1 : 0)]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); #endif /* free intermediate variables */ if(m1_ != m1) { THCTensor_(free)(state, m1_); } if(m2_ != m2) { THCTensor_(free)(state, m2_); } if(r__ != r_) { THCTensor_(freeCopyTo)(state, r__, r_); } #else THError("unimplemented data type"); #endif } THC_API void THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 2, 4, "expected 2D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); long batchnum = THCTensor_(size)(state, batch1, 0); long m1d1 = THCTensor_(size)(state, batch1, 1); long innerdim = THCTensor_(size)(state, batch1, 2); long m2d2 = THCTensor_(size)(state, batch2, 2); THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); // M is t, as listed in the docs under addbmm THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6, "first dimension must match first dimension of M"); THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7, "second dimension must match second dimension of M"); THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6, "second dimension must match first dimension of batch2"); if (t != result) { THCTensor_(resizeAs)(state, result, t); THCTensor_(copy)(state, result, t); } THCTensor *slice1 = THCTensor_(new)(state); THCTensor *slice2 = THCTensor_(new)(state); for (long i=0; i<batchnum; i++) { THCTensor_(select)(state, slice1, batch1, 0, i); THCTensor_(select)(state, slice2, batch2, 0, i); THCTensor_(addmm)(state, result, beta, result, alpha, slice1, slice2); beta = ScalarConvert<int, real>::to(1); } THCTensor_(free)(state, slice1); THCTensor_(free)(state, slice2); #else THError("unimplemented data type"); #endif } __global__ void createBatchGemmBuffer(const real** buffer, real* data, long stride, long num_batches) { const long idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer[idx] = data + idx * stride; } } THC_API void THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 3, 4, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7, "equal 
number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6, "wrong matrix size"); if (t != result) { THCTensor_(resizeAs)(state, result, t); THCTensor_(copy)(state, result, t); } bool transpose_result; char transpose_batch1, transpose_batch2; long lda, ldb, ldc; THCTensor *result_, *batch1_, *batch2_; if (result->stride[1] == 1) { transpose_result = false; result_ = result; ldc = result_->stride[2]; } else if (result->stride[2] == 1) { transpose_result = true; THCTensor *swap = batch2; batch2 = batch1; batch1 = swap; result_ = result; ldc = result_->stride[1]; } else { transpose_result = false; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2); result_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, result_, NULL, 1, 2); ldc = result_->stride[2]; } if (batch1->stride[transpose_result ? 2 : 1] == 1) { transpose_batch1 = 'n'; batch1_ = batch1; lda = batch1_->stride[transpose_result ? 1 : 2]; } else if (batch1->stride[transpose_result ? 1 : 2] == 1) { transpose_batch1 = 't'; batch1_ = batch1; lda = batch1_->stride[transpose_result ? 2 : 1]; } else { transpose_batch1 = transpose_result ? 'n' : 't'; batch1_ = THCTensor_(newContiguous)(state, batch1); lda = batch1_->stride[1]; } if (batch2->stride[transpose_result ? 2 : 1] == 1) { transpose_batch2 = 'n'; batch2_ = batch2; ldb = batch2_->stride[transpose_result ? 1 : 2]; } else if (batch2->stride[transpose_result ? 1 : 2] == 1) { transpose_batch2 = 't'; batch2_ = batch2; ldb = batch2_->stride[transpose_result ? 2 : 1]; } else { transpose_batch2 = transpose_result ? 'n' : 't'; batch2_ = THCTensor_(newContiguous)(state, batch2); ldb = batch2_->stride[1]; } // Compute pointers to matrices in each batch. long num_batches = result_->size[0]; size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. const real **d_matrices1, **d_matrices2; real **d_result_matrices; THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_result_matrices, matrices_size)); const long block = 512; const long grid = (num_batches + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), d_matrices1, THCTensor_(data)(state, batch1_), batch1_->stride[0], num_batches); hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), d_matrices2, THCTensor_(data)(state, batch2_), batch2_->stride[0], num_batches); hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), (const real**)d_result_matrices, THCTensor_(data)(state,result_), result_->stride[0], num_batches); #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 
2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #endif THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCudaFree(state, d_result_matrices); if (batch1_ != batch1) { THCTensor_(free)(state, batch1_); } if (batch2_ != batch2) { THCTensor_(free)(state, batch2_); } if (result_ != result) { THCTensor_(freeCopyTo)(state, result_, result); } #else THError("unimplemented data type"); #endif } #endif
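Read side by side, the HIP record above and the CUDA record that follows differ mostly in launch syntax: hipify rewrites each triple-chevron kernel launch into a hipLaunchKernelGGL call whose arguments are the kernel symbol, the grid and block sizes as dim3 values, the dynamic shared-memory byte count, the stream, and then the kernel's own arguments. A minimal side-by-side illustration, using the createBatchGemmBuffer launch that appears in both files:

// CUDA launch, as it appears in the .cu record below:
createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
    d_matrices1, THCTensor_(data)(state, batch1_), batch1_->stride[0], num_batches);

// Equivalent HIP launch, as hipify emits it in the record above:
hipLaunchKernelGGL((createBatchGemmBuffer), dim3(grid), dim3(block), 0,
                   THCState_getCurrentStream(state),
                   d_matrices1, THCTensor_(data)(state, batch1_), batch1_->stride[0], num_batches);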
a81a8b7bec557202a34f43c81714e48c95f3906f.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathBlas.cu" #else THC_API accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THAssert(THCTensor_(checkGPU)(state, 2, self, src)); THArgCheck(THCTensor_(nElement)(state, self) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); self = THCTensor_(newContiguous)(state, self); src = THCTensor_(newContiguous)(state, src); #ifdef THC_REAL_IS_FLOAT accreal result = THCudaBlas_Sdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_DOUBLE) accreal result = THCudaBlas_Ddot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_HALF) accreal result = THCudaBlas_Hdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #endif THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; #else THError("unimplemented data type"); return ScalarConvert<int, accreal>::to(0); #endif } THC_API void THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *mat, THCTensor *vec) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THAssert(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec)); if( (mat->nDimension != 2) || (vec->nDimension != 1) ) THError("matrix and vector expected"); if( mat->size[1] != vec->size[0] ) THError("size mismatch"); if(t->nDimension != 1) THError("size mismatch"); if(t->size[0] != mat->size[0]) THError("size mismatch"); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if(r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(mat->stride[0] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 'n', mat->size[0], mat->size[1], alpha, THCTensor_(data)(state, mat), mat->stride[1], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 'n', mat->size[0], mat->size[1], alpha, THCTensor_(data)(state, mat), mat->stride[1], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #endif } else if(mat->stride[1] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, mat), mat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, mat), mat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #endif } else { THCTensor *cmat = THCTensor_(newContiguous)(state, mat); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, cmat), cmat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size[1], mat->size[0], alpha, THCTensor_(data)(state, cmat), cmat->stride[0], THCTensor_(data)(state, vec), vec->stride[0], beta, THCTensor_(data)(state, r_), r_->stride[0]); #endif THCTensor_(free)(state, cmat); } #elif defined(THC_REAL_IS_HALF) // Currently no 
Hgemv/SgemvEx in Cublas THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec); THCTensor_(resize2d)(state, vecAsMatrix, vecAsMatrix->size[0], 1); THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t); THCTensor_(resize2d)(state, tAsMatrix, tAsMatrix->size[0], 1); THCTensor_(addmm)(state, r_, beta, tAsMatrix, alpha, mat, vecAsMatrix); // r_ will have answer as matrix, need to return a vecotr THCTensor_(resize1d)(state, r_, r_->size[0]); THCTensor_(free)(state, vecAsMatrix); THCTensor_(free)(state, tAsMatrix); #endif #else THError("unimplemented data type"); #endif } THC_API void THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *vec1, THCTensor *vec2) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THAssert(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2)); if ( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) { THError("vector and vector expected"); } if (t->nDimension != 2) { THError("size mismatch"); } if ( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) { THError("size mismatch"); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if (r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } if(r_->stride[0] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec1->size[0], vec2->size[0], alpha, THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, r_), r_->stride[1]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec1->size[0], vec2->size[0], alpha, THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, r_), r_->stride[1]); #endif } else if(r_->stride[1] == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, r_), r_->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, r_), r_->stride[0]); #endif } else { THCTensor *cr = THCTensor_(newClone)(state, r_); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, cr), cr->stride[0]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2->size[0], vec1->size[0], alpha, THCTensor_(data)(state, vec2), vec2->stride[0], THCTensor_(data)(state, vec1), vec1->stride[0], THCTensor_(data)(state, cr), cr->stride[0]); #endif THCTensor_(freeCopyTo)(state, cr, r_); } #elif defined(THC_REAL_IS_HALF) // currently no Hger/SgerEx in Cublas. 
THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2); THCTensor_(resize2d)(state, vec2T, vec2T->size[0], 1); THCTensor_(transpose)(state, vec2T, NULL, 0, 1); THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1); THCTensor_(resize2d)(state, vec1M, vec1M->size[0], 1); THCTensor_(addmm)(state, r_, beta, t, alpha, vec1M, vec2T); THCTensor_(free)(state, vec2T); THCTensor_(free)(state, vec1M); #endif #else THError("unimplemented data type"); #endif } THC_API void THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *m1, THCTensor *m2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2)); char transpose_r, transpose_m1, transpose_m2; THCTensor *r__, *m1_, *m2_; if( (m1->nDimension != 2) || (m2->nDimension != 2) ) THError("matrix and matrix expected"); if(t->nDimension != 2) THError("size mismatch"); if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) || (m1->size[1] != m2->size[0]) ) THError("size mismatch"); if(t != r_) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride[1] == 1 && r_->stride[0] != 0) { THCTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* m1 */ if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THCTensor_(newContiguous)(state, m1); } /* m2 */ if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); m2_ = THCTensor_(newContiguous)(state, m2); } #ifdef THC_REAL_IS_HALF THCudaBlas_Hgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); #elif defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 
1 : 0)]); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm(state, transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THCTensor_(data)(state, r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); #endif /* free intermediate variables */ if(m1_ != m1) { THCTensor_(free)(state, m1_); } if(m2_ != m2) { THCTensor_(free)(state, m2_); } if(r__ != r_) { THCTensor_(freeCopyTo)(state, r__, r_); } #else THError("unimplemented data type"); #endif } THC_API void THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 2, 4, "expected 2D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); long batchnum = THCTensor_(size)(state, batch1, 0); long m1d1 = THCTensor_(size)(state, batch1, 1); long innerdim = THCTensor_(size)(state, batch1, 2); long m2d2 = THCTensor_(size)(state, batch2, 2); THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); // M is t, as listed in the docs under addbmm THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6, "first dimension must match first dimension of M"); THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7, "second dimension must match second dimension of M"); THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6, "second dimension must match first dimension of batch2"); if (t != result) { THCTensor_(resizeAs)(state, result, t); THCTensor_(copy)(state, result, t); } THCTensor *slice1 = THCTensor_(new)(state); THCTensor *slice2 = THCTensor_(new)(state); for (long i=0; i<batchnum; i++) { THCTensor_(select)(state, slice1, batch1, 0, i); THCTensor_(select)(state, slice2, batch2, 0, i); THCTensor_(addmm)(state, result, beta, result, alpha, slice1, slice2); beta = ScalarConvert<int, real>::to(1); } THCTensor_(free)(state, slice1); THCTensor_(free)(state, slice2); #else THError("unimplemented data type"); #endif } __global__ void createBatchGemmBuffer(const real** buffer, real* data, long stride, long num_batches) { const long idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer[idx] = data + idx * stride; } } THC_API void THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 3, 4, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7, "equal 
number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6, "wrong matrix size"); if (t != result) { THCTensor_(resizeAs)(state, result, t); THCTensor_(copy)(state, result, t); } bool transpose_result; char transpose_batch1, transpose_batch2; long lda, ldb, ldc; THCTensor *result_, *batch1_, *batch2_; if (result->stride[1] == 1) { transpose_result = false; result_ = result; ldc = result_->stride[2]; } else if (result->stride[2] == 1) { transpose_result = true; THCTensor *swap = batch2; batch2 = batch1; batch1 = swap; result_ = result; ldc = result_->stride[1]; } else { transpose_result = false; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2); result_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, result_, NULL, 1, 2); ldc = result_->stride[2]; } if (batch1->stride[transpose_result ? 2 : 1] == 1) { transpose_batch1 = 'n'; batch1_ = batch1; lda = batch1_->stride[transpose_result ? 1 : 2]; } else if (batch1->stride[transpose_result ? 1 : 2] == 1) { transpose_batch1 = 't'; batch1_ = batch1; lda = batch1_->stride[transpose_result ? 2 : 1]; } else { transpose_batch1 = transpose_result ? 'n' : 't'; batch1_ = THCTensor_(newContiguous)(state, batch1); lda = batch1_->stride[1]; } if (batch2->stride[transpose_result ? 2 : 1] == 1) { transpose_batch2 = 'n'; batch2_ = batch2; ldb = batch2_->stride[transpose_result ? 1 : 2]; } else if (batch2->stride[transpose_result ? 1 : 2] == 1) { transpose_batch2 = 't'; batch2_ = batch2; ldb = batch2_->stride[transpose_result ? 2 : 1]; } else { transpose_batch2 = transpose_result ? 'n' : 't'; batch2_ = THCTensor_(newContiguous)(state, batch2); ldb = batch2_->stride[1]; } // Compute pointers to matrices in each batch. long num_batches = result_->size[0]; size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. const real **d_matrices1, **d_matrices2; real **d_result_matrices; THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_result_matrices, matrices_size)); const long block = 512; const long grid = (num_batches + block - 1) / block; createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>( d_matrices1, THCTensor_(data)(state, batch1_), batch1_->stride[0], num_batches); createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>( d_matrices2, THCTensor_(data)(state, batch2_), batch2_->stride[0], num_batches); createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>( (const real**)d_result_matrices, THCTensor_(data)(state,result_), result_->stride[0], num_batches); #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 1 : 2], alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmBatched( state, transpose_batch1, transpose_batch2, result_->size[transpose_result ? 2 : 1], result_->size[transpose_result ? 1 : 2], batch1_->size[transpose_result ? 
1 : 2], alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #endif THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCudaFree(state, d_result_matrices); if (batch1_ != batch1) { THCTensor_(free)(state, batch1_); } if (batch2_ != batch2) { THCTensor_(free)(state, batch2_); } if (result_ != result) { THCTensor_(freeCopyTo)(state, result_, result); } #else THError("unimplemented data type"); #endif } #endif
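The baddbmm path above reduces to one batched GEMM call: createBatchGemmBuffer fills device arrays with one matrix pointer per batch slice, and those pointer arrays are what THCudaBlas_SgemmBatched consumes. The same pattern in plain cuBLAS, as a self-contained sketch (the helper name, the square n-by-n shapes, and the contiguous batch layout are assumptions for illustration, not taken from the file):

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <vector>

// Multiplies batch pairs of n x n matrices stored back to back on the device:
// C_i = A_i * B_i (column-major, cuBLAS convention), one library call for all batches.
void sgemm_batched_sketch(cublasHandle_t handle,
                          const float* dA, const float* dB, float* dC,
                          int n, int batch) {
  // One device pointer per batch slice, built on the host first.
  std::vector<const float*> hA(batch), hB(batch);
  std::vector<float*> hC(batch);
  for (int i = 0; i < batch; ++i) {
    hA[i] = dA + (size_t)i * n * n;
    hB[i] = dB + (size_t)i * n * n;
    hC[i] = dC + (size_t)i * n * n;
  }

  // cublasSgemmBatched expects the pointer arrays themselves to live on the device.
  const float **dAarr, **dBarr;
  float **dCarr;
  cudaMalloc((void**)&dAarr, batch * sizeof(float*));
  cudaMalloc((void**)&dBarr, batch * sizeof(float*));
  cudaMalloc((void**)&dCarr, batch * sizeof(float*));
  cudaMemcpy(dAarr, hA.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);
  cudaMemcpy(dBarr, hB.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);
  cudaMemcpy(dCarr, hC.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);

  const float alpha = 1.f, beta = 0.f;
  cublasSgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n,
                     &alpha, dAarr, n, dBarr, n, &beta, dCarr, n, batch);

  cudaFree(dAarr);
  cudaFree(dBarr);
  cudaFree(dCarr);
}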
b3f981eaa6218c498ca9bdaa1dc94bdc0a9d6bf4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "BatchMatMulExecution.hpp" namespace MNN { namespace CUDA { template <typename T> __global__ void transpose_bias(T *input, T *output, const T* bias, int batch, int e, int h) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < batch * e * h; index += blockDim.x * gridDim.x) { int i = index % (e*h); int b = index / (e*h); int y = i / e; output[index] = input[index] + bias[b * h + y]; } return; } BatchMatMulExecution::BatchMatMulExecution(bool transposeA, bool transposeB, Backend *backend) : Execution(backend) { mTransposeA = transposeA; mTransposeB = transposeB; } BatchMatMulExecution::~ BatchMatMulExecution() { // do nothing } ErrorCode BatchMatMulExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) { auto C = outputs[0]; auto dimensions = C->dimensions(); int batch = 1; for (int i = 0; i < dimensions - 2; ++i) { batch *= C->length(i); } auto e = C->length(dimensions-2); auto h = C->length(dimensions-1); if(inputs.size() > 2) { mTempOutput.reset(Tensor::createDevice<float>({batch*h*e})); auto res = backend()->onAcquireBuffer(mTempOutput.get(), Backend::DYNAMIC); if (!res) { return OUT_OF_MEMORY; } backend()->onReleaseBuffer(mTempOutput.get(), Backend::DYNAMIC); } return NO_ERROR; } ErrorCode BatchMatMulExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) { auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime(); auto blasHandle = runtime->cublas_handle(); const Tensor* A = inputs[0]; const Tensor* B = inputs[1]; auto dimensions = A->dimensions(); int batch = 1; for (int i = 0; i < dimensions - 2; ++i) { batch *= A->length(i); } auto w0 = inputs[0]->length(dimensions-1); auto h0 = inputs[0]->length(dimensions-2); auto C = outputs[0]; auto e = C->length(dimensions-2); auto h = C->length(dimensions-1); auto l = w0; if (mTransposeA) { l = h0; } auto APtr = (const float*)A->deviceId(); auto BPtr = (const float*)B->deviceId(); auto CDestPtr = (float*)C->deviceId(); float alpha = 1.0f; float beta = 0.0f; auto tranB = HIPBLAS_OP_N; auto ldB = h; if (mTransposeB) { ldB = l; tranB = HIPBLAS_OP_T; } auto tranA = HIPBLAS_OP_N; auto ldA = l; if (mTransposeA) { ldA = e; tranA = HIPBLAS_OP_T; } if(inputs.size() == 2) { auto status = hipblasSgemmStridedBatched(blasHandle, tranB, tranA, h, e, l, &alpha, BPtr, ldB, l*h, APtr, ldA, e*l, &beta, CDestPtr, h, e*h, batch); cublas_check(status); //hipDeviceSynchronize(); } else { auto CPtr = (float*)mTempOutput->deviceId(); auto status = hipblasSgemmStridedBatched(blasHandle, tranB, tranA, h, e, l, &alpha, BPtr, ldB, l*h, APtr, ldA, e*l, &beta, CPtr, h, e*h, batch); cublas_check(status); //hipDeviceSynchronize(); // Transpose batch, h, e -> batch, e, h int block_num = runtime->blocks_num(batch*e*h); int threads_num = runtime->threads_num(); hipLaunchKernelGGL(( transpose_bias), dim3(block_num), dim3(threads_num), 0, 0, (float*)CPtr, (float*)CDestPtr, (const float*)inputs[2]->deviceId(), batch, e, h); } return NO_ERROR; } class BatchMatMulCreator : public CUDABackend::Creator { public: virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op, Backend* backend) const override { auto param = op->main_as_BatchMatMulParam(); return new BatchMatMulExecution(param->adjX(), param->adjY(), backend); } }; static CUDACreatorRegister<BatchMatMulCreator> __init(OpType_BatchMatMul); } }
b3f981eaa6218c498ca9bdaa1dc94bdc0a9d6bf4.cu
#include "BatchMatMulExecution.hpp" namespace MNN { namespace CUDA { template <typename T> __global__ void transpose_bias(T *input, T *output, const T* bias, int batch, int e, int h) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < batch * e * h; index += blockDim.x * gridDim.x) { int i = index % (e*h); int b = index / (e*h); int y = i / e; output[index] = input[index] + bias[b * h + y]; } return; } BatchMatMulExecution::BatchMatMulExecution(bool transposeA, bool transposeB, Backend *backend) : Execution(backend) { mTransposeA = transposeA; mTransposeB = transposeB; } BatchMatMulExecution::~ BatchMatMulExecution() { // do nothing } ErrorCode BatchMatMulExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) { auto C = outputs[0]; auto dimensions = C->dimensions(); int batch = 1; for (int i = 0; i < dimensions - 2; ++i) { batch *= C->length(i); } auto e = C->length(dimensions-2); auto h = C->length(dimensions-1); if(inputs.size() > 2) { mTempOutput.reset(Tensor::createDevice<float>({batch*h*e})); auto res = backend()->onAcquireBuffer(mTempOutput.get(), Backend::DYNAMIC); if (!res) { return OUT_OF_MEMORY; } backend()->onReleaseBuffer(mTempOutput.get(), Backend::DYNAMIC); } return NO_ERROR; } ErrorCode BatchMatMulExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) { auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime(); auto blasHandle = runtime->cublas_handle(); const Tensor* A = inputs[0]; const Tensor* B = inputs[1]; auto dimensions = A->dimensions(); int batch = 1; for (int i = 0; i < dimensions - 2; ++i) { batch *= A->length(i); } auto w0 = inputs[0]->length(dimensions-1); auto h0 = inputs[0]->length(dimensions-2); auto C = outputs[0]; auto e = C->length(dimensions-2); auto h = C->length(dimensions-1); auto l = w0; if (mTransposeA) { l = h0; } auto APtr = (const float*)A->deviceId(); auto BPtr = (const float*)B->deviceId(); auto CDestPtr = (float*)C->deviceId(); float alpha = 1.0f; float beta = 0.0f; auto tranB = CUBLAS_OP_N; auto ldB = h; if (mTransposeB) { ldB = l; tranB = CUBLAS_OP_T; } auto tranA = CUBLAS_OP_N; auto ldA = l; if (mTransposeA) { ldA = e; tranA = CUBLAS_OP_T; } if(inputs.size() == 2) { auto status = cublasSgemmStridedBatched(blasHandle, tranB, tranA, h, e, l, &alpha, BPtr, ldB, l*h, APtr, ldA, e*l, &beta, CDestPtr, h, e*h, batch); cublas_check(status); //cudaThreadSynchronize(); } else { auto CPtr = (float*)mTempOutput->deviceId(); auto status = cublasSgemmStridedBatched(blasHandle, tranB, tranA, h, e, l, &alpha, BPtr, ldB, l*h, APtr, ldA, e*l, &beta, CPtr, h, e*h, batch); cublas_check(status); //cudaThreadSynchronize(); // Transpose batch, h, e -> batch, e, h int block_num = runtime->blocks_num(batch*e*h); int threads_num = runtime->threads_num(); transpose_bias<<<block_num, threads_num>>>((float*)CPtr, (float*)CDestPtr, (const float*)inputs[2]->deviceId(), batch, e, h); } return NO_ERROR; } class BatchMatMulCreator : public CUDABackend::Creator { public: virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op, Backend* backend) const override { auto param = op->main_as_BatchMatMulParam(); return new BatchMatMulExecution(param->adjX(), param->adjY(), backend); } }; static CUDACreatorRegister<BatchMatMulCreator> __init(OpType_BatchMatMul); } }
146fbbf8e5eb079dc648fab865b9e6d812b87865.hip
// !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathCompareT.cuh"
#include "THHTensor.hpp"
#include "THHStream.hpp"

#include "../generic/THCTensorMathCompareT.cu"
#include "../THCGenerateHalfType.h"
146fbbf8e5eb079dc648fab865b9e6d812b87865.cu
#include "../THCTensorMathCompareT.cuh" #include "THCTensor.hpp" #include "THCStream.hpp" #include "../generic/THCTensorMathCompareT.cu" #include "../THCGenerateHalfType.h"
0170f47eb148e6b826e5f08ca32c744ad2bd0417.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ inline float stableSigmoid(float x) {
  if(x >= 0) {
    float z = expf(-x);
    return 1.0 / (1.0 + z);
  } else {
    float z = expf(x);
    return z / (1.0 + z);
  }
}

__global__ void gHighwayForward(float* out,
                                const float* in1,
                                const float* in2,
                                const float* t,
                                size_t length) {
  for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
    int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
    if(index < length) {
      float sigma = stableSigmoid(t[index]);
      out[index] = in1[index] * sigma + in2[index] * (1.f - sigma);
    }
  }
}
0170f47eb148e6b826e5f08ca32c744ad2bd0417.cu
#include "includes.h" __device__ inline float stableSigmoid(float x) { if(x >= 0) { float z = expf(-x); return 1.0 / (1.0 + z); } else { float z = expf(x); return z / (1.0 + z); } } __global__ void gHighwayForward(float* out, const float* in1, const float* in2, const float* t, size_t length) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { float sigma = stableSigmoid(t[index]); out[index] = in1[index] * sigma + in2[index] * (1.f - sigma); } } }
fcb7ceeabb9268179ada31d4775a598ed43dd110.hip
// !!! This is a file automatically generated by hipify!!! #ifdef USE_LEGACY_DSLASH #include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <typeinfo> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <dslash.h> #include <sys/time.h> #include <blas_quda.h> #include <inline_ptx.h> #include <dslash_policy.cuh> namespace quda { namespace ndegtwisted { #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_NDEG_TWISTED_MASS_DIRAC #include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass #endif #ifndef NDEGTM_SHARED_FLOATS_PER_THREAD #define NDEGTM_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // end namespace twisted using namespace ndegtwisted; #ifdef GPU_NDEG_TWISTED_MASS_DIRAC template <typename sFloat, typename gFloat> class NdegTwistedDslashCuda : public SharedDslashCuda { private: const QudaTwistDslashType dslashType; double a, b, c, d; protected: unsigned int sharedBytesPerThread() const { if (dslashParam.kernel_type == INTERIOR_KERNEL) { int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float)); return NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size; } else { return 0; } } public: NdegTwistedDslashCuda(cudaColorSpinorField *out, const GaugeField &gauge, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const QudaTwistDslashType dslashType, const double kappa, const double mu, const double epsilon, const double k, const int parity, const int dagger, const int *commOverride) : SharedDslashCuda(out, in, x, gauge, parity, dagger, commOverride), dslashType(dslashType) { a = kappa; b = mu; c = epsilon; d = k; dslashParam.a = kappa; dslashParam.a_f = kappa; dslashParam.b = mu; dslashParam.b_f = mu; dslashParam.c = epsilon; dslashParam.c_f = epsilon; dslashParam.d = k; dslashParam.d_f = k; if (dslashType != QUDA_NONDEG_DSLASH) errorQuda("Invalid dslashType for non-degenerate twisted-mass Dslash"); dslashParam.fl_stride = in->VolumeCB()/2; } virtual ~NdegTwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); } TuneKey tuneKey() const { TuneKey key = DslashCuda::tuneKey(); strcat(key.aux,",NdegDslash"); return key; } void apply(const hipStream_t &stream) { #ifdef SHARED_WILSON_DSLASH if (dslashParam.kernel_type == EXTERIOR_KERNEL_X) errorQuda("Shared dslash does not yet support X-dimension partitioning"); #endif #ifndef USE_TEXTURE_OBJECTS if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x); #endif // USE_TEXTURE_OBJECTS TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); setParam(); NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); } long long flops() const { int twisted_flops = 48; long long flops = DslashCuda::flops(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: case KERNEL_POLICY: // twisted-mass flops are done in the 
interior kernel flops += twisted_flops * in->VolumeCB(); break; } return flops; } }; #endif // GPU_NDEG_TWISTED_MASS_DIRAC void ndegTwistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const QudaTwistDslashType type, const double &kappa, const double &mu, const double &epsilon, const double &k, const int *commOverride, TimeProfile &profile) { #ifdef GPU_NDEG_TWISTED_MASS_DIRAC const_cast<cudaColorSpinorField*>(in)->createComms(1); DslashCuda *dslash = nullptr; if (in->Precision() == QUDA_DOUBLE_PRECISION) { dslash = new NdegTwistedDslashCuda<double2,double2>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new NdegTwistedDslashCuda<float4,float4>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new NdegTwistedDslashCuda<short4,short4>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } int bulk_threads = in->Volume() / 2; int ghost_threads[4] = {0}; for(int i=0;i<4;i++) ghost_threads[i] = in->GhostFace()[i] / 2; dslash::DslashPolicyTune<DslashCuda> dslash_policy( *dslash, const_cast<cudaColorSpinorField *>(in), bulk_threads, ghost_threads, profile); dslash_policy.apply(0); delete dslash; #else errorQuda("Non-degenerate twisted mass dslash has not been built"); #endif } } #endif
fcb7ceeabb9268179ada31d4775a598ed43dd110.cu
#ifdef USE_LEGACY_DSLASH #include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <typeinfo> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <dslash.h> #include <sys/time.h> #include <blas_quda.h> #include <inline_ptx.h> #include <dslash_policy.cuh> namespace quda { namespace ndegtwisted { #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_NDEG_TWISTED_MASS_DIRAC #include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass #endif #ifndef NDEGTM_SHARED_FLOATS_PER_THREAD #define NDEGTM_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // end namespace twisted using namespace ndegtwisted; #ifdef GPU_NDEG_TWISTED_MASS_DIRAC template <typename sFloat, typename gFloat> class NdegTwistedDslashCuda : public SharedDslashCuda { private: const QudaTwistDslashType dslashType; double a, b, c, d; protected: unsigned int sharedBytesPerThread() const { if (dslashParam.kernel_type == INTERIOR_KERNEL) { int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float)); return NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size; } else { return 0; } } public: NdegTwistedDslashCuda(cudaColorSpinorField *out, const GaugeField &gauge, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const QudaTwistDslashType dslashType, const double kappa, const double mu, const double epsilon, const double k, const int parity, const int dagger, const int *commOverride) : SharedDslashCuda(out, in, x, gauge, parity, dagger, commOverride), dslashType(dslashType) { a = kappa; b = mu; c = epsilon; d = k; dslashParam.a = kappa; dslashParam.a_f = kappa; dslashParam.b = mu; dslashParam.b_f = mu; dslashParam.c = epsilon; dslashParam.c_f = epsilon; dslashParam.d = k; dslashParam.d_f = k; if (dslashType != QUDA_NONDEG_DSLASH) errorQuda("Invalid dslashType for non-degenerate twisted-mass Dslash"); dslashParam.fl_stride = in->VolumeCB()/2; } virtual ~NdegTwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); } TuneKey tuneKey() const { TuneKey key = DslashCuda::tuneKey(); strcat(key.aux,",NdegDslash"); return key; } void apply(const cudaStream_t &stream) { #ifdef SHARED_WILSON_DSLASH if (dslashParam.kernel_type == EXTERIOR_KERNEL_X) errorQuda("Shared dslash does not yet support X-dimension partitioning"); #endif #ifndef USE_TEXTURE_OBJECTS if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x); #endif // USE_TEXTURE_OBJECTS TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); setParam(); NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); } long long flops() const { int twisted_flops = 48; long long flops = DslashCuda::flops(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: case KERNEL_POLICY: // twisted-mass flops are done in the interior kernel flops += twisted_flops * in->VolumeCB(); break; } 
return flops; } }; #endif // GPU_NDEG_TWISTED_MASS_DIRAC void ndegTwistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const QudaTwistDslashType type, const double &kappa, const double &mu, const double &epsilon, const double &k, const int *commOverride, TimeProfile &profile) { #ifdef GPU_NDEG_TWISTED_MASS_DIRAC const_cast<cudaColorSpinorField*>(in)->createComms(1); DslashCuda *dslash = nullptr; if (in->Precision() == QUDA_DOUBLE_PRECISION) { dslash = new NdegTwistedDslashCuda<double2,double2>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new NdegTwistedDslashCuda<float4,float4>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new NdegTwistedDslashCuda<short4,short4>(out, gauge, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } int bulk_threads = in->Volume() / 2; int ghost_threads[4] = {0}; for(int i=0;i<4;i++) ghost_threads[i] = in->GhostFace()[i] / 2; dslash::DslashPolicyTune<DslashCuda> dslash_policy( *dslash, const_cast<cudaColorSpinorField *>(in), bulk_threads, ghost_threads, profile); dslash_policy.apply(0); delete dslash; #else errorQuda("Non-degenerate twisted mass dslash has not been built"); #endif } } #endif
b2725791e5c4c7e793f982a99a15099742d9bcb4.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/HardTanh.hip"
#else

#include <THHUNN/common.h>

void THNN_(HardTanh_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           accreal min_val_,
           accreal max_val_,
           bool inplace)
{
  scalar_t min_val = ScalarConvert<accreal, scalar_t>::to(min_val_);
  scalar_t max_val = ScalarConvert<accreal, scalar_t>::to(max_val_);
  THCUNN_assertSameGPU(state, 2, input, output);

  if(inplace)
  {
    THCTensor_(set)(state, output, input);
    THC_pointwiseApply1<scalar_t>(state, output,
        hardtanhupdateOutput_functor<scalar_t>(min_val, max_val));
  }
  else
  {
    THCTensor_(resizeAs)(state, output, input);
    THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input,
        hardtanhupdateOutput_functor<scalar_t>(min_val, max_val));
  }
}

void THNN_(HardTanh_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           accreal min_val_,
           accreal max_val_,
           bool inplace)
{
  scalar_t min_val = ScalarConvert<accreal, scalar_t>::to(min_val_);
  scalar_t max_val = ScalarConvert<accreal, scalar_t>::to(max_val_);
  THCUNN_check_nElement(state, input, gradOutput);
  THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);

  if (inplace)
  {
    THCTensor_(set)(state, gradInput, gradOutput);
    THC_pointwiseApply2<scalar_t, scalar_t>(state, gradInput, input,
        hardtanhupdateGradInput_functor<scalar_t>(min_val, max_val));
  }
  else
  {
    THCTensor_(resizeAs)(state, gradInput, input);
    THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput,
        hardtanhupdateGradInput_functor<scalar_t>(min_val, max_val));
  }
}

#endif
b2725791e5c4c7e793f982a99a15099742d9bcb4.cu
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/HardTanh.cu"
#else

#include <THCUNN/common.h>

void THNN_(HardTanh_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           accreal min_val_,
           accreal max_val_,
           bool inplace)
{
  scalar_t min_val = ScalarConvert<accreal, scalar_t>::to(min_val_);
  scalar_t max_val = ScalarConvert<accreal, scalar_t>::to(max_val_);
  THCUNN_assertSameGPU(state, 2, input, output);

  if(inplace)
  {
    THCTensor_(set)(state, output, input);
    THC_pointwiseApply1<scalar_t>(state, output,
        hardtanhupdateOutput_functor<scalar_t>(min_val, max_val));
  }
  else
  {
    THCTensor_(resizeAs)(state, output, input);
    THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input,
        hardtanhupdateOutput_functor<scalar_t>(min_val, max_val));
  }
}

void THNN_(HardTanh_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           accreal min_val_,
           accreal max_val_,
           bool inplace)
{
  scalar_t min_val = ScalarConvert<accreal, scalar_t>::to(min_val_);
  scalar_t max_val = ScalarConvert<accreal, scalar_t>::to(max_val_);
  THCUNN_check_nElement(state, input, gradOutput);
  THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);

  if (inplace)
  {
    THCTensor_(set)(state, gradInput, gradOutput);
    THC_pointwiseApply2<scalar_t, scalar_t>(state, gradInput, input,
        hardtanhupdateGradInput_functor<scalar_t>(min_val, max_val));
  }
  else
  {
    THCTensor_(resizeAs)(state, gradInput, input);
    THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput,
        hardtanhupdateGradInput_functor<scalar_t>(min_val, max_val));
  }
}

#endif
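Both functions above delegate the per-element math to hardtanhupdateOutput_functor and hardtanhupdateGradInput_functor, which are defined in a THCUNN header that is not part of this record. As a rough stand-in for what they are expected to compute, assuming the usual hardtanh definition (clamp on the forward pass, gradient blocked outside the clamp interval):

// Illustrative stand-ins only; the real functors live elsewhere in THCUNN and may
// differ in detail (e.g. how the exact boundary values are treated).
template <typename T>
struct hardtanh_forward_sketch {
  T min_val, max_val;
  __host__ __device__ T operator()(T x) const {
    return x < min_val ? min_val : (x > max_val ? max_val : x);
  }
};

template <typename T>
struct hardtanh_backward_sketch {
  T min_val, max_val;
  __host__ __device__ T operator()(T x, T grad_out) const {
    return (x <= min_val || x >= max_val) ? T(0) : grad_out;
  }
};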
c76306603d046fd90be63b2dbfb0e6cc6d415c27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file bf_parallel.cu * @author Federico Picchi * @brief CUDA-based implementation of Bilateral Filter * @date June 2021 * * @copyright Copyright (c) 2021 * */ #include "bilateral_filter/bf.hpp" #if defined(_DEBUG) #include <iostream> #endif inline void handleError(hipError_t err, int line) { #if defined(_DEBUG) if (err) { std::cerr << __FILE__ << ": ERROR " << err << " CALLING CUDA FUNCTION IN LINE: " << line << "\n"; exit(1); } #endif return; } __global__ void bf_parallel_k(const uchar* const source, uchar* const destination, const int diameter, const double* const gi, const double* const gs, const int* const space_coord, const int maxk, const int width, const int height, const size_t s_step, const size_t d_step) { // Shared memory setup extern __shared__ double shared[]; double* const gi_s = (double*)shared; double* const gs_s = gi_s + 256; int* const space_coord_s = (int*)&gs_s[diameter * diameter]; uchar* const tile_s = (uchar*)&space_coord_s[diameter * diameter]; // Ids and vals setup const int radius = diameter / 2; const int global_j = (int)(threadIdx.x + blockIdx.x * blockDim.x) - radius * (int)(1 + 2 * blockIdx.x); const int global_i = (int)(threadIdx.y + blockIdx.y * blockDim.y) - radius * (int)(1 + 2 * blockIdx.y); const int sharedId = threadIdx.y * blockDim.x + threadIdx.x; // Copy from global memory to shared memory if (sharedId < 256) gi_s[sharedId] = gi[sharedId]; if (sharedId < diameter * diameter) { space_coord_s[sharedId] = space_coord[sharedId]; gs_s[sharedId] = gs[sharedId]; } if (global_i >= height + radius || global_j >= width + radius) return; tile_s[sharedId] = source[(global_i + radius) * s_step + radius + global_j]; if (global_i >= height || global_j >= width) return; if (threadIdx.x < radius || threadIdx.x >= blockDim.x - radius || threadIdx.y < radius || threadIdx.y >= blockDim.y - radius) return; __syncthreads(); // Calc new pixel value double sum = 0, wsum = 0; const int val0 = tile_s[sharedId]; //< Center of the template. for (int k = 0; k < maxk; k++) { const int val = tile_s[sharedId + space_coord_s[k]]; // The weight is gaussian space * color space. const double w = gs_s[k] * gi_s[abs(val - val0)]; sum += val * w; wsum += w; } destination[global_j + global_i * d_step] = (uchar)lround(sum / wsum); } inline size_t calcBytesNeeded(const int blockSize, const int diameter) { return (size_t)blockSize + 256 * sizeof(double) + diameter * diameter * (sizeof(int) + sizeof(double)); } cv::Mat bf_parallel(const cv::Mat &source, const int diameter, const double sigma_i, const double sigma_s) { const int radius = diameter / 2; //Calculate optimal CUDA configuration int blockSize = 1024; int minGridSize; size_t bytesNeeded = calcBytesNeeded(blockSize, diameter); hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, bf_parallel_k, bytesNeeded, 0); // Recalc SM bytes needed bytesNeeded = calcBytesNeeded(blockSize, diameter); // Round up according to matrix size blockSize = (int)sqrt(blockSize); dim3 blockSize_2D(blockSize, blockSize); blockSize -= 2*radius; dim3 gridSize_2D((source.cols + blockSize - 1) / blockSize, (source.rows + blockSize - 1) / blockSize); // Create destination matrix cv::Mat dst = cv::Mat::zeros(source.rows, source.cols, CV_8U); // Create an image with a border. cv::Mat temp; cv::copyMakeBorder(source, temp, radius, radius, radius, radius, cv::BorderTypes::BORDER_REFLECT_101); // Init color weight. 
double coeff_i = -0.5 / (sigma_i * sigma_i); std::vector<double> gi_vec(256); double *gi = &gi_vec[0]; for (int i = 0; i < 256; i++) gi[i] = exp(i * i * coeff_i); // Generate gaussian space. std::vector<double> gs_vec(diameter * diameter); std::vector<int> space_coord_vec(diameter * diameter); //< Save here coord. double *gs = &gs_vec[0]; int *space_coord = &space_coord_vec[0]; const double coeff_s = -0.5 / (sigma_s * sigma_s); int maxk = 0; for (int i = -radius; i <= radius; i++) { for (int j = -radius; j <= radius; j++) { double r = sqrt(i * i + j * j); if (r > radius) //< Circle. continue; gs[maxk] = exp(r * r * coeff_s); space_coord[maxk++] = i * (int)blockSize_2D.x + j; } } // Copy data to device uchar* temp_d; handleError(hipMalloc(&temp_d, temp.total()), __LINE__); handleError(hipMemcpy(temp_d, temp.data, temp.total(), hipMemcpyHostToDevice), __LINE__); uchar* dst_d; handleError(hipMalloc(&dst_d, dst.total()), __LINE__); handleError(hipMemcpy(dst_d, dst.data, dst.total(), hipMemcpyHostToDevice), __LINE__); double* gs_d; handleError(hipMalloc(&gs_d, diameter * diameter * sizeof(double)), __LINE__); handleError(hipMemcpy(gs_d, gs, diameter * diameter * sizeof(double), hipMemcpyHostToDevice), __LINE__); double* gi_d; handleError(hipMalloc(&gi_d, 256 * sizeof(double)), __LINE__); handleError(hipMemcpy(gi_d, gi, 256 * sizeof(double), hipMemcpyHostToDevice), __LINE__); int* space_coord_d; handleError(hipMalloc(&space_coord_d, diameter * diameter * sizeof(int)), __LINE__); handleError(hipMemcpy(space_coord_d, space_coord, diameter * diameter * sizeof(int), hipMemcpyHostToDevice), __LINE__); // Filtering process bf_parallel_k << < gridSize_2D, blockSize_2D, bytesNeeded >> > (temp_d, dst_d, diameter, gi_d, gs_d, space_coord_d, maxk, (int)source.cols, (int)source.rows, temp.step, dst.step); handleError(hipDeviceSynchronize(), __LINE__); // Copy data from device handleError(hipMemcpy(dst.data, dst_d, dst.total(), hipMemcpyDeviceToHost), __LINE__); handleError(hipFree(temp_d), __LINE__); handleError(hipFree(dst_d), __LINE__); handleError(hipFree(gs_d), __LINE__); handleError(hipFree(gi_d), __LINE__); handleError(hipFree(space_coord_d), __LINE__); return dst; }
c76306603d046fd90be63b2dbfb0e6cc6d415c27.cu
/** * @file bf_parallel.cu * @author Federico Picchi * @brief CUDA-based implementation of Bilateral Filter * @date June 2021 * * @copyright Copyright (c) 2021 * */ #include "bilateral_filter/bf.hpp" #if defined(_DEBUG) #include <iostream> #endif inline void handleError(cudaError_t err, int line) { #if defined(_DEBUG) if (err) { std::cerr << __FILE__ << ": ERROR " << err << " CALLING CUDA FUNCTION IN LINE: " << line << "\n"; exit(1); } #endif return; } __global__ void bf_parallel_k(const uchar* const source, uchar* const destination, const int diameter, const double* const gi, const double* const gs, const int* const space_coord, const int maxk, const int width, const int height, const size_t s_step, const size_t d_step) { // Shared memory setup extern __shared__ double shared[]; double* const gi_s = (double*)shared; double* const gs_s = gi_s + 256; int* const space_coord_s = (int*)&gs_s[diameter * diameter]; uchar* const tile_s = (uchar*)&space_coord_s[diameter * diameter]; // Ids and vals setup const int radius = diameter / 2; const int global_j = (int)(threadIdx.x + blockIdx.x * blockDim.x) - radius * (int)(1 + 2 * blockIdx.x); const int global_i = (int)(threadIdx.y + blockIdx.y * blockDim.y) - radius * (int)(1 + 2 * blockIdx.y); const int sharedId = threadIdx.y * blockDim.x + threadIdx.x; // Copy from global memory to shared memory if (sharedId < 256) gi_s[sharedId] = gi[sharedId]; if (sharedId < diameter * diameter) { space_coord_s[sharedId] = space_coord[sharedId]; gs_s[sharedId] = gs[sharedId]; } if (global_i >= height + radius || global_j >= width + radius) return; tile_s[sharedId] = source[(global_i + radius) * s_step + radius + global_j]; if (global_i >= height || global_j >= width) return; if (threadIdx.x < radius || threadIdx.x >= blockDim.x - radius || threadIdx.y < radius || threadIdx.y >= blockDim.y - radius) return; __syncthreads(); // Calc new pixel value double sum = 0, wsum = 0; const int val0 = tile_s[sharedId]; //< Center of the template. for (int k = 0; k < maxk; k++) { const int val = tile_s[sharedId + space_coord_s[k]]; // The weight is gaussian space * color space. const double w = gs_s[k] * gi_s[abs(val - val0)]; sum += val * w; wsum += w; } destination[global_j + global_i * d_step] = (uchar)lround(sum / wsum); } inline size_t calcBytesNeeded(const int blockSize, const int diameter) { return (size_t)blockSize + 256 * sizeof(double) + diameter * diameter * (sizeof(int) + sizeof(double)); } cv::Mat bf_parallel(const cv::Mat &source, const int diameter, const double sigma_i, const double sigma_s) { const int radius = diameter / 2; //Calculate optimal CUDA configuration int blockSize = 1024; int minGridSize; size_t bytesNeeded = calcBytesNeeded(blockSize, diameter); cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, bf_parallel_k, bytesNeeded, 0); // Recalc SM bytes needed bytesNeeded = calcBytesNeeded(blockSize, diameter); // Round up according to matrix size blockSize = (int)sqrt(blockSize); dim3 blockSize_2D(blockSize, blockSize); blockSize -= 2*radius; dim3 gridSize_2D((source.cols + blockSize - 1) / blockSize, (source.rows + blockSize - 1) / blockSize); // Create destination matrix cv::Mat dst = cv::Mat::zeros(source.rows, source.cols, CV_8U); // Create an image with a border. cv::Mat temp; cv::copyMakeBorder(source, temp, radius, radius, radius, radius, cv::BorderTypes::BORDER_REFLECT_101); // Init color weight. 
double coeff_i = -0.5 / (sigma_i * sigma_i); std::vector<double> gi_vec(256); double *gi = &gi_vec[0]; for (int i = 0; i < 256; i++) gi[i] = exp(i * i * coeff_i); // Generate gaussian space. std::vector<double> gs_vec(diameter * diameter); std::vector<int> space_coord_vec(diameter * diameter); //< Save here coord. double *gs = &gs_vec[0]; int *space_coord = &space_coord_vec[0]; const double coeff_s = -0.5 / (sigma_s * sigma_s); int maxk = 0; for (int i = -radius; i <= radius; i++) { for (int j = -radius; j <= radius; j++) { double r = sqrt(i * i + j * j); if (r > radius) //< Circle. continue; gs[maxk] = exp(r * r * coeff_s); space_coord[maxk++] = i * (int)blockSize_2D.x + j; } } // Copy data to device uchar* temp_d; handleError(cudaMalloc(&temp_d, temp.total()), __LINE__); handleError(cudaMemcpy(temp_d, temp.data, temp.total(), cudaMemcpyHostToDevice), __LINE__); uchar* dst_d; handleError(cudaMalloc(&dst_d, dst.total()), __LINE__); handleError(cudaMemcpy(dst_d, dst.data, dst.total(), cudaMemcpyHostToDevice), __LINE__); double* gs_d; handleError(cudaMalloc(&gs_d, diameter * diameter * sizeof(double)), __LINE__); handleError(cudaMemcpy(gs_d, gs, diameter * diameter * sizeof(double), cudaMemcpyHostToDevice), __LINE__); double* gi_d; handleError(cudaMalloc(&gi_d, 256 * sizeof(double)), __LINE__); handleError(cudaMemcpy(gi_d, gi, 256 * sizeof(double), cudaMemcpyHostToDevice), __LINE__); int* space_coord_d; handleError(cudaMalloc(&space_coord_d, diameter * diameter * sizeof(int)), __LINE__); handleError(cudaMemcpy(space_coord_d, space_coord, diameter * diameter * sizeof(int), cudaMemcpyHostToDevice), __LINE__); // Filtering process bf_parallel_k << < gridSize_2D, blockSize_2D, bytesNeeded >> > (temp_d, dst_d, diameter, gi_d, gs_d, space_coord_d, maxk, (int)source.cols, (int)source.rows, temp.step, dst.step); handleError(cudaDeviceSynchronize(), __LINE__); // Copy data from device handleError(cudaMemcpy(dst.data, dst_d, dst.total(), cudaMemcpyDeviceToHost), __LINE__); handleError(cudaFree(temp_d), __LINE__); handleError(cudaFree(dst_d), __LINE__); handleError(cudaFree(gs_d), __LINE__); handleError(cudaFree(gi_d), __LINE__); handleError(cudaFree(space_coord_d), __LINE__); return dst; }
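A hypothetical driver for the bf_parallel entry point defined above (the file names and the diameter/sigma values are placeholders; bf_parallel works on 8-bit single-channel images, so the input is loaded as grayscale):

#include <opencv2/opencv.hpp>
#include "bilateral_filter/bf.hpp"

int main() {
  // bf_parallel expects an 8-bit single-channel image.
  cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
  if (img.empty()) return 1;

  cv::Mat filtered = bf_parallel(img, /*diameter=*/9, /*sigma_i=*/75.0, /*sigma_s=*/75.0);

  cv::imwrite("output.png", filtered);
  return 0;
}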
6036fdba870504fbe74e0c5a8e156e2b47d57287.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "simple.hpp"

/*The CUDA kernel */
__global__ void vector_add_cu(float *out, float *a, float *b, int n){
    for(int i = 0; i < n; i++){
        out[i] = a[i] + b[i];
    }
}

/* Implementation of the function to be wrapped by Cython */
void addition(float *out, float *a, float *b, int N){
    float *d_a, *d_b, *d_out;

    hipMalloc((void**)&d_a, sizeof(float)*N);
    hipMalloc((void**)&d_b, sizeof(float)*N);
    hipMalloc((void**)&d_out, sizeof(float)*N);

    hipMemcpy(d_a, a, sizeof(float)*N, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, sizeof(float)*N, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( vector_add_cu), dim3(1), dim3(1), 0, 0, d_out, d_a, d_b, N);

    hipMemcpy(out, d_out, sizeof(float)*N, hipMemcpyDeviceToHost);

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_out);
}
6036fdba870504fbe74e0c5a8e156e2b47d57287.cu
#include "simple.hpp" /*The CUDA kernel */ __global__ void vector_add_cu(float *out, float *a, float *b, int n){ for(int i = 0; i < n; i++){ out[i] = a[i] + b[i]; } } /* Implementation of the function to be wrapped by Cython */ void addition(float *out, float *a, float *b, int N){ float *d_a, *d_b, *d_out; cudaMalloc((void**)&d_a, sizeof(float)*N); cudaMalloc((void**)&d_b, sizeof(float)*N); cudaMalloc((void**)&d_out, sizeof(float)*N); cudaMemcpy(d_a, a, sizeof(float)*N, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, sizeof(float)*N, cudaMemcpyHostToDevice); vector_add_cu<<<1, 1>>>(d_out, d_a, d_b, N); cudaMemcpy(out, d_out, sizeof(float)*N, cudaMemcpyDeviceToHost); cudaFree(d_a); cudaFree(d_b); cudaFree(d_out); }
37b19667c7c644ebf561c0fd666c16a6f518f910.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ndt_gpu/NormalDistributionsTransform.h" #include "ndt_gpu/debug.h" #include <cmath> #include <iostream> #include <pcl/common/transforms.h> #include "glog/logging.h" using std::endl; namespace gpu { GNormalDistributionsTransform::GNormalDistributionsTransform() { //GRegistration::GRegistration(); gauss_d1_ = gauss_d2_ = 0; outlier_ratio_ = 0.55; step_size_ = 0.1; resolution_ = 1.0f; trans_probability_ = 0; double gauss_c1, gauss_c2, gauss_d3; // Initializes the guassian fitting parameters (eq. 6.8) [Magnusson 2009] gauss_c1 = 10.0 * (1 - outlier_ratio_); gauss_c2 = outlier_ratio_ / pow (resolution_, 3); gauss_d3 = -log (gauss_c2); gauss_d1_ = -log ( gauss_c1 + gauss_c2 ) - gauss_d3; gauss_d2_ = -2 * log ((-log ( gauss_c1 * exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_); transformation_epsilon_ = 0.1; max_iterations_ = 35; j_ang_ = MatrixHost(24, 1); h_ang_ = MatrixHost(45, 1); dj_ang_ = MatrixDevice(24, 1); dh_ang_ = MatrixDevice(45, 1); real_iterations_ = 0; } GNormalDistributionsTransform::GNormalDistributionsTransform(const GNormalDistributionsTransform &other) { gauss_d1_ = other.gauss_d1_; gauss_d2_ = other.gauss_d2_; outlier_ratio_ = other.outlier_ratio_; j_ang_ = other.j_ang_; h_ang_ = other.h_ang_; dj_ang_ = other.dj_ang_; dh_ang_ = other.dh_ang_; step_size_ = other.step_size_; resolution_ = other.resolution_; trans_probability_ = other.trans_probability_; real_iterations_ = other.real_iterations_; voxel_grid_ = other.voxel_grid_; } GNormalDistributionsTransform::~GNormalDistributionsTransform() { dj_ang_.memFree(); dh_ang_.memFree(); } void GNormalDistributionsTransform::setStepSize(double step_size) { step_size_ = step_size; } void GNormalDistributionsTransform::setResolution(float resolution) { resolution_ = resolution; } void GNormalDistributionsTransform::setOutlierRatio(double olr) { outlier_ratio_ = olr; } double GNormalDistributionsTransform::getStepSize() const { return step_size_; } float GNormalDistributionsTransform::getResolution() const { return resolution_; } double GNormalDistributionsTransform::getOutlierRatio() const { return outlier_ratio_; } double GNormalDistributionsTransform::getTransformationProbability() const { return trans_probability_; } int GNormalDistributionsTransform::getRealIterations() { return real_iterations_; } double GNormalDistributionsTransform::auxilaryFunction_PsiMT(double a, double f_a, double f_0, double g_0, double mu) { return (f_a - f_0 - mu * g_0 * a); } double GNormalDistributionsTransform::auxilaryFunction_dPsiMT(double g_a, double g_0, double mu) { return (g_a - mu * g_0); } void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZI>::Ptr input) { // Copy input map data from the host memory to the GPU memory GRegistration::setInputTarget(input); LOG(INFO)<<"memcpy host to gpu."<<endl; // Build the voxel grid if (target_points_number_ != 0) { voxel_grid_.setLeafSize(resolution_, resolution_, resolution_); voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_); } LOG(INFO)<<"leafsize set."<<endl; } void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZ>::Ptr input) { // Copy input map data from the host memory to the GPU memory GRegistration::setInputTarget(input); // Build the voxel grid if (target_points_number_ != 0) { voxel_grid_.setLeafSize(resolution_, resolution_, resolution_); voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_); } } 
void GNormalDistributionsTransform::computeTransformation(const Eigen::Matrix<float, 4, 4> &guess) { if (dj_ang_.isEmpty()) { dj_ang_.memAlloc(); } if (dh_ang_.isEmpty()) { dh_ang_.memAlloc(); } nr_iterations_ = 0; converged_ = false; double gauss_c1, gauss_c2, gauss_d3; gauss_c1 = 10 * ( 1 - outlier_ratio_); gauss_c2 = outlier_ratio_ / pow(resolution_, 3); gauss_d3 = - log(gauss_c2); gauss_d1_ = -log(gauss_c1 + gauss_c2) - gauss_d3; gauss_d2_ = -2 * log((-log(gauss_c1 * exp(-0.5) + gauss_c2) - gauss_d3) / gauss_d1_); if (guess != Eigen::Matrix4f::Identity()) { final_transformation_ = guess; transformPointCloud(x_, y_, z_, trans_x_, trans_y_, trans_z_, points_number_, guess); } Eigen::Transform<float, 3, Eigen::Affine, Eigen::ColMajor> eig_transformation; eig_transformation.matrix() = final_transformation_; Eigen::Matrix<double, 6, 1> p, delta_p, score_gradient; Eigen::Vector3f init_translation = eig_transformation.translation(); Eigen::Vector3f init_rotation = eig_transformation.rotation().eulerAngles(0, 1, 2); p << init_translation(0), init_translation(1), init_translation(2), init_rotation(0), init_rotation(1), init_rotation(2); Eigen::Matrix<double, 6, 6> hessian; double score = 0; double delta_p_norm; score = computeDerivatives(score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_, p); int loop_time = 0; while (!converged_) { previous_transformation_ = transformation_; Eigen::JacobiSVD<Eigen::Matrix<double, 6, 6>> sv(hessian, Eigen::ComputeFullU | Eigen::ComputeFullV); delta_p = sv.solve(-score_gradient); delta_p_norm = delta_p.norm(); if (delta_p_norm == 0 || delta_p_norm != delta_p_norm) { trans_probability_ = score / static_cast<double>(points_number_); converged_ = delta_p_norm == delta_p_norm; return; } delta_p.normalize(); delta_p_norm = computeStepLengthMT(p, delta_p, delta_p_norm, step_size_, transformation_epsilon_ / 2, score, score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_); delta_p *= delta_p_norm; Eigen::Translation<float, 3> translation(static_cast<float>(delta_p(0)), static_cast<float>(delta_p(1)), static_cast<float>(delta_p(2))); Eigen::AngleAxis<float> tmp1(static_cast<float>(delta_p(3)), Eigen::Vector3f::UnitX()); Eigen::AngleAxis<float> tmp2(static_cast<float>(delta_p(4)), Eigen::Vector3f::UnitY()); Eigen::AngleAxis<float> tmp3(static_cast<float>(delta_p(5)), Eigen::Vector3f::UnitZ()); Eigen::AngleAxis<float> tmp4(tmp1 * tmp2 * tmp3); transformation_ = (translation * tmp4).matrix(); p = p + delta_p; //Not update visualizer if (nr_iterations_ > max_iterations_ || (nr_iterations_ && (::fabs(delta_p_norm) < transformation_epsilon_))) converged_ = true; nr_iterations_++; loop_time++; } trans_probability_ = score / static_cast<double>(points_number_); } /* First step of computing point gradients */ __global__ void computePointGradients0(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dj_ang, double *pg00, double *pg11, double *pg22, double *pg13, double *pg23, double *pg04, double *pg14) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double j_ang[12]; if (threadIdx.x < 12) { j_ang[threadIdx.x] = dj_ang[threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); //Set the 3x3 block start from (0, 0) to identity matrix pg00[i] = 1; pg11[i] = 
1; pg22[i] = 1; //Compute point derivatives pg13[i] = o_x * j_ang[0] + o_y * j_ang[1] + o_z * j_ang[2]; pg23[i] = o_x * j_ang[3] + o_y * j_ang[4] + o_z * j_ang[5]; pg04[i] = o_x * j_ang[6] + o_y * j_ang[7] + o_z * j_ang[8]; pg14[i] = o_x * j_ang[9] + o_y * j_ang[10] + o_z * j_ang[11]; } } /* Second step of computing point gradients */ __global__ void computePointGradients1(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dj_ang, double *pg24, double *pg05, double *pg15, double *pg25) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double j_ang[12]; if (threadIdx.x < 12) { j_ang[threadIdx.x] = dj_ang[threadIdx.x + 12]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); //Compute point derivatives pg24[i] = o_x * j_ang[0] + o_y * j_ang[1] + o_z * j_ang[2]; pg05[i] = o_x * j_ang[3] + o_y * j_ang[4] + o_z * j_ang[5]; pg15[i] = o_x * j_ang[6] + o_y * j_ang[7] + o_z * j_ang[8]; pg25[i] = o_x * j_ang[9] + o_y * j_ang[10] + o_z * j_ang[11]; } } /* First step of computing point hessians */ __global__ void computePointHessian0(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph93, double *ph103, double *ph113, double *ph123, double *ph94, double *ph133, double *ph104, double *ph143, double *ph114, double *ph153, double *ph95, double *ph163, double *ph105, double *ph173, double *ph115) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[18]; if (threadIdx.x < 18) { h_ang[threadIdx.x] = dh_ang[threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph93[i] = 0; ph103[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph113[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph123[i] = ph94[i] = 0; ph133[i] = ph104[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; ph143[i] = ph114[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11]; ph153[i] = ph95[i] = 0; ph163[i] = ph105[i] = o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14]; ph173[i] = ph115[i] = o_x * h_ang[15] + o_y * h_ang[16] + o_z * h_ang[17]; } } __global__ void computePointHessian1(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph124, double *ph134, double *ph144, double *ph154, double *ph125, double *ph164, double *ph135, double *ph174, double *ph145) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[18]; if (threadIdx.x < 18) { h_ang[threadIdx.x] = dh_ang[18 + threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph124[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph134[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph144[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; ph154[i] = ph125[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11]; ph164[i] = ph135[i] = 
o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14]; ph174[i] = ph145[i] = o_x * h_ang[15] + o_y * h_ang[16] + o_z * h_ang[17]; } } __global__ void computePointHessian2(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph155, double *ph165, double *ph175) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[9]; if (threadIdx.x < 9) { h_ang[threadIdx.x] = dh_ang[36 + threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph155[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph165[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph175[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; } } /* compute score_inc list for input points. * The final score_inc is calculated by a reduction sum * on this score_inc list. */ __global__ void computeScoreList(int *starting_voxel_id, int *voxel_id, int valid_points_num, double *e_x_cov_x, double gauss_d1, double *score) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < valid_points_num; i += stride) { double score_inc = 0; for (int vid = starting_voxel_id[i]; vid < starting_voxel_id[i + 1]; vid++) { double tmp_ex = e_x_cov_x[vid]; score_inc += (tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex) ? 0 : -gauss_d1 * tmp_ex; } score[i] = score_inc; } } /* First step to compute score gradient list for input points */ __global__ void computeScoreGradientList(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, int voxel_num, double *e_x_cov_x, double *cov_dxd_pi, double gauss_d1, int valid_voxel_num, double *score_gradients) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int col = blockIdx.y; if (col < 6) { double *sg = score_gradients + col * valid_points_num; double *cov_dxd_pi_mat0 = cov_dxd_pi + col * valid_voxel_num; double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num; double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double tmp_sg = 0.0; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { tmp_ex *= gauss_d1; tmp_sg += ((d_x - centroid_x[vid]) * cov_dxd_pi_mat0[j] + (d_y - centroid_y[vid]) * cov_dxd_pi_mat1[j] + (d_z - centroid_z[vid]) * cov_dxd_pi_mat2[j]) * tmp_ex; } } sg[i] = tmp_sg; } } } /* Intermediate step to compute e_x_cov_x */ __global__ void computeExCovX(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centr_x, double *centr_y, double *centr_z, double gauss_d1, double gauss_d2, double *e_x_cov_x, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for 
(int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double t_x, t_y, t_z; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; t_x = d_x - centr_x[vid]; t_y = d_y - centr_y[vid]; t_z = d_z - centr_z[vid]; e_x_cov_x[j] = exp(-gauss_d2 * ((t_x * icov00[vid] + t_y * icov01[vid] + t_z * icov02[vid]) * t_x + ((t_x * icov10[vid] + t_y * icov11[vid] + t_z * icov12[vid]) * t_y) + ((t_x * icov20[vid] + t_y * icov21[vid] + t_z * icov22[vid]) * t_z)) / 2.0); } } } /* update e_x_cov_x - Reusable portion of Equation 6.12 and 6.13 [Magnusson 2009] */ __global__ void updateExCovX(double *e_x_cov_x, double gauss_d2, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < valid_voxel_num; i += stride) { e_x_cov_x[i] *= gauss_d2; } } /* compute cov_dxd_pi as reusable portion of Equation 6.12 and 6.13 [Magnusson 2009]*/ __global__ void computeCovDxdPi(int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *inverse_covariance, int voxel_num, double gauss_d1, double gauss_d2, double *point_gradients, double *cov_dxd_pi, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 3 && col < 6) { double *icov0 = inverse_covariance + row * 3 * voxel_num; double *icov1 = icov0 + voxel_num; double *icov2 = icov1 + voxel_num; double *cov_dxd_pi_tmp = cov_dxd_pi + (row * 6 + col) * valid_voxel_num; double *pg_tmp0 = point_gradients + col * valid_points_num; double *pg_tmp1 = pg_tmp0 + 6 * valid_points_num; double *pg_tmp2 = pg_tmp1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { double pg0 = pg_tmp0[i]; double pg1 = pg_tmp1[i]; double pg2 = pg_tmp2[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; cov_dxd_pi_tmp[j] = icov0[vid] * pg0 + icov1[vid] * pg1 + icov2[vid] * pg2; } } } } /* First step to compute hessian list for input points */ __global__ void computeHessianListS0(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22, double *point_gradients, double *tmp_hessian, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int col = blockIdx.y; if (col < 6) { double *tmp_pg0 = point_gradients + col * valid_points_num; double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num; double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num; double *tmp_h = tmp_hessian + col * valid_voxel_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double pg0 = tmp_pg0[i]; double pg1 = tmp_pg1[i]; double pg2 = tmp_pg2[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; tmp_h[j] = (d_x - centroid_x[vid]) * (icov00[vid] * pg0 + icov01[vid] * pg1 + icov02[vid] * pg2) + (d_y - centroid_y[vid]) * (icov10[vid] * pg0 + 
icov11[vid] * pg1 + icov12[vid] * pg2) + (d_z - centroid_z[vid]) * (icov20[vid] * pg0 + icov21[vid] * pg1 + icov22[vid] * pg2); } } } } /* Fourth step to compute hessian list */ __global__ void computeHessianListS1(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double gauss_d1, double gauss_d2, double *hessians, double *e_x_cov_x, double *tmp_hessian, double *cov_dxd_pi, double *point_gradients, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 6 && col < 6) { double *cov_dxd_pi_mat0 = cov_dxd_pi + row * valid_voxel_num; double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num; double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num; double *tmp_h = tmp_hessian + col * valid_voxel_num; double *h = hessians + (row * 6 + col) * valid_points_num; double *tmp_pg0 = point_gradients + col * valid_points_num; double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num; double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double pg0 = tmp_pg0[i]; double pg1 = tmp_pg1[i]; double pg2 = tmp_pg2[i]; double final_hessian = 0.0; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { //Transformed coordinates int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { double cov_dxd0 = cov_dxd_pi_mat0[j]; double cov_dxd1 = cov_dxd_pi_mat1[j]; double cov_dxd2 = cov_dxd_pi_mat2[j]; tmp_ex *= gauss_d1; final_hessian += -gauss_d2 * ((d_x - centroid_x[vid]) * cov_dxd0 + (d_y - centroid_y[vid]) * cov_dxd1 + (d_z - centroid_z[vid]) * cov_dxd2) * tmp_h[j] * tmp_ex; final_hessian += (pg0 * cov_dxd0 + pg1 * cov_dxd1 + pg2 * cov_dxd2) * tmp_ex; } } h[i] = final_hessian; } } } __global__ void computeHessianListS2(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double gauss_d1, double *e_x_cov_x, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22, double *point_hessians, double *hessians, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 6 && col < 6) { double *h = hessians + (row * 6 + col) * valid_points_num; double *tmp_ph0 = point_hessians + ((3 * row) * 6 + col) * valid_points_num; double *tmp_ph1 = tmp_ph0 + 6 * valid_points_num; double *tmp_ph2 = tmp_ph1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double ph0 = tmp_ph0[i]; double ph1 = tmp_ph1[i]; double ph2 = tmp_ph2[i]; double final_hessian = h[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { //Transformed coordinates int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { tmp_ex *= gauss_d1; final_hessian 
+= (d_x - centroid_x[vid]) * (icov00[vid] * ph0 + icov01[vid] * ph1 + icov02[vid] * ph2) * tmp_ex; final_hessian += (d_y - centroid_y[vid]) * (icov10[vid] * ph0 + icov11[vid] * ph1 + icov12[vid] * ph2) * tmp_ex; final_hessian += (d_z - centroid_z[vid]) * (icov20[vid] * ph0 + icov21[vid] * ph1 + icov22[vid] * ph2) * tmp_ex; } } h[i] = final_hessian; } } } /* Compute sum of a list of matrices */ __global__ void matrixSum(double *matrix_list, int full_size, int half_size, int rows, int cols, int offset) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; for (int i = index; i < half_size && row < rows && col < cols; i += stride) { MatrixDevice left(rows, cols, offset, matrix_list + i); double *right_ptr = (i + half_size < full_size) ? matrix_list + i + half_size : NULL; MatrixDevice right(rows, cols, offset, right_ptr); if (right_ptr != NULL) { left(row, col) += right(row, col); } } } /* Compute sum of score_inc list */ __global__ void sumScore(double *score, int full_size, int half_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < half_size; i += stride) { score[i] += (i + half_size < full_size) ? score[i + half_size] : 0; } } double GNormalDistributionsTransform::computeDerivatives(Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num, Eigen::Matrix<double, 6, 1> pose, bool compute_hessian) { MatrixHost p(6, 1); for (int i = 0; i < 6; i++) { p(i) = pose(i, 0); } score_gradient.setZero (); hessian.setZero (); //Compute Angle Derivatives computeAngleDerivatives(p); //Radius Search int *valid_points, *voxel_id, *starting_voxel_id; int valid_voxel_num, valid_points_num; valid_points = voxel_id = starting_voxel_id = NULL; voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX, &valid_points, &starting_voxel_id, &voxel_id, &valid_voxel_num, &valid_points_num); double *covariance = voxel_grid_.getCovarianceList(); double *inverse_covariance = voxel_grid_.getInverseCovarianceList(); double *centroid = voxel_grid_.getCentroidList(); int *points_per_voxel = voxel_grid_.getPointsPerVoxelList(); int voxel_num = voxel_grid_.getVoxelNum(); if (valid_points_num == 0) return 0; //Update score gradient and hessian matrix double *gradients, *hessians, *point_gradients, *point_hessians, *score; checkCudaErrors(hipMalloc(&gradients, sizeof(double) * valid_points_num * 6)); checkCudaErrors(hipMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(hipMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(hipMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6)); checkCudaErrors(hipMalloc(&score, sizeof(double) * valid_points_num)); checkCudaErrors(hipMemset(gradients, 0, sizeof(double) * valid_points_num * 6)); checkCudaErrors(hipMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(hipMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(hipMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6)); int block_x = (valid_points_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : valid_points_num; int grid_x = (valid_points_num - 1) / block_x + 1; dim3 grid; hipLaunchKernelGGL(( computePointGradients0), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients, point_gradients + valid_points_num * 7, point_gradients + valid_points_num * 14, point_gradients + valid_points_num * 9, point_gradients + valid_points_num * 15, point_gradients + valid_points_num * 4, point_gradients + valid_points_num * 10); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointGradients1), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients + valid_points_num * 16, point_gradients + valid_points_num * 5, point_gradients + valid_points_num * 11, point_gradients + valid_points_num * 17); checkCudaErrors(hipGetLastError()); if (compute_hessian) { hipLaunchKernelGGL(( computePointHessian0), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63, point_hessians + valid_points_num * 69, point_hessians + valid_points_num * 75, point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81, point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87, point_hessians + valid_points_num * 70, point_hessians + valid_points_num * 93, point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99, point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105, point_hessians + valid_points_num * 71); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointHessian1), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82, point_hessians + valid_points_num * 88, point_hessians + valid_points_num * 94, point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100, point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106, point_hessians + valid_points_num * 89); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointHessian2), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 95, point_hessians + valid_points_num * 101, point_hessians + valid_points_num * 107); checkCudaErrors(hipGetLastError()); } checkCudaErrors(hipDeviceSynchronize()); double *tmp_hessian; checkCudaErrors(hipMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6)); double *e_x_cov_x; checkCudaErrors(hipMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num)); double *cov_dxd_pi; checkCudaErrors(hipMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6)); hipLaunchKernelGGL(( computeExCovX), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( 
computeScoreList), dim3(grid_x), dim3(block_x), 0, 0, starting_voxel_id, voxel_id, valid_points_num, e_x_cov_x, gauss_d1_, score); checkCudaErrors(hipGetLastError()); int block_x2 = (valid_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_voxel_num; int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1; hipLaunchKernelGGL(( updateExCovX), dim3(grid_x2), dim3(block_x2), 0, 0, e_x_cov_x, gauss_d2_, valid_voxel_num); checkCudaErrors(hipGetLastError()); grid.x = grid_x; grid.y = 3; grid.z = 6; hipLaunchKernelGGL(( computeCovDxdPi), dim3(grid), dim3(block_x), 0, 0, valid_points, starting_voxel_id, voxel_id, valid_points_num, inverse_covariance, voxel_num, gauss_d1_, gauss_d2_, point_gradients, cov_dxd_pi, valid_voxel_num); checkCudaErrors(hipGetLastError()); grid.x = grid_x; grid.y = 6; grid.z = 1; hipLaunchKernelGGL(( computeScoreGradientList), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, voxel_num, e_x_cov_x, cov_dxd_pi, gauss_d1_, valid_voxel_num, gradients); checkCudaErrors(hipGetLastError()); if (compute_hessian) { grid.y = 6; grid.z = 1; hipLaunchKernelGGL(( computeHessianListS0), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_gradients, tmp_hessian, valid_voxel_num); checkCudaErrors(hipGetLastError()); grid.z = 6; hipLaunchKernelGGL(( computeHessianListS1), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, hessians, e_x_cov_x, tmp_hessian, cov_dxd_pi, point_gradients, valid_voxel_num); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computeHessianListS2), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_hessians, hessians, valid_voxel_num); checkCudaErrors(hipGetLastError()); } int full_size = valid_points_num; int half_size = (full_size - 1) / 2 + 1; while (full_size > 1) { block_x = (half_size > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : half_size; grid_x = (half_size - 1) / block_x + 1; grid.x = grid_x; grid.y = 1; grid.z = 6; hipLaunchKernelGGL(( matrixSum), dim3(grid), dim3(block_x), 0, 0, gradients, full_size, half_size, 1, 6, valid_points_num); checkCudaErrors(hipGetLastError()); grid.y = 6; hipLaunchKernelGGL(( matrixSum), dim3(grid), dim3(block_x), 0, 0, hessians, full_size, half_size, 6, 6, valid_points_num); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( sumScore), dim3(grid_x), dim3(block_x), 0, 0, score, full_size, half_size); checkCudaErrors(hipGetLastError()); full_size = half_size; half_size = (full_size - 1) / 2 + 1; } checkCudaErrors(hipDeviceSynchronize()); MatrixDevice dgrad(1, 6, valid_points_num, gradients), dhess(6, 6, valid_points_num, hessians); MatrixHost hgrad(1, 6), hhess(6, 6); hgrad.moveToHost(dgrad); hhess.moveToHost(dhess); for (int i = 0; i < 6; i++) { score_gradient(i) = hgrad(i); } for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { hessian(i, j) = hhess(i, j); } } double score_inc; checkCudaErrors(hipMemcpy(&score_inc, score, sizeof(double), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(gradients)); checkCudaErrors(hipFree(hessians)); checkCudaErrors(hipFree(point_hessians)); checkCudaErrors(hipFree(point_gradients)); checkCudaErrors(hipFree(score)); checkCudaErrors(hipFree(tmp_hessian)); checkCudaErrors(hipFree(e_x_cov_x)); checkCudaErrors(hipFree(cov_dxd_pi)); if (valid_points != NULL) checkCudaErrors(hipFree(valid_points)); if (voxel_id != NULL) checkCudaErrors(hipFree(voxel_id)); if (starting_voxel_id != NULL) checkCudaErrors(hipFree(starting_voxel_id)); return score_inc; } void GNormalDistributionsTransform::computeAngleDerivatives(MatrixHost pose, bool compute_hessian) { double cx, cy, cz, sx, sy, sz; if (fabs(pose(3)) < 10e-5) { cx = 1.0; sx = 0.0; } else { cx = cos(pose(3)); sx = sin(pose(3)); } if (fabs(pose(4)) < 10e-5) { cy = 1.0; sy = 0.0; } else { cy = cos(pose(4)); sy = sin(pose(4)); } if (fabs(pose(5)) < 10e-5) { cz = 1.0; sz = 0.0; } else { cz = cos(pose(5)); sz = sin(pose(5)); } j_ang_(0) = -sx * sz + cx * sy * cz; j_ang_(1) = -sx * cz - cx * sy * sz; j_ang_(2) = -cx * cy; j_ang_(3) = cx * sz + sx * sy * cz; j_ang_(4) = cx * cz - sx * sy * sz; j_ang_(5) = -sx * cy; j_ang_(6) = -sy * cz; j_ang_(7) = sy * sz; j_ang_(8) = cy; j_ang_(9) = sx * cy * cz; j_ang_(10) = -sx * cy * sz; j_ang_(11) = sx * sy; j_ang_(12) = -cx * cy * cz; j_ang_(13) = cx * cy * sz; j_ang_(14) = -cx * sy; j_ang_(15) = -cy * sz; j_ang_(16) = -cy * cz; j_ang_(17) = 0; j_ang_(18) = cx * cz - sx * sy * sz; j_ang_(19) = -cx * sz - sx * sy * cz; j_ang_(20) = 0; j_ang_(21) = sx * cz + cx * sy * sz; j_ang_(22) = cx * sy * cz - sx * sz; j_ang_(23) = 0; j_ang_.moveToGpu(dj_ang_); if (compute_hessian) { h_ang_(0) = -cx * sz - sx * sy * cz; h_ang_(1) = -cx * cz + sx * sy * sz; h_ang_(2) = sx * cy; h_ang_(3) = -sx * sz + cx * sy * cz; h_ang_(4) = -cx * sy * sz - sx * cz; h_ang_(5) = -cx * cy; h_ang_(6) = cx * cy * cz; h_ang_(7) = -cx * cy * sz; h_ang_(8) = cx * sy; h_ang_(9) = sx * cy * cz; h_ang_(10) = -sx * cy * sz; h_ang_(11) = sx * sy; h_ang_(12) = -sx * cz - cx * sy * sz; h_ang_(13) = sx * sz - cx * sy * cz; h_ang_(14) = 0; h_ang_(15) = cx * cz - sx * sy * sz; h_ang_(16) = -sx * sy * cz - cx * sz; h_ang_(17) = 0; h_ang_(18) = -cy * cz; h_ang_(19) = cy * sz; h_ang_(20) = sy; h_ang_(21) = -sx * sy * cz; h_ang_(22) = sx * sy * sz; h_ang_(23) = sx * cy; h_ang_(24) = cx * sy * cz; h_ang_(25) = -cx * sy * sz; h_ang_(26) = -cx * cy; h_ang_(27) = sy * sz; h_ang_(28) = sy * cz; 
h_ang_(29) = 0; h_ang_(30) = -sx * cy * sz; h_ang_(31) = -sx * cy * cz; h_ang_(32) = 0; h_ang_(33) = cx * cy * sz; h_ang_(34) = cx * cy * cz; h_ang_(35) = 0; h_ang_(36) = -cy * cz; h_ang_(37) = cy * sz; h_ang_(38) = 0; h_ang_(39) = -cx * sz - sx * sy * cz; h_ang_(40) = -cx * cz + sx * sy * sz; h_ang_(41) = 0; h_ang_(42) = -sx * sz + cx * sy * cz; h_ang_(43) = -cx * sy * sz - sx * cz; h_ang_(44) = 0; h_ang_.moveToGpu(dh_ang_); } } __global__ void gpuTransform(float *in_x, float *in_y, float *in_z, float *trans_x, float *trans_y, float *trans_z, int point_num, MatrixDevice transform) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; float x, y, z; for (int i = idx; i < point_num; i += stride) { x = in_x[i]; y = in_y[i]; z = in_z[i]; trans_x[i] = transform(0, 0) * x + transform(0, 1) * y + transform(0, 2) * z + transform(0, 3); trans_y[i] = transform(1, 0) * x + transform(1, 1) * y + transform(1, 2) * z + transform(1, 3); trans_z[i] = transform(2, 0) * x + transform(2, 1) * y + transform(2, 2) * z + transform(2, 3); } } void GNormalDistributionsTransform::transformPointCloud(float *in_x, float *in_y, float *in_z, float *trans_x, float *trans_y, float *trans_z, int points_number, Eigen::Matrix<float, 4, 4> transform) { Eigen::Transform<float, 3, Eigen::Affine> t(transform); MatrixHost htrans(3, 4); MatrixDevice dtrans(3, 4); dtrans.memAlloc(); for (int i = 0; i < 3; i++) { for (int j = 0; j < 4; j++) { htrans(i, j) = t(i, j); } } htrans.moveToGpu(dtrans); if (points_number > 0) { int block_x = (points_number <= BLOCK_SIZE_X) ? points_number : BLOCK_SIZE_X; int grid_x = (points_number - 1) / block_x + 1; hipLaunchKernelGGL(( gpuTransform), dim3(grid_x), dim3(block_x) , 0, 0, in_x, in_y, in_z, trans_x, trans_y, trans_z, points_number, dtrans); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); } dtrans.memFree(); } double GNormalDistributionsTransform::computeStepLengthMT(const Eigen::Matrix<double, 6, 1> &x, Eigen::Matrix<double, 6, 1> &step_dir, double step_init, double step_max, double step_min, double &score, Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num) { double phi_0 = -score; double d_phi_0 = -(score_gradient.dot(step_dir)); Eigen::Matrix<double, 6, 1> x_t; if (d_phi_0 >= 0) { if (d_phi_0 == 0) return 0; else { d_phi_0 *= -1; step_dir *= -1; } } int max_step_iterations = 10; int step_iterations = 0; double mu = 1.e-4; double nu = 0.9; double a_l = 0, a_u = 0; double f_l = auxilaryFunction_PsiMT(a_l, phi_0, phi_0, d_phi_0, mu); double g_l = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu); double f_u = auxilaryFunction_PsiMT(a_u, phi_0, phi_0, d_phi_0, mu); double g_u = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu); bool interval_converged = (step_max - step_min) > 0, open_interval = true; double a_t = step_init; a_t = ::min(a_t, step_max); a_t = ::max(a_t, step_min); x_t = x + step_dir * a_t; Eigen::Translation<float, 3> translation(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))); Eigen::AngleAxis<float> tmp1(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()); Eigen::AngleAxis<float> tmp2(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()); Eigen::AngleAxis<float> tmp3(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ()); Eigen::AngleAxis<float> tmp4(tmp1 * tmp2 * tmp3); final_transformation_ = (translation * tmp4).matrix(); transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, 
points_num, final_transformation_); score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t); double phi_t = -score; double d_phi_t = -(score_gradient.dot(step_dir)); double psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu); double d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu); while (!interval_converged && step_iterations < max_step_iterations && !(psi_t <= 0 && d_phi_t <= -nu * d_phi_0)) { if (open_interval) { a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t); } else { a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t); } a_t = (a_t < step_max) ? a_t : step_max; a_t = (a_t > step_min) ? a_t : step_min; x_t = x + step_dir * a_t; translation = Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))); tmp1 = Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()); tmp2 = Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()); tmp3 = Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ()); tmp4 = tmp1 * tmp2 * tmp3; final_transformation_ = (translation * tmp4).matrix(); transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_); score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t, false); phi_t -= score; d_phi_t -= (score_gradient.dot(step_dir)); psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu); d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu); if (open_interval && (psi_t <= 0 && d_psi_t >= 0)) { open_interval = false; f_l += phi_0 - mu * d_phi_0 * a_l; g_l += mu * d_phi_0; f_u += phi_0 - mu * d_phi_0 * a_u; g_u += mu * d_phi_0; } if (open_interval) { interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t); } else { interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t); } step_iterations++; } if (step_iterations) { computeHessian(hessian, trans_x, trans_y, trans_z, points_num, x_t); } real_iterations_ += step_iterations; return a_t; } //Copied from ndt.hpp double GNormalDistributionsTransform::trialValueSelectionMT (double a_l, double f_l, double g_l, double a_u, double f_u, double g_u, double a_t, double f_t, double g_t) { // Case 1 in Trial Value Selection [More, Thuente 1994] if (f_t > f_l) { // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l; double w = std::sqrt (z * z - g_t * g_l); // Equation 2.4.56 [Sun, Yuan 2006] double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w); // Calculate the minimizer of the quadratic that interpolates f_l, f_t and g_l // Equation 2.4.2 [Sun, Yuan 2006] double a_q = a_l - 0.5 * (a_l - a_t) * g_l / (g_l - (f_l - f_t) / (a_l - a_t)); if (std::fabs (a_c - a_l) < std::fabs (a_q - a_l)) return (a_c); else return (0.5 * (a_q + a_c)); } // Case 2 in Trial Value Selection [More, Thuente 1994] else if (g_t * g_l < 0) { // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l; double w = std::sqrt (z * z - g_t * g_l); // Equation 2.4.56 [Sun, Yuan 2006] double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w); // Calculate the minimizer of the quadratic that interpolates f_l, g_l and g_t // Equation 2.4.5 [Sun, 
Yuan 2006] double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l; if (std::fabs (a_c - a_t) >= std::fabs (a_s - a_t)) return (a_c); else return (a_s); } // Case 3 in Trial Value Selection [More, Thuente 1994] else if (std::fabs (g_t) <= std::fabs (g_l)) { // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l; double w = std::sqrt (z * z - g_t * g_l); double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w); // Calculate the minimizer of the quadratic that interpolates g_l and g_t // Equation 2.4.5 [Sun, Yuan 2006] double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l; double a_t_next; if (std::fabs (a_c - a_t) < std::fabs (a_s - a_t)) a_t_next = a_c; else a_t_next = a_s; if (a_t > a_l) return (std::min (a_t + 0.66 * (a_u - a_t), a_t_next)); else return (std::max (a_t + 0.66 * (a_u - a_t), a_t_next)); } // Case 4 in Trial Value Selection [More, Thuente 1994] else { // Calculate the minimizer of the cubic that interpolates f_u, f_t, g_u and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_u) / (a_t - a_u) - g_t - g_u; double w = std::sqrt (z * z - g_t * g_u); // Equation 2.4.56 [Sun, Yuan 2006] return (a_u + (a_t - a_u) * (w - g_u - z) / (g_t - g_u + 2 * w)); } } //Copied from ndt.hpp double GNormalDistributionsTransform::updateIntervalMT (double &a_l, double &f_l, double &g_l, double &a_u, double &f_u, double &g_u, double a_t, double f_t, double g_t) { // Case U1 in Update Algorithm and Case a in Modified Update Algorithm [More, Thuente 1994] if (f_t > f_l) { a_u = a_t; f_u = f_t; g_u = g_t; return (false); } // Case U2 in Update Algorithm and Case b in Modified Update Algorithm [More, Thuente 1994] else if (g_t * (a_l - a_t) > 0) { a_l = a_t; f_l = f_t; g_l = g_t; return (false); } // Case U3 in Update Algorithm and Case c in Modified Update Algorithm [More, Thuente 1994] else if (g_t * (a_l - a_t) < 0) { a_u = a_l; f_u = f_l; g_u = g_l; a_l = a_t; f_l = f_t; g_l = g_t; return (false); } // Interval Converged else return (true); } void GNormalDistributionsTransform::computeHessian(Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num, Eigen::Matrix<double, 6, 1> &p) { int *valid_points, *voxel_id, *starting_voxel_id; int valid_voxel_num, valid_points_num; //Radius Search voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX, &valid_points, &starting_voxel_id, &voxel_id, &valid_voxel_num, &valid_points_num); double *centroid = voxel_grid_.getCentroidList(); double *covariance = voxel_grid_.getCovarianceList(); double *inverse_covariance = voxel_grid_.getInverseCovarianceList(); int *points_per_voxel = voxel_grid_.getPointsPerVoxelList(); int voxel_num = voxel_grid_.getVoxelNum(); if (valid_points_num <= 0) return; //Update score gradient and hessian matrix double *hessians, *point_gradients, *point_hessians; checkCudaErrors(hipMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(hipMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(hipMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6)); checkCudaErrors(hipMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(hipMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(hipMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6)); int block_x = (valid_points_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : valid_points_num; int grid_x = (valid_points_num - 1) / block_x + 1; dim3 grid; hipLaunchKernelGGL(( computePointGradients0), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients, point_gradients + valid_points_num * 7, point_gradients + valid_points_num * 14, point_gradients + valid_points_num * 9, point_gradients + valid_points_num * 15, point_gradients + valid_points_num * 4, point_gradients + valid_points_num * 10); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointGradients1), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients + valid_points_num * 16, point_gradients + valid_points_num * 5, point_gradients + valid_points_num * 11, point_gradients + valid_points_num * 17); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointHessian0), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63, point_hessians + valid_points_num * 69, point_hessians + valid_points_num * 75, point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81, point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87, point_hessians + valid_points_num * 70, point_hessians + valid_points_num * 93, point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99, point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105, point_hessians + valid_points_num * 71); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointHessian1), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82, point_hessians + valid_points_num * 88, point_hessians + valid_points_num * 94, point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100, point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106, point_hessians + valid_points_num * 89); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointHessian2), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 95, point_hessians + valid_points_num * 101, point_hessians + valid_points_num * 107); checkCudaErrors(hipGetLastError()); double *tmp_hessian; checkCudaErrors(hipMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6)); double *e_x_cov_x; checkCudaErrors(hipMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num)); double *cov_dxd_pi; checkCudaErrors(hipMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6)); hipLaunchKernelGGL(( computeExCovX), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num); checkCudaErrors(hipGetLastError()); grid.x = grid_x; grid.y = 3; grid.z = 6; hipLaunchKernelGGL(( computeCovDxdPi), 
dim3(grid), dim3(block_x), 0, 0, valid_points, starting_voxel_id, voxel_id, valid_points_num, inverse_covariance, voxel_num, gauss_d1_, gauss_d2_, point_gradients, cov_dxd_pi, valid_voxel_num); checkCudaErrors(hipGetLastError()); int block_x2 = (valid_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_voxel_num; int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1; hipLaunchKernelGGL(( updateExCovX), dim3(grid_x2), dim3(block_x2), 0, 0, e_x_cov_x, gauss_d2_, valid_voxel_num); checkCudaErrors(hipGetLastError()); grid.y = 6; grid.z = 1; hipLaunchKernelGGL(( computeHessianListS0), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_gradients, tmp_hessian, valid_voxel_num); checkCudaErrors(hipGetLastError()); grid.z = 6; hipLaunchKernelGGL(( computeHessianListS1), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, hessians, e_x_cov_x, tmp_hessian, cov_dxd_pi, point_gradients, valid_voxel_num); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computeHessianListS2), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_hessians, hessians, valid_voxel_num); checkCudaErrors(hipGetLastError()); int full_size = valid_points_num; int half_size = (full_size - 1) / 2 + 1; while (full_size > 1) { block_x = (half_size > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : half_size; grid_x = (half_size - 1) / block_x + 1; grid.x = grid_x; grid.y = 6; grid.z = 6; hipLaunchKernelGGL(( matrixSum), dim3(grid_x), dim3(block_x), 0, 0, hessians, full_size, half_size, 6, 6, valid_points_num); full_size = half_size; half_size = (full_size - 1) / 2 + 1; } checkCudaErrors(hipDeviceSynchronize()); MatrixDevice dhessian(6, 6, valid_points_num, hessians); MatrixHost hhessian(6, 6); hhessian.moveToHost(dhessian); for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { hessian(i, j) = hhessian(i, j); } } checkCudaErrors(hipFree(hessians)); checkCudaErrors(hipFree(point_hessians)); checkCudaErrors(hipFree(point_gradients)); checkCudaErrors(hipFree(tmp_hessian)); checkCudaErrors(hipFree(e_x_cov_x)); checkCudaErrors(hipFree(cov_dxd_pi)); if (valid_points != NULL) { checkCudaErrors(hipFree(valid_points)); } if (voxel_id != NULL) { checkCudaErrors(hipFree(voxel_id)); } if (starting_voxel_id != NULL) { checkCudaErrors(hipFree(starting_voxel_id)); } dhessian.memFree(); } template <typename T> __global__ void gpuSum(T *input, int size, int half_size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < half_size; i += stride) { if (i + half_size < size) { input[i] += (half_size < size) ? input[i + half_size] : 0; } } } double GNormalDistributionsTransform::getFitnessScore(double max_range) { double fitness_score = 0.0; float *trans_x, *trans_y, *trans_z; checkCudaErrors(hipMalloc(&trans_x, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&trans_y, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&trans_z, sizeof(float) * points_number_)); transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_number_, final_transformation_); int *valid_distance; checkCudaErrors(hipMalloc(&valid_distance, sizeof(int) * points_number_)); double *min_distance; checkCudaErrors(hipMalloc(&min_distance, sizeof(double) * points_number_)); voxel_grid_.nearestNeighborSearch(trans_x, trans_y, trans_z, points_number_, valid_distance, min_distance, max_range); int size = points_number_; int half_size; while (size > 1) { half_size = (size - 1) / 2 + 1; int block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size; int grid_x = (half_size - 1) / block_x + 1; hipLaunchKernelGGL(( gpuSum<double>), dim3(grid_x), dim3(block_x), 0, 0, min_distance, size, half_size); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( gpuSum<int>), dim3(grid_x), dim3(block_x), 0, 0, valid_distance, size, half_size); checkCudaErrors(hipGetLastError()); size = half_size; } checkCudaErrors(hipDeviceSynchronize()); int nr; checkCudaErrors(hipMemcpy(&nr, valid_distance, sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&fitness_score, min_distance, sizeof(double), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(trans_x)); checkCudaErrors(hipFree(trans_y)); checkCudaErrors(hipFree(trans_z)); checkCudaErrors(hipFree(valid_distance)); checkCudaErrors(hipFree(min_distance)); if (nr > 0) return (fitness_score / nr); return DBL_MAX; } }
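The NDT implementation above reduces per-point scores, gradients, and Hessians with the same host-driven pattern throughout (`sumScore`, `matrixSum`, `gpuSum`): a kernel adds the upper half of a device buffer into the lower half, and the host halves the active range until a single element remains, which is then copied back. The standalone sketch below illustrates that pattern on a plain double array; the buffer name, block size, and plain CUDA launch syntax are illustrative only and not taken from the original code.

// Illustrative halving reduction in the style of sumScore/gpuSum above.
__global__ void foldHalf(double *buf, int full_size, int half_size) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;
  for (; i < half_size; i += stride) {
    buf[i] += (i + half_size < full_size) ? buf[i + half_size] : 0.0;
  }
}

// Host side: repeatedly fold the upper half into the lower half,
// then read back element 0, which holds the total.
// double sum;
// int full_size = n;
// while (full_size > 1) {
//   int half_size = (full_size - 1) / 2 + 1;
//   int block = (half_size > 256) ? 256 : half_size;
//   int grid = (half_size - 1) / block + 1;
//   foldHalf<<<grid, block>>>(d_buf, full_size, half_size);
//   full_size = half_size;
// }
// cudaMemcpy(&sum, d_buf, sizeof(double), cudaMemcpyDeviceToHost);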
37b19667c7c644ebf561c0fd666c16a6f518f910.cu
#include "ndt_gpu/NormalDistributionsTransform.h" #include "ndt_gpu/debug.h" #include <cmath> #include <iostream> #include <pcl/common/transforms.h> #include "glog/logging.h" using std::endl; namespace gpu { GNormalDistributionsTransform::GNormalDistributionsTransform() { //GRegistration::GRegistration(); gauss_d1_ = gauss_d2_ = 0; outlier_ratio_ = 0.55; step_size_ = 0.1; resolution_ = 1.0f; trans_probability_ = 0; double gauss_c1, gauss_c2, gauss_d3; // Initializes the guassian fitting parameters (eq. 6.8) [Magnusson 2009] gauss_c1 = 10.0 * (1 - outlier_ratio_); gauss_c2 = outlier_ratio_ / pow (resolution_, 3); gauss_d3 = -log (gauss_c2); gauss_d1_ = -log ( gauss_c1 + gauss_c2 ) - gauss_d3; gauss_d2_ = -2 * log ((-log ( gauss_c1 * exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_); transformation_epsilon_ = 0.1; max_iterations_ = 35; j_ang_ = MatrixHost(24, 1); h_ang_ = MatrixHost(45, 1); dj_ang_ = MatrixDevice(24, 1); dh_ang_ = MatrixDevice(45, 1); real_iterations_ = 0; } GNormalDistributionsTransform::GNormalDistributionsTransform(const GNormalDistributionsTransform &other) { gauss_d1_ = other.gauss_d1_; gauss_d2_ = other.gauss_d2_; outlier_ratio_ = other.outlier_ratio_; j_ang_ = other.j_ang_; h_ang_ = other.h_ang_; dj_ang_ = other.dj_ang_; dh_ang_ = other.dh_ang_; step_size_ = other.step_size_; resolution_ = other.resolution_; trans_probability_ = other.trans_probability_; real_iterations_ = other.real_iterations_; voxel_grid_ = other.voxel_grid_; } GNormalDistributionsTransform::~GNormalDistributionsTransform() { dj_ang_.memFree(); dh_ang_.memFree(); } void GNormalDistributionsTransform::setStepSize(double step_size) { step_size_ = step_size; } void GNormalDistributionsTransform::setResolution(float resolution) { resolution_ = resolution; } void GNormalDistributionsTransform::setOutlierRatio(double olr) { outlier_ratio_ = olr; } double GNormalDistributionsTransform::getStepSize() const { return step_size_; } float GNormalDistributionsTransform::getResolution() const { return resolution_; } double GNormalDistributionsTransform::getOutlierRatio() const { return outlier_ratio_; } double GNormalDistributionsTransform::getTransformationProbability() const { return trans_probability_; } int GNormalDistributionsTransform::getRealIterations() { return real_iterations_; } double GNormalDistributionsTransform::auxilaryFunction_PsiMT(double a, double f_a, double f_0, double g_0, double mu) { return (f_a - f_0 - mu * g_0 * a); } double GNormalDistributionsTransform::auxilaryFunction_dPsiMT(double g_a, double g_0, double mu) { return (g_a - mu * g_0); } void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZI>::Ptr input) { // Copy input map data from the host memory to the GPU memory GRegistration::setInputTarget(input); LOG(INFO)<<"memcpy host to gpu."<<endl; // Build the voxel grid if (target_points_number_ != 0) { voxel_grid_.setLeafSize(resolution_, resolution_, resolution_); voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_); } LOG(INFO)<<"leafsize set."<<endl; } void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZ>::Ptr input) { // Copy input map data from the host memory to the GPU memory GRegistration::setInputTarget(input); // Build the voxel grid if (target_points_number_ != 0) { voxel_grid_.setLeafSize(resolution_, resolution_, resolution_); voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_); } } void GNormalDistributionsTransform::computeTransformation(const Eigen::Matrix<float, 4, 
4> &guess)
{
  if (dj_ang_.isEmpty()) {
    dj_ang_.memAlloc();
  }

  if (dh_ang_.isEmpty()) {
    dh_ang_.memAlloc();
  }

  nr_iterations_ = 0;
  converged_ = false;

  double gauss_c1, gauss_c2, gauss_d3;

  gauss_c1 = 10 * ( 1 - outlier_ratio_);
  gauss_c2 = outlier_ratio_ / pow(resolution_, 3);
  gauss_d3 = - log(gauss_c2);
  gauss_d1_ = -log(gauss_c1 + gauss_c2) - gauss_d3;
  gauss_d2_ = -2 * log((-log(gauss_c1 * exp(-0.5) + gauss_c2) - gauss_d3) / gauss_d1_);

  if (guess != Eigen::Matrix4f::Identity()) {
    final_transformation_ = guess;
    transformPointCloud(x_, y_, z_, trans_x_, trans_y_, trans_z_, points_number_, guess);
  }

  Eigen::Transform<float, 3, Eigen::Affine, Eigen::ColMajor> eig_transformation;
  eig_transformation.matrix() = final_transformation_;

  Eigen::Matrix<double, 6, 1> p, delta_p, score_gradient;
  Eigen::Vector3f init_translation = eig_transformation.translation();
  Eigen::Vector3f init_rotation = eig_transformation.rotation().eulerAngles(0, 1, 2);

  p << init_translation(0), init_translation(1), init_translation(2), init_rotation(0), init_rotation(1), init_rotation(2);

  Eigen::Matrix<double, 6, 6> hessian;

  double score = 0;
  double delta_p_norm;

  score = computeDerivatives(score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_, p);

  int loop_time = 0;

  while (!converged_) {
    previous_transformation_ = transformation_;

    Eigen::JacobiSVD<Eigen::Matrix<double, 6, 6>> sv(hessian, Eigen::ComputeFullU | Eigen::ComputeFullV);

    delta_p = sv.solve(-score_gradient);

    delta_p_norm = delta_p.norm();

    if (delta_p_norm == 0 || delta_p_norm != delta_p_norm) {
      trans_probability_ = score / static_cast<double>(points_number_);
      converged_ = delta_p_norm == delta_p_norm;
      return;
    }

    delta_p.normalize();
    delta_p_norm = computeStepLengthMT(p, delta_p, delta_p_norm, step_size_, transformation_epsilon_ / 2, score, score_gradient, hessian,
                                       trans_x_, trans_y_, trans_z_, points_number_);
    delta_p *= delta_p_norm;

    Eigen::Translation<float, 3> translation(static_cast<float>(delta_p(0)), static_cast<float>(delta_p(1)), static_cast<float>(delta_p(2)));
    Eigen::AngleAxis<float> tmp1(static_cast<float>(delta_p(3)), Eigen::Vector3f::UnitX());
    Eigen::AngleAxis<float> tmp2(static_cast<float>(delta_p(4)), Eigen::Vector3f::UnitY());
    Eigen::AngleAxis<float> tmp3(static_cast<float>(delta_p(5)), Eigen::Vector3f::UnitZ());
    Eigen::AngleAxis<float> tmp4(tmp1 * tmp2 * tmp3);

    transformation_ = (translation * tmp4).matrix();

    p = p + delta_p;

    //Not update visualizer

    if (nr_iterations_ > max_iterations_ || (nr_iterations_ && (std::fabs(delta_p_norm) < transformation_epsilon_)))
      converged_ = true;

    nr_iterations_++;
    loop_time++;
  }

  trans_probability_ = score / static_cast<double>(points_number_);
}

/* First step of computing point gradients */
__global__ void computePointGradients0(float *x, float *y, float *z, int points_num,
                                       int *valid_points, int valid_points_num,
                                       double *dj_ang,
                                       double *pg00, double *pg11, double *pg22,
                                       double *pg13, double *pg23, double *pg04, double *pg14)
{
  int id = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;
  __shared__ double j_ang[12];

  if (threadIdx.x < 12) {
    j_ang[threadIdx.x] = dj_ang[threadIdx.x];
  }

  __syncthreads();

  for (int i = id; i < valid_points_num; i += stride) {
    int pid = valid_points[i];

    //Original coordinates
    double o_x = static_cast<double>(x[pid]);
    double o_y = static_cast<double>(y[pid]);
    double o_z = static_cast<double>(z[pid]);

    //Set the 3x3 block start from (0, 0) to identity matrix
    pg00[i] = 1;
    pg11[i] = 1;
    pg22[i] = 1;

    //Compute point derivatives
    pg13[i] = o_x * j_ang[0] + o_y * j_ang[1]
+ o_z * j_ang[2]; pg23[i] = o_x * j_ang[3] + o_y * j_ang[4] + o_z * j_ang[5]; pg04[i] = o_x * j_ang[6] + o_y * j_ang[7] + o_z * j_ang[8]; pg14[i] = o_x * j_ang[9] + o_y * j_ang[10] + o_z * j_ang[11]; } } /* Second step of computing point gradients */ __global__ void computePointGradients1(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dj_ang, double *pg24, double *pg05, double *pg15, double *pg25) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double j_ang[12]; if (threadIdx.x < 12) { j_ang[threadIdx.x] = dj_ang[threadIdx.x + 12]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); //Compute point derivatives pg24[i] = o_x * j_ang[0] + o_y * j_ang[1] + o_z * j_ang[2]; pg05[i] = o_x * j_ang[3] + o_y * j_ang[4] + o_z * j_ang[5]; pg15[i] = o_x * j_ang[6] + o_y * j_ang[7] + o_z * j_ang[8]; pg25[i] = o_x * j_ang[9] + o_y * j_ang[10] + o_z * j_ang[11]; } } /* First step of computing point hessians */ __global__ void computePointHessian0(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph93, double *ph103, double *ph113, double *ph123, double *ph94, double *ph133, double *ph104, double *ph143, double *ph114, double *ph153, double *ph95, double *ph163, double *ph105, double *ph173, double *ph115) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[18]; if (threadIdx.x < 18) { h_ang[threadIdx.x] = dh_ang[threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph93[i] = 0; ph103[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph113[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph123[i] = ph94[i] = 0; ph133[i] = ph104[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; ph143[i] = ph114[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11]; ph153[i] = ph95[i] = 0; ph163[i] = ph105[i] = o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14]; ph173[i] = ph115[i] = o_x * h_ang[15] + o_y * h_ang[16] + o_z * h_ang[17]; } } __global__ void computePointHessian1(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph124, double *ph134, double *ph144, double *ph154, double *ph125, double *ph164, double *ph135, double *ph174, double *ph145) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[18]; if (threadIdx.x < 18) { h_ang[threadIdx.x] = dh_ang[18 + threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph124[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph134[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph144[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; ph154[i] = ph125[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11]; ph164[i] = ph135[i] = o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14]; ph174[i] = ph145[i] = o_x * 
h_ang[15] + o_y * h_ang[16] + o_z * h_ang[17]; } } __global__ void computePointHessian2(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph155, double *ph165, double *ph175) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[9]; if (threadIdx.x < 9) { h_ang[threadIdx.x] = dh_ang[36 + threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph155[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph165[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph175[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; } } /* compute score_inc list for input points. * The final score_inc is calculated by a reduction sum * on this score_inc list. */ __global__ void computeScoreList(int *starting_voxel_id, int *voxel_id, int valid_points_num, double *e_x_cov_x, double gauss_d1, double *score) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < valid_points_num; i += stride) { double score_inc = 0; for (int vid = starting_voxel_id[i]; vid < starting_voxel_id[i + 1]; vid++) { double tmp_ex = e_x_cov_x[vid]; score_inc += (tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex) ? 0 : -gauss_d1 * tmp_ex; } score[i] = score_inc; } } /* First step to compute score gradient list for input points */ __global__ void computeScoreGradientList(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, int voxel_num, double *e_x_cov_x, double *cov_dxd_pi, double gauss_d1, int valid_voxel_num, double *score_gradients) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int col = blockIdx.y; if (col < 6) { double *sg = score_gradients + col * valid_points_num; double *cov_dxd_pi_mat0 = cov_dxd_pi + col * valid_voxel_num; double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num; double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double tmp_sg = 0.0; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { tmp_ex *= gauss_d1; tmp_sg += ((d_x - centroid_x[vid]) * cov_dxd_pi_mat0[j] + (d_y - centroid_y[vid]) * cov_dxd_pi_mat1[j] + (d_z - centroid_z[vid]) * cov_dxd_pi_mat2[j]) * tmp_ex; } } sg[i] = tmp_sg; } } } /* Intermediate step to compute e_x_cov_x */ __global__ void computeExCovX(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centr_x, double *centr_y, double *centr_z, double gauss_d1, double gauss_d2, double *e_x_cov_x, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; 
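    // Descriptive note (added): for each voxel in this point's neighborhood, the loop below
    // evaluates the reusable exponential term exp(-gauss_d2 * (x' - mu)^T * Sigma^(-1) * (x' - mu) / 2),
    // where x' = (d_x, d_y, d_z) is the transformed point, mu = (centr_x, centr_y, centr_z) the voxel
    // centroid, and icov00..icov22 the voxel's inverse covariance. This is the common factor reused by
    // the score (Eq. 6.9) and its derivatives (Eqs. 6.12, 6.13) [Magnusson 2009].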
double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double t_x, t_y, t_z; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; t_x = d_x - centr_x[vid]; t_y = d_y - centr_y[vid]; t_z = d_z - centr_z[vid]; e_x_cov_x[j] = exp(-gauss_d2 * ((t_x * icov00[vid] + t_y * icov01[vid] + t_z * icov02[vid]) * t_x + ((t_x * icov10[vid] + t_y * icov11[vid] + t_z * icov12[vid]) * t_y) + ((t_x * icov20[vid] + t_y * icov21[vid] + t_z * icov22[vid]) * t_z)) / 2.0); } } } /* update e_x_cov_x - Reusable portion of Equation 6.12 and 6.13 [Magnusson 2009] */ __global__ void updateExCovX(double *e_x_cov_x, double gauss_d2, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < valid_voxel_num; i += stride) { e_x_cov_x[i] *= gauss_d2; } } /* compute cov_dxd_pi as reusable portion of Equation 6.12 and 6.13 [Magnusson 2009]*/ __global__ void computeCovDxdPi(int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *inverse_covariance, int voxel_num, double gauss_d1, double gauss_d2, double *point_gradients, double *cov_dxd_pi, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 3 && col < 6) { double *icov0 = inverse_covariance + row * 3 * voxel_num; double *icov1 = icov0 + voxel_num; double *icov2 = icov1 + voxel_num; double *cov_dxd_pi_tmp = cov_dxd_pi + (row * 6 + col) * valid_voxel_num; double *pg_tmp0 = point_gradients + col * valid_points_num; double *pg_tmp1 = pg_tmp0 + 6 * valid_points_num; double *pg_tmp2 = pg_tmp1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { double pg0 = pg_tmp0[i]; double pg1 = pg_tmp1[i]; double pg2 = pg_tmp2[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; cov_dxd_pi_tmp[j] = icov0[vid] * pg0 + icov1[vid] * pg1 + icov2[vid] * pg2; } } } } /* First step to compute hessian list for input points */ __global__ void computeHessianListS0(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22, double *point_gradients, double *tmp_hessian, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int col = blockIdx.y; if (col < 6) { double *tmp_pg0 = point_gradients + col * valid_points_num; double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num; double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num; double *tmp_h = tmp_hessian + col * valid_voxel_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double pg0 = tmp_pg0[i]; double pg1 = tmp_pg1[i]; double pg2 = tmp_pg2[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; tmp_h[j] = (d_x - centroid_x[vid]) * (icov00[vid] * pg0 + icov01[vid] * pg1 + icov02[vid] * pg2) + (d_y - centroid_y[vid]) * (icov10[vid] * pg0 + icov11[vid] * pg1 + icov12[vid] * pg2) + (d_z - centroid_z[vid]) * 
(icov20[vid] * pg0 + icov21[vid] * pg1 + icov22[vid] * pg2); } } } } /* Fourth step to compute hessian list */ __global__ void computeHessianListS1(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double gauss_d1, double gauss_d2, double *hessians, double *e_x_cov_x, double *tmp_hessian, double *cov_dxd_pi, double *point_gradients, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 6 && col < 6) { double *cov_dxd_pi_mat0 = cov_dxd_pi + row * valid_voxel_num; double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num; double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num; double *tmp_h = tmp_hessian + col * valid_voxel_num; double *h = hessians + (row * 6 + col) * valid_points_num; double *tmp_pg0 = point_gradients + col * valid_points_num; double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num; double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double pg0 = tmp_pg0[i]; double pg1 = tmp_pg1[i]; double pg2 = tmp_pg2[i]; double final_hessian = 0.0; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { //Transformed coordinates int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { double cov_dxd0 = cov_dxd_pi_mat0[j]; double cov_dxd1 = cov_dxd_pi_mat1[j]; double cov_dxd2 = cov_dxd_pi_mat2[j]; tmp_ex *= gauss_d1; final_hessian += -gauss_d2 * ((d_x - centroid_x[vid]) * cov_dxd0 + (d_y - centroid_y[vid]) * cov_dxd1 + (d_z - centroid_z[vid]) * cov_dxd2) * tmp_h[j] * tmp_ex; final_hessian += (pg0 * cov_dxd0 + pg1 * cov_dxd1 + pg2 * cov_dxd2) * tmp_ex; } } h[i] = final_hessian; } } } __global__ void computeHessianListS2(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double gauss_d1, double *e_x_cov_x, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22, double *point_hessians, double *hessians, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 6 && col < 6) { double *h = hessians + (row * 6 + col) * valid_points_num; double *tmp_ph0 = point_hessians + ((3 * row) * 6 + col) * valid_points_num; double *tmp_ph1 = tmp_ph0 + 6 * valid_points_num; double *tmp_ph2 = tmp_ph1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double ph0 = tmp_ph0[i]; double ph1 = tmp_ph1[i]; double ph2 = tmp_ph2[i]; double final_hessian = h[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { //Transformed coordinates int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { tmp_ex *= gauss_d1; final_hessian += (d_x - centroid_x[vid]) * (icov00[vid] * ph0 + icov01[vid] * ph1 
+ icov02[vid] * ph2) * tmp_ex; final_hessian += (d_y - centroid_y[vid]) * (icov10[vid] * ph0 + icov11[vid] * ph1 + icov12[vid] * ph2) * tmp_ex; final_hessian += (d_z - centroid_z[vid]) * (icov20[vid] * ph0 + icov21[vid] * ph1 + icov22[vid] * ph2) * tmp_ex; } } h[i] = final_hessian; } } } /* Compute sum of a list of matrices */ __global__ void matrixSum(double *matrix_list, int full_size, int half_size, int rows, int cols, int offset) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; for (int i = index; i < half_size && row < rows && col < cols; i += stride) { MatrixDevice left(rows, cols, offset, matrix_list + i); double *right_ptr = (i + half_size < full_size) ? matrix_list + i + half_size : NULL; MatrixDevice right(rows, cols, offset, right_ptr); if (right_ptr != NULL) { left(row, col) += right(row, col); } } } /* Compute sum of score_inc list */ __global__ void sumScore(double *score, int full_size, int half_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < half_size; i += stride) { score[i] += (i + half_size < full_size) ? score[i + half_size] : 0; } } double GNormalDistributionsTransform::computeDerivatives(Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num, Eigen::Matrix<double, 6, 1> pose, bool compute_hessian) { MatrixHost p(6, 1); for (int i = 0; i < 6; i++) { p(i) = pose(i, 0); } score_gradient.setZero (); hessian.setZero (); //Compute Angle Derivatives computeAngleDerivatives(p); //Radius Search int *valid_points, *voxel_id, *starting_voxel_id; int valid_voxel_num, valid_points_num; valid_points = voxel_id = starting_voxel_id = NULL; voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX, &valid_points, &starting_voxel_id, &voxel_id, &valid_voxel_num, &valid_points_num); double *covariance = voxel_grid_.getCovarianceList(); double *inverse_covariance = voxel_grid_.getInverseCovarianceList(); double *centroid = voxel_grid_.getCentroidList(); int *points_per_voxel = voxel_grid_.getPointsPerVoxelList(); int voxel_num = voxel_grid_.getVoxelNum(); if (valid_points_num == 0) return 0; //Update score gradient and hessian matrix double *gradients, *hessians, *point_gradients, *point_hessians, *score; checkCudaErrors(cudaMalloc(&gradients, sizeof(double) * valid_points_num * 6)); checkCudaErrors(cudaMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(cudaMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(cudaMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6)); checkCudaErrors(cudaMalloc(&score, sizeof(double) * valid_points_num)); checkCudaErrors(cudaMemset(gradients, 0, sizeof(double) * valid_points_num * 6)); checkCudaErrors(cudaMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(cudaMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(cudaMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6)); int block_x = (valid_points_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : valid_points_num; int grid_x = (valid_points_num - 1) / block_x + 1; dim3 grid; computePointGradients0<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients, point_gradients + valid_points_num * 7, point_gradients + valid_points_num * 14, point_gradients + valid_points_num * 9, point_gradients + valid_points_num * 15, point_gradients + valid_points_num * 4, point_gradients + valid_points_num * 10); checkCudaErrors(cudaGetLastError()); computePointGradients1<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients + valid_points_num * 16, point_gradients + valid_points_num * 5, point_gradients + valid_points_num * 11, point_gradients + valid_points_num * 17); checkCudaErrors(cudaGetLastError()); if (compute_hessian) { computePointHessian0<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63, point_hessians + valid_points_num * 69, point_hessians + valid_points_num * 75, point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81, point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87, point_hessians + valid_points_num * 70, point_hessians + valid_points_num * 93, point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99, point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105, point_hessians + valid_points_num * 71); checkCudaErrors(cudaGetLastError()); computePointHessian1<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82, point_hessians + valid_points_num * 88, point_hessians + valid_points_num * 94, point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100, point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106, point_hessians + valid_points_num * 89); checkCudaErrors(cudaGetLastError()); computePointHessian2<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 95, point_hessians + valid_points_num * 101, point_hessians + valid_points_num * 107); checkCudaErrors(cudaGetLastError()); } checkCudaErrors(cudaDeviceSynchronize()); double *tmp_hessian; checkCudaErrors(cudaMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6)); double *e_x_cov_x; checkCudaErrors(cudaMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num)); double *cov_dxd_pi; checkCudaErrors(cudaMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6)); computeExCovX<<<grid_x, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num); checkCudaErrors(cudaGetLastError()); computeScoreList<<<grid_x, block_x>>>(starting_voxel_id, voxel_id, valid_points_num, e_x_cov_x, gauss_d1_, score); checkCudaErrors(cudaGetLastError()); int block_x2 = (valid_voxel_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : valid_voxel_num; int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1; updateExCovX<<<grid_x2, block_x2>>>(e_x_cov_x, gauss_d2_, valid_voxel_num); checkCudaErrors(cudaGetLastError()); grid.x = grid_x; grid.y = 3; grid.z = 6; computeCovDxdPi<<<grid, block_x>>>(valid_points, starting_voxel_id, voxel_id, valid_points_num, inverse_covariance, voxel_num, gauss_d1_, gauss_d2_, point_gradients, cov_dxd_pi, valid_voxel_num); checkCudaErrors(cudaGetLastError()); grid.x = grid_x; grid.y = 6; grid.z = 1; computeScoreGradientList<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, voxel_num, e_x_cov_x, cov_dxd_pi, gauss_d1_, valid_voxel_num, gradients); checkCudaErrors(cudaGetLastError()); if (compute_hessian) { grid.y = 6; grid.z = 1; computeHessianListS0<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_gradients, tmp_hessian, valid_voxel_num); checkCudaErrors(cudaGetLastError()); grid.z = 6; computeHessianListS1<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, hessians, e_x_cov_x, tmp_hessian, cov_dxd_pi, point_gradients, valid_voxel_num); checkCudaErrors(cudaGetLastError()); computeHessianListS2<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_hessians, hessians, valid_voxel_num); checkCudaErrors(cudaGetLastError()); } int full_size = valid_points_num; int half_size = (full_size - 1) / 2 + 1; while (full_size > 1) { block_x = (half_size > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : half_size; grid_x = (half_size - 1) / block_x + 1; grid.x = grid_x; grid.y = 1; grid.z = 6; matrixSum<<<grid, block_x>>>(gradients, full_size, half_size, 1, 6, valid_points_num); checkCudaErrors(cudaGetLastError()); grid.y = 6; matrixSum<<<grid, block_x>>>(hessians, full_size, half_size, 6, 6, valid_points_num); checkCudaErrors(cudaGetLastError()); sumScore<<<grid_x, block_x>>>(score, full_size, half_size); checkCudaErrors(cudaGetLastError()); full_size = half_size; half_size = (full_size - 1) / 2 + 1; } checkCudaErrors(cudaDeviceSynchronize()); MatrixDevice dgrad(1, 6, valid_points_num, gradients), dhess(6, 6, valid_points_num, hessians); MatrixHost hgrad(1, 6), hhess(6, 6); hgrad.moveToHost(dgrad); hhess.moveToHost(dhess); for (int i = 0; i < 6; i++) { score_gradient(i) = hgrad(i); } for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { hessian(i, j) = hhess(i, j); } } double score_inc; checkCudaErrors(cudaMemcpy(&score_inc, score, sizeof(double), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(gradients)); checkCudaErrors(cudaFree(hessians)); checkCudaErrors(cudaFree(point_hessians)); checkCudaErrors(cudaFree(point_gradients)); checkCudaErrors(cudaFree(score)); checkCudaErrors(cudaFree(tmp_hessian)); checkCudaErrors(cudaFree(e_x_cov_x)); checkCudaErrors(cudaFree(cov_dxd_pi)); if (valid_points != NULL) checkCudaErrors(cudaFree(valid_points)); if (voxel_id != NULL) checkCudaErrors(cudaFree(voxel_id)); if (starting_voxel_id != NULL) checkCudaErrors(cudaFree(starting_voxel_id)); return score_inc; } void GNormalDistributionsTransform::computeAngleDerivatives(MatrixHost pose, bool compute_hessian) { double cx, cy, cz, sx, sy, sz; if (fabs(pose(3)) < 10e-5) { cx = 1.0; sx = 0.0; } else { cx = cos(pose(3)); sx = sin(pose(3)); } if (fabs(pose(4)) < 10e-5) { cy = 1.0; sy = 0.0; } else { cy = cos(pose(4)); sy = sin(pose(4)); } if (fabs(pose(5)) < 10e-5) { cz = 1.0; sz = 0.0; } else { cz = cos(pose(5)); sz = sin(pose(5)); } j_ang_(0) = -sx * sz + cx * sy * cz; j_ang_(1) = -sx * cz - cx * sy * sz; j_ang_(2) = -cx * cy; j_ang_(3) = cx * sz + sx * sy * cz; j_ang_(4) = cx * cz - sx * sy * sz; j_ang_(5) = -sx * cy; j_ang_(6) = -sy * cz; j_ang_(7) = sy * sz; j_ang_(8) = cy; j_ang_(9) = sx * cy * cz; j_ang_(10) = -sx * cy * sz; j_ang_(11) = sx * sy; j_ang_(12) = -cx * cy * cz; j_ang_(13) = cx * cy * sz; j_ang_(14) = -cx * sy; j_ang_(15) = -cy * sz; j_ang_(16) = -cy * cz; j_ang_(17) = 0; j_ang_(18) = cx * cz - sx * sy * sz; j_ang_(19) = -cx * sz - sx * sy * cz; j_ang_(20) = 0; j_ang_(21) = sx * cz + cx * sy * sz; j_ang_(22) = cx * sy * cz - sx * sz; j_ang_(23) = 0; j_ang_.moveToGpu(dj_ang_); if (compute_hessian) { h_ang_(0) = -cx * sz - sx * sy * cz; h_ang_(1) = -cx * cz + sx * sy * sz; h_ang_(2) = sx * cy; h_ang_(3) = -sx * sz + cx * sy * cz; h_ang_(4) = -cx * sy * sz - sx * cz; h_ang_(5) = -cx * cy; h_ang_(6) = cx * cy * cz; h_ang_(7) = -cx * cy * sz; h_ang_(8) = cx * sy; h_ang_(9) = sx * cy * cz; h_ang_(10) = -sx * cy * sz; h_ang_(11) = sx * sy; h_ang_(12) = -sx * cz - cx * sy * sz; h_ang_(13) = sx * sz - cx * sy * cz; h_ang_(14) = 0; h_ang_(15) = cx * cz - sx * sy * sz; h_ang_(16) = -sx * sy * cz - cx * sz; h_ang_(17) = 0; h_ang_(18) = -cy * cz; h_ang_(19) = cy * sz; h_ang_(20) = sy; h_ang_(21) = -sx * sy * cz; h_ang_(22) = sx * sy * sz; h_ang_(23) = sx * cy; h_ang_(24) = cx * sy * cz; h_ang_(25) = -cx * sy * sz; h_ang_(26) = -cx * cy; h_ang_(27) = sy * sz; h_ang_(28) = sy * cz; h_ang_(29) = 0; h_ang_(30) = -sx * cy * sz; h_ang_(31) = -sx * cy * cz; h_ang_(32) = 0; h_ang_(33) = 
cx * cy * sz; h_ang_(34) = cx * cy * cz; h_ang_(35) = 0; h_ang_(36) = -cy * cz; h_ang_(37) = cy * sz; h_ang_(38) = 0; h_ang_(39) = -cx * sz - sx * sy * cz; h_ang_(40) = -cx * cz + sx * sy * sz; h_ang_(41) = 0; h_ang_(42) = -sx * sz + cx * sy * cz; h_ang_(43) = -cx * sy * sz - sx * cz; h_ang_(44) = 0; h_ang_.moveToGpu(dh_ang_); } } __global__ void gpuTransform(float *in_x, float *in_y, float *in_z, float *trans_x, float *trans_y, float *trans_z, int point_num, MatrixDevice transform) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; float x, y, z; for (int i = idx; i < point_num; i += stride) { x = in_x[i]; y = in_y[i]; z = in_z[i]; trans_x[i] = transform(0, 0) * x + transform(0, 1) * y + transform(0, 2) * z + transform(0, 3); trans_y[i] = transform(1, 0) * x + transform(1, 1) * y + transform(1, 2) * z + transform(1, 3); trans_z[i] = transform(2, 0) * x + transform(2, 1) * y + transform(2, 2) * z + transform(2, 3); } } void GNormalDistributionsTransform::transformPointCloud(float *in_x, float *in_y, float *in_z, float *trans_x, float *trans_y, float *trans_z, int points_number, Eigen::Matrix<float, 4, 4> transform) { Eigen::Transform<float, 3, Eigen::Affine> t(transform); MatrixHost htrans(3, 4); MatrixDevice dtrans(3, 4); dtrans.memAlloc(); for (int i = 0; i < 3; i++) { for (int j = 0; j < 4; j++) { htrans(i, j) = t(i, j); } } htrans.moveToGpu(dtrans); if (points_number > 0) { int block_x = (points_number <= BLOCK_SIZE_X) ? points_number : BLOCK_SIZE_X; int grid_x = (points_number - 1) / block_x + 1; gpuTransform<<<grid_x, block_x >>>(in_x, in_y, in_z, trans_x, trans_y, trans_z, points_number, dtrans); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); } dtrans.memFree(); } double GNormalDistributionsTransform::computeStepLengthMT(const Eigen::Matrix<double, 6, 1> &x, Eigen::Matrix<double, 6, 1> &step_dir, double step_init, double step_max, double step_min, double &score, Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num) { double phi_0 = -score; double d_phi_0 = -(score_gradient.dot(step_dir)); Eigen::Matrix<double, 6, 1> x_t; if (d_phi_0 >= 0) { if (d_phi_0 == 0) return 0; else { d_phi_0 *= -1; step_dir *= -1; } } int max_step_iterations = 10; int step_iterations = 0; double mu = 1.e-4; double nu = 0.9; double a_l = 0, a_u = 0; double f_l = auxilaryFunction_PsiMT(a_l, phi_0, phi_0, d_phi_0, mu); double g_l = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu); double f_u = auxilaryFunction_PsiMT(a_u, phi_0, phi_0, d_phi_0, mu); double g_u = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu); bool interval_converged = (step_max - step_min) > 0, open_interval = true; double a_t = step_init; a_t = std::min(a_t, step_max); a_t = std::max(a_t, step_min); x_t = x + step_dir * a_t; Eigen::Translation<float, 3> translation(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))); Eigen::AngleAxis<float> tmp1(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()); Eigen::AngleAxis<float> tmp2(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()); Eigen::AngleAxis<float> tmp3(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ()); Eigen::AngleAxis<float> tmp4(tmp1 * tmp2 * tmp3); final_transformation_ = (translation * tmp4).matrix(); transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_); score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t); 
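  // Descriptive note (added): in the More-Thuente notation used below, phi(a) = -score(x + a * step_dir)
  // is the objective restricted to the search direction, d_phi its directional derivative, and
  // psi(a) = phi(a) - phi(0) - mu * phi'(0) * a (see auxilaryFunction_PsiMT) is the auxiliary function
  // used while the interval of uncertainty is still open [More, Thuente 1994].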
  double phi_t = -score;
  double d_phi_t = -(score_gradient.dot(step_dir));
  double psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
  double d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);

  while (!interval_converged && step_iterations < max_step_iterations && !(psi_t <= 0 && d_phi_t <= -nu * d_phi_0)) {
    if (open_interval) {
      a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
    } else {
      a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
    }

    a_t = (a_t < step_max) ? a_t : step_max;
    a_t = (a_t > step_min) ? a_t : step_min;

    x_t = x + step_dir * a_t;

    translation = Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2)));
    tmp1 = Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX());
    tmp2 = Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY());
    tmp3 = Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ());
    tmp4 = tmp1 * tmp2 * tmp3;

    final_transformation_ = (translation * tmp4).matrix();

    transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_);

    score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t, false);

    // Calculate phi(alpha_t+) and phi'(alpha_t+) at the new trial step
    phi_t = -score;
    d_phi_t = -(score_gradient.dot(step_dir));
    psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
    d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);

    if (open_interval && (psi_t <= 0 && d_psi_t >= 0)) {
      open_interval = false;

      f_l += phi_0 - mu * d_phi_0 * a_l;
      g_l += mu * d_phi_0;

      f_u += phi_0 - mu * d_phi_0 * a_u;
      g_u += mu * d_phi_0;
    }

    if (open_interval) {
      interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
    } else {
      interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
    }

    step_iterations++;
  }

  if (step_iterations) {
    computeHessian(hessian, trans_x, trans_y, trans_z, points_num, x_t);
  }

  real_iterations_ += step_iterations;

  return a_t;
}

//Copied from ndt.hpp
double GNormalDistributionsTransform::trialValueSelectionMT (double a_l, double f_l, double g_l,
                                                             double a_u, double f_u, double g_u,
                                                             double a_t, double f_t, double g_t)
{
  // Case 1 in Trial Value Selection [More, Thuente 1994]
  if (f_t > f_l) {
    // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
    // Equation 2.4.52 [Sun, Yuan 2006]
    double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
    double w = std::sqrt (z * z - g_t * g_l);
    // Equation 2.4.56 [Sun, Yuan 2006]
    double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);

    // Calculate the minimizer of the quadratic that interpolates f_l, f_t and g_l
    // Equation 2.4.2 [Sun, Yuan 2006]
    double a_q = a_l - 0.5 * (a_l - a_t) * g_l / (g_l - (f_l - f_t) / (a_l - a_t));

    if (std::fabs (a_c - a_l) < std::fabs (a_q - a_l))
      return (a_c);
    else
      return (0.5 * (a_q + a_c));
  }
  // Case 2 in Trial Value Selection [More, Thuente 1994]
  else if (g_t * g_l < 0) {
    // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
    // Equation 2.4.52 [Sun, Yuan 2006]
    double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
    double w = std::sqrt (z * z - g_t * g_l);
    // Equation 2.4.56 [Sun, Yuan 2006]
    double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);

    // Calculate the minimizer of the quadratic that interpolates f_l, g_l and g_t
    // Equation 2.4.5 [Sun, Yuan 2006]
    double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;

    if (std::fabs (a_c - a_t) >= std::fabs (a_s - a_t))
      return (a_c);
    else
return (a_s); } // Case 3 in Trial Value Selection [More, Thuente 1994] else if (std::fabs (g_t) <= std::fabs (g_l)) { // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l; double w = std::sqrt (z * z - g_t * g_l); double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w); // Calculate the minimizer of the quadratic that interpolates g_l and g_t // Equation 2.4.5 [Sun, Yuan 2006] double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l; double a_t_next; if (std::fabs (a_c - a_t) < std::fabs (a_s - a_t)) a_t_next = a_c; else a_t_next = a_s; if (a_t > a_l) return (std::min (a_t + 0.66 * (a_u - a_t), a_t_next)); else return (std::max (a_t + 0.66 * (a_u - a_t), a_t_next)); } // Case 4 in Trial Value Selection [More, Thuente 1994] else { // Calculate the minimizer of the cubic that interpolates f_u, f_t, g_u and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_u) / (a_t - a_u) - g_t - g_u; double w = std::sqrt (z * z - g_t * g_u); // Equation 2.4.56 [Sun, Yuan 2006] return (a_u + (a_t - a_u) * (w - g_u - z) / (g_t - g_u + 2 * w)); } } //Copied from ndt.hpp double GNormalDistributionsTransform::updateIntervalMT (double &a_l, double &f_l, double &g_l, double &a_u, double &f_u, double &g_u, double a_t, double f_t, double g_t) { // Case U1 in Update Algorithm and Case a in Modified Update Algorithm [More, Thuente 1994] if (f_t > f_l) { a_u = a_t; f_u = f_t; g_u = g_t; return (false); } // Case U2 in Update Algorithm and Case b in Modified Update Algorithm [More, Thuente 1994] else if (g_t * (a_l - a_t) > 0) { a_l = a_t; f_l = f_t; g_l = g_t; return (false); } // Case U3 in Update Algorithm and Case c in Modified Update Algorithm [More, Thuente 1994] else if (g_t * (a_l - a_t) < 0) { a_u = a_l; f_u = f_l; g_u = g_l; a_l = a_t; f_l = f_t; g_l = g_t; return (false); } // Interval Converged else return (true); } void GNormalDistributionsTransform::computeHessian(Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num, Eigen::Matrix<double, 6, 1> &p) { int *valid_points, *voxel_id, *starting_voxel_id; int valid_voxel_num, valid_points_num; //Radius Search voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX, &valid_points, &starting_voxel_id, &voxel_id, &valid_voxel_num, &valid_points_num); double *centroid = voxel_grid_.getCentroidList(); double *covariance = voxel_grid_.getCovarianceList(); double *inverse_covariance = voxel_grid_.getInverseCovarianceList(); int *points_per_voxel = voxel_grid_.getPointsPerVoxelList(); int voxel_num = voxel_grid_.getVoxelNum(); if (valid_points_num <= 0) return; //Update score gradient and hessian matrix double *hessians, *point_gradients, *point_hessians; checkCudaErrors(cudaMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(cudaMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(cudaMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6)); checkCudaErrors(cudaMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(cudaMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(cudaMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6)); int block_x = (valid_points_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : valid_points_num; int grid_x = (valid_points_num - 1) / block_x + 1; dim3 grid; computePointGradients0<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients, point_gradients + valid_points_num * 7, point_gradients + valid_points_num * 14, point_gradients + valid_points_num * 9, point_gradients + valid_points_num * 15, point_gradients + valid_points_num * 4, point_gradients + valid_points_num * 10); checkCudaErrors(cudaGetLastError()); computePointGradients1<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients + valid_points_num * 16, point_gradients + valid_points_num * 5, point_gradients + valid_points_num * 11, point_gradients + valid_points_num * 17); checkCudaErrors(cudaGetLastError()); computePointHessian0<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63, point_hessians + valid_points_num * 69, point_hessians + valid_points_num * 75, point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81, point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87, point_hessians + valid_points_num * 70, point_hessians + valid_points_num * 93, point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99, point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105, point_hessians + valid_points_num * 71); checkCudaErrors(cudaGetLastError()); computePointHessian1<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82, point_hessians + valid_points_num * 88, point_hessians + valid_points_num * 94, point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100, point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106, point_hessians + valid_points_num * 89); checkCudaErrors(cudaGetLastError()); computePointHessian2<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 95, point_hessians + valid_points_num * 101, point_hessians + valid_points_num * 107); checkCudaErrors(cudaGetLastError()); double *tmp_hessian; checkCudaErrors(cudaMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6)); double *e_x_cov_x; checkCudaErrors(cudaMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num)); double *cov_dxd_pi; checkCudaErrors(cudaMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6)); computeExCovX<<<grid_x, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num); checkCudaErrors(cudaGetLastError()); grid.x = grid_x; grid.y = 3; grid.z = 6; computeCovDxdPi<<<grid, block_x>>>(valid_points, starting_voxel_id, voxel_id, valid_points_num, inverse_covariance, voxel_num, gauss_d1_, gauss_d2_, point_gradients, cov_dxd_pi, valid_voxel_num); checkCudaErrors(cudaGetLastError()); int block_x2 = 
(valid_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_voxel_num; int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1; updateExCovX<<<grid_x2, block_x2>>>(e_x_cov_x, gauss_d2_, valid_voxel_num); checkCudaErrors(cudaGetLastError()); grid.y = 6; grid.z = 1; computeHessianListS0<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_gradients, tmp_hessian, valid_voxel_num); checkCudaErrors(cudaGetLastError()); grid.z = 6; computeHessianListS1<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, hessians, e_x_cov_x, tmp_hessian, cov_dxd_pi, point_gradients, valid_voxel_num); checkCudaErrors(cudaGetLastError()); computeHessianListS2<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_hessians, hessians, valid_voxel_num); checkCudaErrors(cudaGetLastError()); int full_size = valid_points_num; int half_size = (full_size - 1) / 2 + 1; while (full_size > 1) { block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size; grid_x = (half_size - 1) / block_x + 1; grid.x = grid_x; grid.y = 6; grid.z = 6; matrixSum<<<grid_x, block_x>>>(hessians, full_size, half_size, 6, 6, valid_points_num); full_size = half_size; half_size = (full_size - 1) / 2 + 1; } checkCudaErrors(cudaDeviceSynchronize()); MatrixDevice dhessian(6, 6, valid_points_num, hessians); MatrixHost hhessian(6, 6); hhessian.moveToHost(dhessian); for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { hessian(i, j) = hhessian(i, j); } } checkCudaErrors(cudaFree(hessians)); checkCudaErrors(cudaFree(point_hessians)); checkCudaErrors(cudaFree(point_gradients)); checkCudaErrors(cudaFree(tmp_hessian)); checkCudaErrors(cudaFree(e_x_cov_x)); checkCudaErrors(cudaFree(cov_dxd_pi)); if (valid_points != NULL) { checkCudaErrors(cudaFree(valid_points)); } if (voxel_id != NULL) { checkCudaErrors(cudaFree(voxel_id)); } if (starting_voxel_id != NULL) { checkCudaErrors(cudaFree(starting_voxel_id)); } dhessian.memFree(); } template <typename T> __global__ void gpuSum(T *input, int size, int half_size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < half_size; i += stride) { if (i + half_size < size) { input[i] += (half_size < size) ? 
input[i + half_size] : 0;
    }
  }
}

double GNormalDistributionsTransform::getFitnessScore(double max_range)
{
  double fitness_score = 0.0;

  float *trans_x, *trans_y, *trans_z;

  checkCudaErrors(cudaMalloc(&trans_x, sizeof(float) * points_number_));
  checkCudaErrors(cudaMalloc(&trans_y, sizeof(float) * points_number_));
  checkCudaErrors(cudaMalloc(&trans_z, sizeof(float) * points_number_));

  transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_number_, final_transformation_);

  int *valid_distance;
  checkCudaErrors(cudaMalloc(&valid_distance, sizeof(int) * points_number_));

  double *min_distance;
  checkCudaErrors(cudaMalloc(&min_distance, sizeof(double) * points_number_));

  voxel_grid_.nearestNeighborSearch(trans_x, trans_y, trans_z, points_number_, valid_distance, min_distance, max_range);

  int size = points_number_;
  int half_size;

  while (size > 1) {
    half_size = (size - 1) / 2 + 1;

    int block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
    int grid_x = (half_size - 1) / block_x + 1;

    gpuSum<double><<<grid_x, block_x>>>(min_distance, size, half_size);
    checkCudaErrors(cudaGetLastError());

    gpuSum<int><<<grid_x, block_x>>>(valid_distance, size, half_size);
    checkCudaErrors(cudaGetLastError());

    size = half_size;
  }

  checkCudaErrors(cudaDeviceSynchronize());

  int nr;

  checkCudaErrors(cudaMemcpy(&nr, valid_distance, sizeof(int), cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(&fitness_score, min_distance, sizeof(double), cudaMemcpyDeviceToHost));

  checkCudaErrors(cudaFree(trans_x));
  checkCudaErrors(cudaFree(trans_y));
  checkCudaErrors(cudaFree(trans_z));
  checkCudaErrors(cudaFree(valid_distance));
  checkCudaErrors(cudaFree(min_distance));

  if (nr > 0)
    return (fitness_score / nr);

  return DBL_MAX;
}
}
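/*
 * Usage sketch (illustrative addition, not part of the upstream file): a minimal example of how
 * this class is typically driven for scan-to-map matching. It assumes the GRegistration base
 * class exposes setInputSource(), align(guess) and getFinalTransformation(); `map_cloud`,
 * `scan_cloud` and `initial_guess` are hypothetical caller-supplied values. Guarded so it does
 * not take part in the normal build.
 */
#ifdef NDT_GPU_USAGE_EXAMPLE
static Eigen::Matrix4f ndt_gpu_align_example(pcl::PointCloud<pcl::PointXYZ>::Ptr map_cloud,
                                             pcl::PointCloud<pcl::PointXYZ>::Ptr scan_cloud,
                                             const Eigen::Matrix4f &initial_guess)
{
  gpu::GNormalDistributionsTransform ndt;

  ndt.setResolution(1.0f);    // leaf size of the target voxel grid
  ndt.setStepSize(0.1);       // maximum More-Thuente step length
  ndt.setOutlierRatio(0.55);  // mixing weight of the uniform outlier distribution

  ndt.setInputTarget(map_cloud);   // builds the GPU voxel grid from the map
  ndt.setInputSource(scan_cloud);  // scan to be registered against the map

  ndt.align(initial_guess);        // runs computeTransformation() internally

  return ndt.getFinalTransformation();
}
#endif  // NDT_GPU_USAGE_EXAMPLE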
e6a4d88005a0d5aba10ca2b709c4720bad5ef75b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017 XGBoost contributors */ #include <thrust/reduce.h> #include <thrust/execution_policy.h> #include <thrust/sequence.h> #include <xgboost/tree_updater.h> #include <algorithm> #include <memory> #include <queue> #include <utility> #include <vector> #include "../common/compressed_iterator.h" #include "../common/device_helpers.cuh" #include "../common/hist_util.h" #include "../common/timer.h" #include "param.h" #include "updater_gpu_common.cuh" namespace xgboost { namespace tree { DMLC_REGISTRY_FILE_TAG(updater_gpu_hist_experimental); typedef bst_gpair_precise gpair_sum_t; template <int BLOCK_THREADS, typename reduce_t, typename temp_storage_t> __device__ gpair_sum_t ReduceFeature(const gpair_sum_t* begin, const gpair_sum_t* end, temp_storage_t* temp_storage) { __shared__ cub::Uninitialized<gpair_sum_t> uninitialized_sum; gpair_sum_t& shared_sum = uninitialized_sum.Alias(); gpair_sum_t local_sum = gpair_sum_t(); for (auto itr = begin; itr < end; itr += BLOCK_THREADS) { bool thread_active = itr + threadIdx.x < end; // Scan histogram gpair_sum_t bin = thread_active ? *(itr + threadIdx.x) : gpair_sum_t(); local_sum += reduce_t(temp_storage->sum_reduce).Reduce(bin, hipcub::Sum()); } if (threadIdx.x == 0) { shared_sum = local_sum; } __syncthreads(); return shared_sum; } template <int BLOCK_THREADS, typename reduce_t, typename scan_t, typename max_reduce_t, typename temp_storage_t> __device__ void EvaluateFeature(int fidx, const gpair_sum_t* hist, const int* feature_segments, float min_fvalue, const float* gidx_fvalue_map, DeviceSplitCandidate* best_split, const DeviceNodeStats& node, const GPUTrainingParam& param, temp_storage_t* temp_storage) { int gidx_begin = feature_segments[fidx]; int gidx_end = feature_segments[fidx + 1]; gpair_sum_t feature_sum = ReduceFeature<BLOCK_THREADS, reduce_t>( hist + gidx_begin, hist + gidx_end, temp_storage); auto prefix_op = SumCallbackOp<gpair_sum_t>(); for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += BLOCK_THREADS) { bool thread_active = scan_begin + threadIdx.x < gidx_end; gpair_sum_t bin = thread_active ? hist[scan_begin + threadIdx.x] : gpair_sum_t(); scan_t(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op); // Calculate gain gpair_sum_t parent_sum = gpair_sum_t(node.sum_gradients); gpair_sum_t missing = parent_sum - feature_sum; bool missing_left = true; const float null_gain = -FLT_MAX; float gain = null_gain; if (thread_active) { gain = loss_chg_missing(bin, missing, parent_sum, node.root_gain, param, missing_left); } __syncthreads(); // Find thread with best gain hipcub::KeyValuePair<int, float> tuple(threadIdx.x, gain); hipcub::KeyValuePair<int, float> best = max_reduce_t(temp_storage->max_reduce).Reduce(tuple, hipcub::ArgMax()); __shared__ hipcub::KeyValuePair<int, float> block_max; if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { int gidx = scan_begin + threadIdx.x; float fvalue = gidx == gidx_begin ? min_fvalue : gidx_fvalue_map[gidx - 1]; gpair_sum_t left = missing_left ? bin + missing : bin; gpair_sum_t right = parent_sum - left; best_split->Update(gain, missing_left ? 
LeftDir : RightDir, fvalue, fidx, left, right, param); } __syncthreads(); } } template <int BLOCK_THREADS> __global__ void evaluate_split_kernel( const gpair_sum_t* d_hist, int nidx, uint64_t n_features, DeviceNodeStats nodes, const int* d_feature_segments, const float* d_fidx_min_map, const float* d_gidx_fvalue_map, GPUTrainingParam gpu_param, DeviceSplitCandidate* d_split) { typedef hipcub::KeyValuePair<int, float> ArgMaxT; typedef hipcub::BlockScan<gpair_sum_t, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT; typedef hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT; typedef hipcub::BlockReduce<gpair_sum_t, BLOCK_THREADS> SumReduceT; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate& best_split = uninitialized_split.Alias(); __shared__ TempStorage temp_storage; if (threadIdx.x == 0) { best_split = DeviceSplitCandidate(); } __syncthreads(); auto fidx = blockIdx.x; EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>( fidx, d_hist, d_feature_segments, d_fidx_min_map[fidx], d_gidx_fvalue_map, &best_split, nodes, gpu_param, &temp_storage); __syncthreads(); if (threadIdx.x == 0) { // Record best loss d_split[fidx] = best_split; } } // Find a gidx value for a given feature otherwise return -1 if not found template <typename gidx_iter_t> __device__ int BinarySearchRow(bst_uint begin, bst_uint end, gidx_iter_t data, int fidx_begin, int fidx_end) { bst_uint previous_middle = UINT32_MAX; while (end != begin) { auto middle = begin + (end - begin) / 2; if (middle == previous_middle) { break; } previous_middle = middle; auto gidx = data[middle]; if (gidx >= fidx_begin && gidx < fidx_end) { return gidx; } else if (gidx < fidx_begin) { begin = middle; } else { end = middle; } } // Value is missing return -1; } struct DeviceHistogram { dh::bulk_allocator<dh::memory_type::DEVICE> ba; dh::dvec<gpair_sum_t> data; int n_bins; void Init(int device_idx, int max_nodes, int n_bins, bool silent) { this->n_bins = n_bins; ba.allocate(device_idx, silent, &data, size_t(max_nodes) * size_t(n_bins)); } void Reset() { data.fill(gpair_sum_t()); } gpair_sum_t* GetHistPtr(int nidx) { return data.data() + nidx * n_bins; } void PrintNidx(int nidx) const { auto h_data = data.as_vector(); std::cout << "nidx " << nidx << ":\n"; for (int i = n_bins * nidx; i < n_bins * (nidx + 1); i++) { std::cout << h_data[i] << " "; } std::cout << "\n"; } }; // Manage memory for a single GPU struct DeviceShard { struct Segment { size_t begin; size_t end; Segment() : begin(0), end(0) {} Segment(size_t begin, size_t end) : begin(begin), end(end) { CHECK_GE(end, begin); } size_t Size() const { return end - begin; } }; int device_idx; int normalised_device_idx; // Device index counting from param.gpu_id dh::bulk_allocator<dh::memory_type::DEVICE> ba; dh::dvec<common::compressed_byte_t> gidx_buffer; dh::dvec<bst_gpair> gpair; dh::dvec2<bst_uint> ridx; // Row index relative to this shard dh::dvec2<int> position; std::vector<Segment> ridx_segments; dh::dvec<int> feature_segments; dh::dvec<float> gidx_fvalue_map; dh::dvec<float> min_fvalue; std::vector<bst_gpair> node_sum_gradients; common::CompressedIterator<uint32_t> gidx; int row_stride; bst_uint row_begin_idx; // The row offset for this shard bst_uint row_end_idx; bst_uint n_rows; int n_bins; int null_gidx_value; DeviceHistogram hist; TrainParam param; int64_t* tmp_pinned; // 
Small amount of staging memory std::vector<hipStream_t> streams; dh::CubMemory temp_memory; DeviceShard(int device_idx, int normalised_device_idx, const common::GHistIndexMatrix& gmat, bst_uint row_begin, bst_uint row_end, int n_bins, TrainParam param) : device_idx(device_idx), normalised_device_idx(normalised_device_idx), row_begin_idx(row_begin), row_end_idx(row_end), n_rows(row_end - row_begin), n_bins(n_bins), null_gidx_value(n_bins), param(param) { // Convert to ELLPACK matrix representation int max_elements_row = 0; for (auto i = row_begin; i < row_end; i++) { max_elements_row = (std::max)(max_elements_row, static_cast<int>(gmat.row_ptr[i + 1] - gmat.row_ptr[i])); } row_stride = max_elements_row; std::vector<int> ellpack_matrix(row_stride * n_rows, null_gidx_value); for (auto i = row_begin; i < row_end; i++) { int row_count = 0; for (auto j = gmat.row_ptr[i]; j < gmat.row_ptr[i + 1]; j++) { ellpack_matrix[(i - row_begin) * row_stride + row_count] = gmat.index[j]; row_count++; } } // Allocate int num_symbols = n_bins + 1; size_t compressed_size_bytes = common::CompressedBufferWriter::CalculateBufferSize( ellpack_matrix.size(), num_symbols); int max_nodes = param.max_leaves > 0 ? param.max_leaves * 2 : n_nodes(param.max_depth); ba.allocate(device_idx, param.silent, &gidx_buffer, compressed_size_bytes, &gpair, n_rows, &ridx, n_rows, &position, n_rows, &feature_segments, gmat.cut->row_ptr.size(), &gidx_fvalue_map, gmat.cut->cut.size(), &min_fvalue, gmat.cut->min_val.size()); gidx_fvalue_map = gmat.cut->cut; min_fvalue = gmat.cut->min_val; feature_segments = gmat.cut->row_ptr; node_sum_gradients.resize(max_nodes); ridx_segments.resize(max_nodes); // Compress gidx common::CompressedBufferWriter cbw(num_symbols); std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size()); cbw.Write(host_buffer.data(), ellpack_matrix.begin(), ellpack_matrix.end()); gidx_buffer = host_buffer; gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), num_symbols); common::CompressedIterator<uint32_t> ci_host(host_buffer.data(), num_symbols); // Init histogram hist.Init(device_idx, max_nodes, gmat.cut->row_ptr.back(), param.silent); dh::safe_cuda(hipHostMalloc(&tmp_pinned, sizeof(int64_t))); } ~DeviceShard() { for (auto& stream : streams) { dh::safe_cuda(hipStreamDestroy(stream)); } dh::safe_cuda(hipHostFree(tmp_pinned)); } // Get vector of at least n initialised streams std::vector<hipStream_t>& GetStreams(int n) { if (n > streams.size()) { for (auto& stream : streams) { dh::safe_cuda(hipStreamDestroy(stream)); } streams.clear(); streams.resize(n); for (auto& stream : streams) { dh::safe_cuda(hipStreamCreate(&stream)); } } return streams; } // Reset values for each update iteration void Reset(const std::vector<bst_gpair>& host_gpair) { dh::safe_cuda(hipSetDevice(device_idx)); position.current_dvec().fill(0); std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), bst_gpair()); thrust::sequence(ridx.current_dvec().tbegin(), ridx.current_dvec().tend()); std::fill(ridx_segments.begin(), ridx_segments.end(), Segment(0, 0)); ridx_segments.front() = Segment(0, ridx.size()); this->gpair.copy(host_gpair.begin() + row_begin_idx, host_gpair.begin() + row_end_idx); subsample_gpair(&gpair, param.subsample, row_begin_idx); hist.Reset(); } void BuildHist(int nidx) { auto segment = ridx_segments[nidx]; auto d_node_hist = hist.GetHistPtr(nidx); auto d_gidx = gidx; auto d_ridx = ridx.current(); auto d_gpair = gpair.data(); auto row_stride = this->row_stride; auto null_gidx_value = 
this->null_gidx_value; auto n_elements = segment.Size() * row_stride; dh::launch_n(device_idx, n_elements, [=] __device__(size_t idx) { int ridx = d_ridx[(idx / row_stride) + segment.begin]; int gidx = d_gidx[ridx * row_stride + idx % row_stride]; if (gidx != null_gidx_value) { AtomicAddGpair(d_node_hist + gidx, d_gpair[ridx]); } }); } void SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { auto d_node_hist_parent = hist.GetHistPtr(nidx_parent); auto d_node_hist_histogram = hist.GetHistPtr(nidx_histogram); auto d_node_hist_subtraction = hist.GetHistPtr(nidx_subtraction); dh::launch_n(device_idx, hist.n_bins, [=] __device__(size_t idx) { d_node_hist_subtraction[idx] = d_node_hist_parent[idx] - d_node_hist_histogram[idx]; }); } __device__ void CountLeft(int64_t* d_count, int val, int left_nidx) { unsigned ballot = __ballot(val == left_nidx); if (threadIdx.x % 32 == 0) { atomicAdd(reinterpret_cast<unsigned long long*>(d_count), // NOLINT static_cast<unsigned long long>(__popc(ballot))); // NOLINT } } void UpdatePosition(int nidx, int left_nidx, int right_nidx, int fidx, int split_gidx, bool default_dir_left, bool is_dense, int fidx_begin, int fidx_end) { dh::safe_cuda(hipSetDevice(device_idx)); temp_memory.LazyAllocate(sizeof(int64_t)); auto d_left_count = temp_memory.Pointer<int64_t>(); dh::safe_cuda(hipMemset(d_left_count, 0, sizeof(int64_t))); auto segment = ridx_segments[nidx]; auto d_ridx = ridx.current(); auto d_position = position.current(); auto d_gidx = gidx; auto row_stride = this->row_stride; dh::launch_n<1, 512>( device_idx, segment.Size(), [=] __device__(bst_uint idx) { idx += segment.begin; auto ridx = d_ridx[idx]; auto row_begin = row_stride * ridx; auto row_end = row_begin + row_stride; auto gidx = -1; if (is_dense) { gidx = d_gidx[row_begin + fidx]; } else { gidx = BinarySearchRow(row_begin, row_end, d_gidx, fidx_begin, fidx_end); } int position; if (gidx >= 0) { // Feature is found position = gidx <= split_gidx ? left_nidx : right_nidx; } else { // Feature is missing position = default_dir_left ? 
left_nidx : right_nidx; } CountLeft(d_left_count, position, left_nidx); d_position[idx] = position; }); dh::safe_cuda(hipMemcpy(tmp_pinned, d_left_count, sizeof(int64_t), hipMemcpyDeviceToHost)); auto left_count = *tmp_pinned; SortPosition(segment, left_nidx, right_nidx); // dh::safe_cuda(hipStreamSynchronize(stream)); ridx_segments[left_nidx] = Segment(segment.begin, segment.begin + left_count); ridx_segments[right_nidx] = Segment(segment.begin + left_count, segment.end); } void SortPosition(const Segment& segment, int left_nidx, int right_nidx) { int min_bits = 0; int max_bits = static_cast<int>( ::ceil(std::log2((std::max)(left_nidx, right_nidx) + 1))); size_t temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, position.current() + segment.begin, position.other() + segment.begin, ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size(), min_bits, max_bits); temp_memory.LazyAllocate(temp_storage_bytes); hipcub::DeviceRadixSort::SortPairs( temp_memory.d_temp_storage, temp_memory.temp_storage_bytes, position.current() + segment.begin, position.other() + segment.begin, ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size(), min_bits, max_bits); dh::safe_cuda(hipMemcpy( position.current() + segment.begin, position.other() + segment.begin, segment.Size() * sizeof(int), hipMemcpyDeviceToDevice)); dh::safe_cuda(hipMemcpy( ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size() * sizeof(bst_uint), hipMemcpyDeviceToDevice)); } }; class GPUHistMakerExperimental : public TreeUpdater { public: struct ExpandEntry; GPUHistMakerExperimental() : initialised(false) {} ~GPUHistMakerExperimental() {} void Init( const std::vector<std::pair<std::string, std::string>>& args) override { param.InitAllowUnknown(args); CHECK(param.n_gpus != 0) << "Must have at least one device"; n_devices = param.n_gpus; dh::check_compute_capability(); if (param.grow_policy == TrainParam::kLossGuide) { qexpand_.reset(new ExpandQueue(loss_guide)); } else { qexpand_.reset(new ExpandQueue(depth_wise)); } monitor.Init("updater_gpu_hist_experimental", param.debug_verbose); } void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { monitor.Start("Update"); GradStats::CheckInfo(dmat->info()); // rescale learning rate according to size of trees float lr = param.learning_rate; param.learning_rate = lr / trees.size(); // build tree try { for (size_t i = 0; i < trees.size(); ++i) { this->UpdateTree(gpair, dmat, trees[i]); } } catch (const std::exception& e) { LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl; } param.learning_rate = lr; monitor.Stop("Update"); } void InitDataOnce(DMatrix* dmat) { info = &dmat->info(); monitor.Start("Quantiles"); hmat_.Init(dmat, param.max_bin); gmat_.cut = &hmat_; gmat_.Init(dmat); monitor.Stop("Quantiles"); n_bins = hmat_.row_ptr.back(); int n_devices = dh::n_devices(param.n_gpus, info->num_row); bst_uint row_begin = 0; bst_uint shard_size = ::ceil(static_cast<double>(info->num_row) / n_devices); std::vector<int> dList(n_devices); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices(); dList[d_idx] = device_idx; } reducer.Init(dList); // Partition input matrix into row segments std::vector<size_t> row_segments; shards.resize(n_devices); row_segments.push_back(0); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { bst_uint row_end = ::min(static_cast<size_t>(row_begin + shard_size), 
info->num_row); row_segments.push_back(row_end); row_begin = row_end; } // Create device shards omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id] = std::unique_ptr<DeviceShard>( new DeviceShard(dList[cpu_thread_id], cpu_thread_id, gmat_, row_segments[cpu_thread_id], row_segments[cpu_thread_id + 1], n_bins, param)); } initialised = true; } void InitData(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const RegTree& tree) { monitor.Start("InitDataOnce"); if (!initialised) { CheckGradientMax(gpair); this->InitDataOnce(dmat); } monitor.Stop("InitDataOnce"); column_sampler.Init(info->num_col, param); // Copy gpair & reset memory monitor.Start("InitDataReset"); omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id]->Reset(gpair); } monitor.Stop("InitDataReset"); } void AllReduceHist(int nidx) { for (auto& shard : shards) { auto d_node_hist = shard->hist.GetHistPtr(nidx); reducer.AllReduceSum( shard->normalised_device_idx, reinterpret_cast<gpair_sum_t::value_t*>(d_node_hist), reinterpret_cast<gpair_sum_t::value_t*>(d_node_hist), n_bins * (sizeof(gpair_sum_t) / sizeof(gpair_sum_t::value_t))); } reducer.Synchronize(); } void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right) { size_t left_node_max_elements = 0; size_t right_node_max_elements = 0; for (auto& shard : shards) { left_node_max_elements = (std::max)( left_node_max_elements, shard->ridx_segments[nidx_left].Size()); right_node_max_elements = (std::max)( right_node_max_elements, shard->ridx_segments[nidx_right].Size()); } auto build_hist_nidx = nidx_left; auto subtraction_trick_nidx = nidx_right; if (right_node_max_elements < left_node_max_elements) { build_hist_nidx = nidx_right; subtraction_trick_nidx = nidx_left; } for (auto& shard : shards) { shard->BuildHist(build_hist_nidx); } this->AllReduceHist(build_hist_nidx); for (auto& shard : shards) { shard->SubtractionTrick(nidx_parent, build_hist_nidx, subtraction_trick_nidx); } } // Returns best loss std::vector<DeviceSplitCandidate> EvaluateSplits( const std::vector<int>& nidx_set, RegTree* p_tree) { auto columns = info->num_col; std::vector<DeviceSplitCandidate> best_splits(nidx_set.size()); std::vector<DeviceSplitCandidate> candidate_splits(nidx_set.size() * columns); // Use first device auto& shard = shards.front(); dh::safe_cuda(hipSetDevice(shard->device_idx)); shard->temp_memory.LazyAllocate(sizeof(DeviceSplitCandidate) * columns * nidx_set.size()); auto d_split = shard->temp_memory.Pointer<DeviceSplitCandidate>(); auto& streams = shard->GetStreams(static_cast<int>(nidx_set.size())); // Use streams to process nodes concurrently for (auto i = 0; i < nidx_set.size(); i++) { auto nidx = nidx_set[i]; DeviceNodeStats node(shard->node_sum_gradients[nidx], nidx, param); const int BLOCK_THREADS = 256; hipLaunchKernelGGL(( evaluate_split_kernel<BLOCK_THREADS>) , dim3(uint32_t(columns)), dim3(BLOCK_THREADS), 0, streams[i], shard->hist.GetHistPtr(nidx), nidx, info->num_col, node, shard->feature_segments.data(), shard->min_fvalue.data(), shard->gidx_fvalue_map.data(), GPUTrainingParam(param), d_split + i * columns); } dh::safe_cuda( hipMemcpy(candidate_splits.data(), shard->temp_memory.d_temp_storage, sizeof(DeviceSplitCandidate) * columns * nidx_set.size(), hipMemcpyDeviceToHost)); for (auto i = 0; i < nidx_set.size(); i++) { auto nidx = nidx_set[i]; DeviceSplitCandidate nidx_best; for (auto fidx = 0; fidx < columns; fidx++) { auto& 
candidate = candidate_splits[i * columns + fidx]; if (column_sampler.ColumnUsed(candidate.findex, p_tree->GetDepth(nidx))) { nidx_best.Update(candidate_splits[i * columns + fidx], param); } } best_splits[i] = nidx_best; } return std::move(best_splits); } void InitRoot(const std::vector<bst_gpair>& gpair, RegTree* p_tree) { auto root_nidx = 0; // Sum gradients std::vector<bst_gpair> tmp_sums(shards.size()); omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); dh::safe_cuda(hipSetDevice(shards[cpu_thread_id]->device_idx)); tmp_sums[cpu_thread_id] = thrust::reduce(thrust::hip::par(shards[cpu_thread_id]->temp_memory), shards[cpu_thread_id]->gpair.tbegin(), shards[cpu_thread_id]->gpair.tend()); } auto sum_gradient = std::accumulate(tmp_sums.begin(), tmp_sums.end(), bst_gpair()); // Generate root histogram for (auto& shard : shards) { shard->BuildHist(root_nidx); } this->AllReduceHist(root_nidx); // Remember root stats p_tree->stat(root_nidx).sum_hess = sum_gradient.GetHess(); p_tree->stat(root_nidx).base_weight = CalcWeight(param, sum_gradient); // Store sum gradients for (auto& shard : shards) { shard->node_sum_gradients[root_nidx] = sum_gradient; } // Generate first split auto splits = this->EvaluateSplits({root_nidx}, p_tree); qexpand_->push( ExpandEntry(root_nidx, p_tree->GetDepth(root_nidx), splits.front(), 0)); } void UpdatePosition(const ExpandEntry& candidate, RegTree* p_tree) { auto nidx = candidate.nid; auto left_nidx = (*p_tree)[nidx].cleft(); auto right_nidx = (*p_tree)[nidx].cright(); // convert floating-point split_pt into corresponding bin_id // split_cond = -1 indicates that split_pt is less than all known cut points auto split_gidx = -1; auto fidx = candidate.split.findex; auto default_dir_left = candidate.split.dir == LeftDir; auto fidx_begin = hmat_.row_ptr[fidx]; auto fidx_end = hmat_.row_ptr[fidx + 1]; for (auto i = fidx_begin; i < fidx_end; ++i) { if (candidate.split.fvalue == hmat_.cut[i]) { split_gidx = static_cast<int32_t>(i); } } auto is_dense = info->num_nonzero == info->num_row * info->num_col; omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id]->UpdatePosition(nidx, left_nidx, right_nidx, fidx, split_gidx, default_dir_left, is_dense, fidx_begin, fidx_end); } } void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) { // Add new leaves RegTree& tree = *p_tree; tree.AddChilds(candidate.nid); auto& parent = tree[candidate.nid]; parent.set_split(candidate.split.findex, candidate.split.fvalue, candidate.split.dir == LeftDir); tree.stat(candidate.nid).loss_chg = candidate.split.loss_chg; // Configure left child auto left_weight = CalcWeight(param, candidate.split.left_sum); tree[parent.cleft()].set_leaf(left_weight * param.learning_rate, 0); tree.stat(parent.cleft()).base_weight = left_weight; tree.stat(parent.cleft()).sum_hess = candidate.split.left_sum.GetHess(); // Configure right child auto right_weight = CalcWeight(param, candidate.split.right_sum); tree[parent.cright()].set_leaf(right_weight * param.learning_rate, 0); tree.stat(parent.cright()).base_weight = right_weight; tree.stat(parent.cright()).sum_hess = candidate.split.right_sum.GetHess(); // Store sum gradients for (auto& shard : shards) { shard->node_sum_gradients[parent.cleft()] = candidate.split.left_sum; shard->node_sum_gradients[parent.cright()] = candidate.split.right_sum; } this->UpdatePosition(candidate, p_tree); } void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* 
p_fmat, RegTree* p_tree) { auto& tree = *p_tree; monitor.Start("InitData"); this->InitData(gpair, p_fmat, *p_tree); monitor.Stop("InitData"); monitor.Start("InitRoot"); this->InitRoot(gpair, p_tree); monitor.Stop("InitRoot"); auto timestamp = qexpand_->size(); auto num_leaves = 1; while (!qexpand_->empty()) { auto candidate = qexpand_->top(); qexpand_->pop(); if (!candidate.IsValid(param, num_leaves)) continue; // std::cout << candidate; monitor.Start("ApplySplit"); this->ApplySplit(candidate, p_tree); monitor.Stop("ApplySplit"); num_leaves++; auto left_child_nidx = tree[candidate.nid].cleft(); auto right_child_nidx = tree[candidate.nid].cright(); // Only create child entries if needed if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx), num_leaves)) { monitor.Start("BuildHist"); this->BuildHistLeftRight(candidate.nid, left_child_nidx, right_child_nidx); monitor.Stop("BuildHist"); monitor.Start("EvaluateSplits"); auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx}, p_tree); qexpand_->push(ExpandEntry(left_child_nidx, tree.GetDepth(left_child_nidx), splits[0], timestamp++)); qexpand_->push(ExpandEntry(right_child_nidx, tree.GetDepth(right_child_nidx), splits[1], timestamp++)); monitor.Stop("EvaluateSplits"); } } } struct ExpandEntry { int nid; int depth; DeviceSplitCandidate split; uint64_t timestamp; ExpandEntry(int nid, int depth, const DeviceSplitCandidate& split, uint64_t timestamp) : nid(nid), depth(depth), split(split), timestamp(timestamp) {} bool IsValid(const TrainParam& param, int num_leaves) const { if (split.loss_chg <= rt_eps) return false; if (param.max_depth > 0 && depth == param.max_depth) return false; if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false; return true; } static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) { if (param.max_depth > 0 && depth == param.max_depth) return false; if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false; return true; } friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) { os << "ExpandEntry: \n"; os << "nidx: " << e.nid << "\n"; os << "depth: " << e.depth << "\n"; os << "loss: " << e.split.loss_chg << "\n"; os << "left_sum: " << e.split.left_sum << "\n"; os << "right_sum: " << e.split.right_sum << "\n"; return os; } }; inline static bool depth_wise(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.depth == rhs.depth) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } else { return lhs.depth > rhs.depth; // favor small depth } } inline static bool loss_guide(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.split.loss_chg == rhs.split.loss_chg) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } else { return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg } } TrainParam param; common::HistCutMatrix hmat_; common::GHistIndexMatrix gmat_; MetaInfo* info; bool initialised; int n_devices; int n_bins; std::vector<std::unique_ptr<DeviceShard>> shards; ColumnSampler column_sampler; typedef std::priority_queue<ExpandEntry, std::vector<ExpandEntry>, std::function<bool(ExpandEntry, ExpandEntry)>> ExpandQueue; std::unique_ptr<ExpandQueue> qexpand_; common::Monitor monitor; dh::AllReducer reducer; }; XGBOOST_REGISTER_TREE_UPDATER(GPUHistMakerExperimental, "grow_gpu_hist_experimental") .describe("Grow tree with GPU.") .set_body([]() { return new GPUHistMakerExperimental(); }); } // namespace tree } // namespace xgboost
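// Editor's illustrative sketch (not part of the file above): BuildHistLeftRight in the
// updater builds a full histogram only for the child with fewer rows and then derives the
// sibling's histogram via DeviceShard::SubtractionTrick, i.e. parent bin minus sibling bin.
// A minimal standalone kernel showing that per-bin subtraction follows; the GradPair struct,
// kernel name, and launch shape are assumptions for illustration, not the updater's types.
#include <cuda_runtime.h>

struct GradPair { float grad; float hess; };  // stand-in for the gradient-pair sum type

// parent, sibling and out each hold n_bins entries for one feature-histogram buffer.
__global__ void SubtractionTrickSketch(const GradPair* parent, const GradPair* sibling,
                                       GradPair* out, int n_bins) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n_bins) {
    out[idx].grad = parent[idx].grad - sibling[idx].grad;
    out[idx].hess = parent[idx].hess - sibling[idx].hess;
  }
}

// Example launch covering every bin, assuming d_parent/d_sibling/d_out are device buffers:
//   SubtractionTrickSketch<<<(n_bins + 255) / 256, 256>>>(d_parent, d_sibling, d_out, n_bins);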
e6a4d88005a0d5aba10ca2b709c4720bad5ef75b.cu
/*! * Copyright 2017 XGBoost contributors */ #include <thrust/reduce.h> #include <thrust/execution_policy.h> #include <thrust/sequence.h> #include <xgboost/tree_updater.h> #include <algorithm> #include <memory> #include <queue> #include <utility> #include <vector> #include "../common/compressed_iterator.h" #include "../common/device_helpers.cuh" #include "../common/hist_util.h" #include "../common/timer.h" #include "param.h" #include "updater_gpu_common.cuh" namespace xgboost { namespace tree { DMLC_REGISTRY_FILE_TAG(updater_gpu_hist_experimental); typedef bst_gpair_precise gpair_sum_t; template <int BLOCK_THREADS, typename reduce_t, typename temp_storage_t> __device__ gpair_sum_t ReduceFeature(const gpair_sum_t* begin, const gpair_sum_t* end, temp_storage_t* temp_storage) { __shared__ cub::Uninitialized<gpair_sum_t> uninitialized_sum; gpair_sum_t& shared_sum = uninitialized_sum.Alias(); gpair_sum_t local_sum = gpair_sum_t(); for (auto itr = begin; itr < end; itr += BLOCK_THREADS) { bool thread_active = itr + threadIdx.x < end; // Scan histogram gpair_sum_t bin = thread_active ? *(itr + threadIdx.x) : gpair_sum_t(); local_sum += reduce_t(temp_storage->sum_reduce).Reduce(bin, cub::Sum()); } if (threadIdx.x == 0) { shared_sum = local_sum; } __syncthreads(); return shared_sum; } template <int BLOCK_THREADS, typename reduce_t, typename scan_t, typename max_reduce_t, typename temp_storage_t> __device__ void EvaluateFeature(int fidx, const gpair_sum_t* hist, const int* feature_segments, float min_fvalue, const float* gidx_fvalue_map, DeviceSplitCandidate* best_split, const DeviceNodeStats& node, const GPUTrainingParam& param, temp_storage_t* temp_storage) { int gidx_begin = feature_segments[fidx]; int gidx_end = feature_segments[fidx + 1]; gpair_sum_t feature_sum = ReduceFeature<BLOCK_THREADS, reduce_t>( hist + gidx_begin, hist + gidx_end, temp_storage); auto prefix_op = SumCallbackOp<gpair_sum_t>(); for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += BLOCK_THREADS) { bool thread_active = scan_begin + threadIdx.x < gidx_end; gpair_sum_t bin = thread_active ? hist[scan_begin + threadIdx.x] : gpair_sum_t(); scan_t(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op); // Calculate gain gpair_sum_t parent_sum = gpair_sum_t(node.sum_gradients); gpair_sum_t missing = parent_sum - feature_sum; bool missing_left = true; const float null_gain = -FLT_MAX; float gain = null_gain; if (thread_active) { gain = loss_chg_missing(bin, missing, parent_sum, node.root_gain, param, missing_left); } __syncthreads(); // Find thread with best gain cub::KeyValuePair<int, float> tuple(threadIdx.x, gain); cub::KeyValuePair<int, float> best = max_reduce_t(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax()); __shared__ cub::KeyValuePair<int, float> block_max; if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { int gidx = scan_begin + threadIdx.x; float fvalue = gidx == gidx_begin ? min_fvalue : gidx_fvalue_map[gidx - 1]; gpair_sum_t left = missing_left ? bin + missing : bin; gpair_sum_t right = parent_sum - left; best_split->Update(gain, missing_left ? 
LeftDir : RightDir, fvalue, fidx, left, right, param); } __syncthreads(); } } template <int BLOCK_THREADS> __global__ void evaluate_split_kernel( const gpair_sum_t* d_hist, int nidx, uint64_t n_features, DeviceNodeStats nodes, const int* d_feature_segments, const float* d_fidx_min_map, const float* d_gidx_fvalue_map, GPUTrainingParam gpu_param, DeviceSplitCandidate* d_split) { typedef cub::KeyValuePair<int, float> ArgMaxT; typedef cub::BlockScan<gpair_sum_t, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT; typedef cub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT; typedef cub::BlockReduce<gpair_sum_t, BLOCK_THREADS> SumReduceT; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate& best_split = uninitialized_split.Alias(); __shared__ TempStorage temp_storage; if (threadIdx.x == 0) { best_split = DeviceSplitCandidate(); } __syncthreads(); auto fidx = blockIdx.x; EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>( fidx, d_hist, d_feature_segments, d_fidx_min_map[fidx], d_gidx_fvalue_map, &best_split, nodes, gpu_param, &temp_storage); __syncthreads(); if (threadIdx.x == 0) { // Record best loss d_split[fidx] = best_split; } } // Find a gidx value for a given feature otherwise return -1 if not found template <typename gidx_iter_t> __device__ int BinarySearchRow(bst_uint begin, bst_uint end, gidx_iter_t data, int fidx_begin, int fidx_end) { bst_uint previous_middle = UINT32_MAX; while (end != begin) { auto middle = begin + (end - begin) / 2; if (middle == previous_middle) { break; } previous_middle = middle; auto gidx = data[middle]; if (gidx >= fidx_begin && gidx < fidx_end) { return gidx; } else if (gidx < fidx_begin) { begin = middle; } else { end = middle; } } // Value is missing return -1; } struct DeviceHistogram { dh::bulk_allocator<dh::memory_type::DEVICE> ba; dh::dvec<gpair_sum_t> data; int n_bins; void Init(int device_idx, int max_nodes, int n_bins, bool silent) { this->n_bins = n_bins; ba.allocate(device_idx, silent, &data, size_t(max_nodes) * size_t(n_bins)); } void Reset() { data.fill(gpair_sum_t()); } gpair_sum_t* GetHistPtr(int nidx) { return data.data() + nidx * n_bins; } void PrintNidx(int nidx) const { auto h_data = data.as_vector(); std::cout << "nidx " << nidx << ":\n"; for (int i = n_bins * nidx; i < n_bins * (nidx + 1); i++) { std::cout << h_data[i] << " "; } std::cout << "\n"; } }; // Manage memory for a single GPU struct DeviceShard { struct Segment { size_t begin; size_t end; Segment() : begin(0), end(0) {} Segment(size_t begin, size_t end) : begin(begin), end(end) { CHECK_GE(end, begin); } size_t Size() const { return end - begin; } }; int device_idx; int normalised_device_idx; // Device index counting from param.gpu_id dh::bulk_allocator<dh::memory_type::DEVICE> ba; dh::dvec<common::compressed_byte_t> gidx_buffer; dh::dvec<bst_gpair> gpair; dh::dvec2<bst_uint> ridx; // Row index relative to this shard dh::dvec2<int> position; std::vector<Segment> ridx_segments; dh::dvec<int> feature_segments; dh::dvec<float> gidx_fvalue_map; dh::dvec<float> min_fvalue; std::vector<bst_gpair> node_sum_gradients; common::CompressedIterator<uint32_t> gidx; int row_stride; bst_uint row_begin_idx; // The row offset for this shard bst_uint row_end_idx; bst_uint n_rows; int n_bins; int null_gidx_value; DeviceHistogram hist; TrainParam param; int64_t* tmp_pinned; // Small amount 
of staging memory std::vector<cudaStream_t> streams; dh::CubMemory temp_memory; DeviceShard(int device_idx, int normalised_device_idx, const common::GHistIndexMatrix& gmat, bst_uint row_begin, bst_uint row_end, int n_bins, TrainParam param) : device_idx(device_idx), normalised_device_idx(normalised_device_idx), row_begin_idx(row_begin), row_end_idx(row_end), n_rows(row_end - row_begin), n_bins(n_bins), null_gidx_value(n_bins), param(param) { // Convert to ELLPACK matrix representation int max_elements_row = 0; for (auto i = row_begin; i < row_end; i++) { max_elements_row = (std::max)(max_elements_row, static_cast<int>(gmat.row_ptr[i + 1] - gmat.row_ptr[i])); } row_stride = max_elements_row; std::vector<int> ellpack_matrix(row_stride * n_rows, null_gidx_value); for (auto i = row_begin; i < row_end; i++) { int row_count = 0; for (auto j = gmat.row_ptr[i]; j < gmat.row_ptr[i + 1]; j++) { ellpack_matrix[(i - row_begin) * row_stride + row_count] = gmat.index[j]; row_count++; } } // Allocate int num_symbols = n_bins + 1; size_t compressed_size_bytes = common::CompressedBufferWriter::CalculateBufferSize( ellpack_matrix.size(), num_symbols); int max_nodes = param.max_leaves > 0 ? param.max_leaves * 2 : n_nodes(param.max_depth); ba.allocate(device_idx, param.silent, &gidx_buffer, compressed_size_bytes, &gpair, n_rows, &ridx, n_rows, &position, n_rows, &feature_segments, gmat.cut->row_ptr.size(), &gidx_fvalue_map, gmat.cut->cut.size(), &min_fvalue, gmat.cut->min_val.size()); gidx_fvalue_map = gmat.cut->cut; min_fvalue = gmat.cut->min_val; feature_segments = gmat.cut->row_ptr; node_sum_gradients.resize(max_nodes); ridx_segments.resize(max_nodes); // Compress gidx common::CompressedBufferWriter cbw(num_symbols); std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size()); cbw.Write(host_buffer.data(), ellpack_matrix.begin(), ellpack_matrix.end()); gidx_buffer = host_buffer; gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), num_symbols); common::CompressedIterator<uint32_t> ci_host(host_buffer.data(), num_symbols); // Init histogram hist.Init(device_idx, max_nodes, gmat.cut->row_ptr.back(), param.silent); dh::safe_cuda(cudaMallocHost(&tmp_pinned, sizeof(int64_t))); } ~DeviceShard() { for (auto& stream : streams) { dh::safe_cuda(cudaStreamDestroy(stream)); } dh::safe_cuda(cudaFreeHost(tmp_pinned)); } // Get vector of at least n initialised streams std::vector<cudaStream_t>& GetStreams(int n) { if (n > streams.size()) { for (auto& stream : streams) { dh::safe_cuda(cudaStreamDestroy(stream)); } streams.clear(); streams.resize(n); for (auto& stream : streams) { dh::safe_cuda(cudaStreamCreate(&stream)); } } return streams; } // Reset values for each update iteration void Reset(const std::vector<bst_gpair>& host_gpair) { dh::safe_cuda(cudaSetDevice(device_idx)); position.current_dvec().fill(0); std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), bst_gpair()); thrust::sequence(ridx.current_dvec().tbegin(), ridx.current_dvec().tend()); std::fill(ridx_segments.begin(), ridx_segments.end(), Segment(0, 0)); ridx_segments.front() = Segment(0, ridx.size()); this->gpair.copy(host_gpair.begin() + row_begin_idx, host_gpair.begin() + row_end_idx); subsample_gpair(&gpair, param.subsample, row_begin_idx); hist.Reset(); } void BuildHist(int nidx) { auto segment = ridx_segments[nidx]; auto d_node_hist = hist.GetHistPtr(nidx); auto d_gidx = gidx; auto d_ridx = ridx.current(); auto d_gpair = gpair.data(); auto row_stride = this->row_stride; auto null_gidx_value = this->null_gidx_value; 
auto n_elements = segment.Size() * row_stride; dh::launch_n(device_idx, n_elements, [=] __device__(size_t idx) { int ridx = d_ridx[(idx / row_stride) + segment.begin]; int gidx = d_gidx[ridx * row_stride + idx % row_stride]; if (gidx != null_gidx_value) { AtomicAddGpair(d_node_hist + gidx, d_gpair[ridx]); } }); } void SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { auto d_node_hist_parent = hist.GetHistPtr(nidx_parent); auto d_node_hist_histogram = hist.GetHistPtr(nidx_histogram); auto d_node_hist_subtraction = hist.GetHistPtr(nidx_subtraction); dh::launch_n(device_idx, hist.n_bins, [=] __device__(size_t idx) { d_node_hist_subtraction[idx] = d_node_hist_parent[idx] - d_node_hist_histogram[idx]; }); } __device__ void CountLeft(int64_t* d_count, int val, int left_nidx) { unsigned ballot = __ballot(val == left_nidx); if (threadIdx.x % 32 == 0) { atomicAdd(reinterpret_cast<unsigned long long*>(d_count), // NOLINT static_cast<unsigned long long>(__popc(ballot))); // NOLINT } } void UpdatePosition(int nidx, int left_nidx, int right_nidx, int fidx, int split_gidx, bool default_dir_left, bool is_dense, int fidx_begin, int fidx_end) { dh::safe_cuda(cudaSetDevice(device_idx)); temp_memory.LazyAllocate(sizeof(int64_t)); auto d_left_count = temp_memory.Pointer<int64_t>(); dh::safe_cuda(cudaMemset(d_left_count, 0, sizeof(int64_t))); auto segment = ridx_segments[nidx]; auto d_ridx = ridx.current(); auto d_position = position.current(); auto d_gidx = gidx; auto row_stride = this->row_stride; dh::launch_n<1, 512>( device_idx, segment.Size(), [=] __device__(bst_uint idx) { idx += segment.begin; auto ridx = d_ridx[idx]; auto row_begin = row_stride * ridx; auto row_end = row_begin + row_stride; auto gidx = -1; if (is_dense) { gidx = d_gidx[row_begin + fidx]; } else { gidx = BinarySearchRow(row_begin, row_end, d_gidx, fidx_begin, fidx_end); } int position; if (gidx >= 0) { // Feature is found position = gidx <= split_gidx ? left_nidx : right_nidx; } else { // Feature is missing position = default_dir_left ? 
left_nidx : right_nidx; } CountLeft(d_left_count, position, left_nidx); d_position[idx] = position; }); dh::safe_cuda(cudaMemcpy(tmp_pinned, d_left_count, sizeof(int64_t), cudaMemcpyDeviceToHost)); auto left_count = *tmp_pinned; SortPosition(segment, left_nidx, right_nidx); // dh::safe_cuda(cudaStreamSynchronize(stream)); ridx_segments[left_nidx] = Segment(segment.begin, segment.begin + left_count); ridx_segments[right_nidx] = Segment(segment.begin + left_count, segment.end); } void SortPosition(const Segment& segment, int left_nidx, int right_nidx) { int min_bits = 0; int max_bits = static_cast<int>( std::ceil(std::log2((std::max)(left_nidx, right_nidx) + 1))); size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, position.current() + segment.begin, position.other() + segment.begin, ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size(), min_bits, max_bits); temp_memory.LazyAllocate(temp_storage_bytes); cub::DeviceRadixSort::SortPairs( temp_memory.d_temp_storage, temp_memory.temp_storage_bytes, position.current() + segment.begin, position.other() + segment.begin, ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size(), min_bits, max_bits); dh::safe_cuda(cudaMemcpy( position.current() + segment.begin, position.other() + segment.begin, segment.Size() * sizeof(int), cudaMemcpyDeviceToDevice)); dh::safe_cuda(cudaMemcpy( ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size() * sizeof(bst_uint), cudaMemcpyDeviceToDevice)); } }; class GPUHistMakerExperimental : public TreeUpdater { public: struct ExpandEntry; GPUHistMakerExperimental() : initialised(false) {} ~GPUHistMakerExperimental() {} void Init( const std::vector<std::pair<std::string, std::string>>& args) override { param.InitAllowUnknown(args); CHECK(param.n_gpus != 0) << "Must have at least one device"; n_devices = param.n_gpus; dh::check_compute_capability(); if (param.grow_policy == TrainParam::kLossGuide) { qexpand_.reset(new ExpandQueue(loss_guide)); } else { qexpand_.reset(new ExpandQueue(depth_wise)); } monitor.Init("updater_gpu_hist_experimental", param.debug_verbose); } void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { monitor.Start("Update"); GradStats::CheckInfo(dmat->info()); // rescale learning rate according to size of trees float lr = param.learning_rate; param.learning_rate = lr / trees.size(); // build tree try { for (size_t i = 0; i < trees.size(); ++i) { this->UpdateTree(gpair, dmat, trees[i]); } } catch (const std::exception& e) { LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl; } param.learning_rate = lr; monitor.Stop("Update"); } void InitDataOnce(DMatrix* dmat) { info = &dmat->info(); monitor.Start("Quantiles"); hmat_.Init(dmat, param.max_bin); gmat_.cut = &hmat_; gmat_.Init(dmat); monitor.Stop("Quantiles"); n_bins = hmat_.row_ptr.back(); int n_devices = dh::n_devices(param.n_gpus, info->num_row); bst_uint row_begin = 0; bst_uint shard_size = std::ceil(static_cast<double>(info->num_row) / n_devices); std::vector<int> dList(n_devices); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices(); dList[d_idx] = device_idx; } reducer.Init(dList); // Partition input matrix into row segments std::vector<size_t> row_segments; shards.resize(n_devices); row_segments.push_back(0); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { bst_uint row_end = std::min(static_cast<size_t>(row_begin + 
shard_size), info->num_row); row_segments.push_back(row_end); row_begin = row_end; } // Create device shards omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id] = std::unique_ptr<DeviceShard>( new DeviceShard(dList[cpu_thread_id], cpu_thread_id, gmat_, row_segments[cpu_thread_id], row_segments[cpu_thread_id + 1], n_bins, param)); } initialised = true; } void InitData(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const RegTree& tree) { monitor.Start("InitDataOnce"); if (!initialised) { CheckGradientMax(gpair); this->InitDataOnce(dmat); } monitor.Stop("InitDataOnce"); column_sampler.Init(info->num_col, param); // Copy gpair & reset memory monitor.Start("InitDataReset"); omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id]->Reset(gpair); } monitor.Stop("InitDataReset"); } void AllReduceHist(int nidx) { for (auto& shard : shards) { auto d_node_hist = shard->hist.GetHistPtr(nidx); reducer.AllReduceSum( shard->normalised_device_idx, reinterpret_cast<gpair_sum_t::value_t*>(d_node_hist), reinterpret_cast<gpair_sum_t::value_t*>(d_node_hist), n_bins * (sizeof(gpair_sum_t) / sizeof(gpair_sum_t::value_t))); } reducer.Synchronize(); } void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right) { size_t left_node_max_elements = 0; size_t right_node_max_elements = 0; for (auto& shard : shards) { left_node_max_elements = (std::max)( left_node_max_elements, shard->ridx_segments[nidx_left].Size()); right_node_max_elements = (std::max)( right_node_max_elements, shard->ridx_segments[nidx_right].Size()); } auto build_hist_nidx = nidx_left; auto subtraction_trick_nidx = nidx_right; if (right_node_max_elements < left_node_max_elements) { build_hist_nidx = nidx_right; subtraction_trick_nidx = nidx_left; } for (auto& shard : shards) { shard->BuildHist(build_hist_nidx); } this->AllReduceHist(build_hist_nidx); for (auto& shard : shards) { shard->SubtractionTrick(nidx_parent, build_hist_nidx, subtraction_trick_nidx); } } // Returns best loss std::vector<DeviceSplitCandidate> EvaluateSplits( const std::vector<int>& nidx_set, RegTree* p_tree) { auto columns = info->num_col; std::vector<DeviceSplitCandidate> best_splits(nidx_set.size()); std::vector<DeviceSplitCandidate> candidate_splits(nidx_set.size() * columns); // Use first device auto& shard = shards.front(); dh::safe_cuda(cudaSetDevice(shard->device_idx)); shard->temp_memory.LazyAllocate(sizeof(DeviceSplitCandidate) * columns * nidx_set.size()); auto d_split = shard->temp_memory.Pointer<DeviceSplitCandidate>(); auto& streams = shard->GetStreams(static_cast<int>(nidx_set.size())); // Use streams to process nodes concurrently for (auto i = 0; i < nidx_set.size(); i++) { auto nidx = nidx_set[i]; DeviceNodeStats node(shard->node_sum_gradients[nidx], nidx, param); const int BLOCK_THREADS = 256; evaluate_split_kernel<BLOCK_THREADS> <<<uint32_t(columns), BLOCK_THREADS, 0, streams[i]>>>( shard->hist.GetHistPtr(nidx), nidx, info->num_col, node, shard->feature_segments.data(), shard->min_fvalue.data(), shard->gidx_fvalue_map.data(), GPUTrainingParam(param), d_split + i * columns); } dh::safe_cuda( cudaMemcpy(candidate_splits.data(), shard->temp_memory.d_temp_storage, sizeof(DeviceSplitCandidate) * columns * nidx_set.size(), cudaMemcpyDeviceToHost)); for (auto i = 0; i < nidx_set.size(); i++) { auto nidx = nidx_set[i]; DeviceSplitCandidate nidx_best; for (auto fidx = 0; fidx < columns; fidx++) { auto& candidate = 
candidate_splits[i * columns + fidx]; if (column_sampler.ColumnUsed(candidate.findex, p_tree->GetDepth(nidx))) { nidx_best.Update(candidate_splits[i * columns + fidx], param); } } best_splits[i] = nidx_best; } return std::move(best_splits); } void InitRoot(const std::vector<bst_gpair>& gpair, RegTree* p_tree) { auto root_nidx = 0; // Sum gradients std::vector<bst_gpair> tmp_sums(shards.size()); omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); dh::safe_cuda(cudaSetDevice(shards[cpu_thread_id]->device_idx)); tmp_sums[cpu_thread_id] = thrust::reduce(thrust::cuda::par(shards[cpu_thread_id]->temp_memory), shards[cpu_thread_id]->gpair.tbegin(), shards[cpu_thread_id]->gpair.tend()); } auto sum_gradient = std::accumulate(tmp_sums.begin(), tmp_sums.end(), bst_gpair()); // Generate root histogram for (auto& shard : shards) { shard->BuildHist(root_nidx); } this->AllReduceHist(root_nidx); // Remember root stats p_tree->stat(root_nidx).sum_hess = sum_gradient.GetHess(); p_tree->stat(root_nidx).base_weight = CalcWeight(param, sum_gradient); // Store sum gradients for (auto& shard : shards) { shard->node_sum_gradients[root_nidx] = sum_gradient; } // Generate first split auto splits = this->EvaluateSplits({root_nidx}, p_tree); qexpand_->push( ExpandEntry(root_nidx, p_tree->GetDepth(root_nidx), splits.front(), 0)); } void UpdatePosition(const ExpandEntry& candidate, RegTree* p_tree) { auto nidx = candidate.nid; auto left_nidx = (*p_tree)[nidx].cleft(); auto right_nidx = (*p_tree)[nidx].cright(); // convert floating-point split_pt into corresponding bin_id // split_cond = -1 indicates that split_pt is less than all known cut points auto split_gidx = -1; auto fidx = candidate.split.findex; auto default_dir_left = candidate.split.dir == LeftDir; auto fidx_begin = hmat_.row_ptr[fidx]; auto fidx_end = hmat_.row_ptr[fidx + 1]; for (auto i = fidx_begin; i < fidx_end; ++i) { if (candidate.split.fvalue == hmat_.cut[i]) { split_gidx = static_cast<int32_t>(i); } } auto is_dense = info->num_nonzero == info->num_row * info->num_col; omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id]->UpdatePosition(nidx, left_nidx, right_nidx, fidx, split_gidx, default_dir_left, is_dense, fidx_begin, fidx_end); } } void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) { // Add new leaves RegTree& tree = *p_tree; tree.AddChilds(candidate.nid); auto& parent = tree[candidate.nid]; parent.set_split(candidate.split.findex, candidate.split.fvalue, candidate.split.dir == LeftDir); tree.stat(candidate.nid).loss_chg = candidate.split.loss_chg; // Configure left child auto left_weight = CalcWeight(param, candidate.split.left_sum); tree[parent.cleft()].set_leaf(left_weight * param.learning_rate, 0); tree.stat(parent.cleft()).base_weight = left_weight; tree.stat(parent.cleft()).sum_hess = candidate.split.left_sum.GetHess(); // Configure right child auto right_weight = CalcWeight(param, candidate.split.right_sum); tree[parent.cright()].set_leaf(right_weight * param.learning_rate, 0); tree.stat(parent.cright()).base_weight = right_weight; tree.stat(parent.cright()).sum_hess = candidate.split.right_sum.GetHess(); // Store sum gradients for (auto& shard : shards) { shard->node_sum_gradients[parent.cleft()] = candidate.split.left_sum; shard->node_sum_gradients[parent.cright()] = candidate.split.right_sum; } this->UpdatePosition(candidate, p_tree); } void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* p_fmat, 
RegTree* p_tree) { auto& tree = *p_tree; monitor.Start("InitData"); this->InitData(gpair, p_fmat, *p_tree); monitor.Stop("InitData"); monitor.Start("InitRoot"); this->InitRoot(gpair, p_tree); monitor.Stop("InitRoot"); auto timestamp = qexpand_->size(); auto num_leaves = 1; while (!qexpand_->empty()) { auto candidate = qexpand_->top(); qexpand_->pop(); if (!candidate.IsValid(param, num_leaves)) continue; // std::cout << candidate; monitor.Start("ApplySplit"); this->ApplySplit(candidate, p_tree); monitor.Stop("ApplySplit"); num_leaves++; auto left_child_nidx = tree[candidate.nid].cleft(); auto right_child_nidx = tree[candidate.nid].cright(); // Only create child entries if needed if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx), num_leaves)) { monitor.Start("BuildHist"); this->BuildHistLeftRight(candidate.nid, left_child_nidx, right_child_nidx); monitor.Stop("BuildHist"); monitor.Start("EvaluateSplits"); auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx}, p_tree); qexpand_->push(ExpandEntry(left_child_nidx, tree.GetDepth(left_child_nidx), splits[0], timestamp++)); qexpand_->push(ExpandEntry(right_child_nidx, tree.GetDepth(right_child_nidx), splits[1], timestamp++)); monitor.Stop("EvaluateSplits"); } } } struct ExpandEntry { int nid; int depth; DeviceSplitCandidate split; uint64_t timestamp; ExpandEntry(int nid, int depth, const DeviceSplitCandidate& split, uint64_t timestamp) : nid(nid), depth(depth), split(split), timestamp(timestamp) {} bool IsValid(const TrainParam& param, int num_leaves) const { if (split.loss_chg <= rt_eps) return false; if (param.max_depth > 0 && depth == param.max_depth) return false; if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false; return true; } static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) { if (param.max_depth > 0 && depth == param.max_depth) return false; if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false; return true; } friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) { os << "ExpandEntry: \n"; os << "nidx: " << e.nid << "\n"; os << "depth: " << e.depth << "\n"; os << "loss: " << e.split.loss_chg << "\n"; os << "left_sum: " << e.split.left_sum << "\n"; os << "right_sum: " << e.split.right_sum << "\n"; return os; } }; inline static bool depth_wise(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.depth == rhs.depth) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } else { return lhs.depth > rhs.depth; // favor small depth } } inline static bool loss_guide(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.split.loss_chg == rhs.split.loss_chg) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } else { return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg } } TrainParam param; common::HistCutMatrix hmat_; common::GHistIndexMatrix gmat_; MetaInfo* info; bool initialised; int n_devices; int n_bins; std::vector<std::unique_ptr<DeviceShard>> shards; ColumnSampler column_sampler; typedef std::priority_queue<ExpandEntry, std::vector<ExpandEntry>, std::function<bool(ExpandEntry, ExpandEntry)>> ExpandQueue; std::unique_ptr<ExpandQueue> qexpand_; common::Monitor monitor; dh::AllReducer reducer; }; XGBOOST_REGISTER_TREE_UPDATER(GPUHistMakerExperimental, "grow_gpu_hist_experimental") .describe("Grow tree with GPU.") .set_body([]() { return new GPUHistMakerExperimental(); }); } // namespace tree } // namespace xgboost
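// Editor's illustrative sketch (not part of the file above): DeviceShard::CountLeft in the
// code above counts rows routed to the left child with one warp ballot plus a single
// atomicAdd per warp, rather than one atomic per thread. The sketch below shows the same
// pattern as a standalone kernel; it assumes the block size is a multiple of 32 so every
// lane of a warp participates, and it uses the newer __ballot_sync intrinsic (the original
// calls the older __ballot). Kernel and buffer names are illustrative only.
#include <cuda_runtime.h>

__global__ void CountLeftSketch(const int* position, int n, int left_nidx,
                                unsigned long long* d_left_count) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  // Out-of-range threads still join the ballot, contributing a false predicate.
  bool goes_left = (idx < n) && (position[idx] == left_nidx);
  unsigned ballot = __ballot_sync(0xffffffffu, goes_left);
  if ((threadIdx.x & 31) == 0) {
    // Lane 0 of each warp adds the warp's popcount once.
    atomicAdd(d_left_count, static_cast<unsigned long long>(__popc(ballot)));
  }
}

// Example launch, assuming d_position/d_left_count are device buffers and
// *d_left_count was zeroed beforehand:
//   CountLeftSketch<<<(n + 255) / 256, 256>>>(d_position, n, left_nidx, d_left_count);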
fc6c4ed928fd4b9d7322b831f7f3ff27ee8f67c5.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #include "opencv2/opencv_modules.hpp" #ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/core/cuda.hpp" #include "opencv2/cudev.hpp" #include "opencv2/core/cuda/utility.hpp" using namespace cv; using namespace cv::cuda; using namespace cv::cudev; device::ThrustAllocator::~ThrustAllocator() { } namespace { class DefaultThrustAllocator: public cv::cuda::device::ThrustAllocator { public: __device__ __host__ uchar* allocate(size_t numBytes) { #ifndef __CUDA_ARCH__ uchar* ptr; CV_CUDEV_SAFE_CALL(hipMalloc(&ptr, numBytes)); return ptr; #else return NULL; #endif } __device__ __host__ void deallocate(uchar* ptr, size_t numBytes) { (void)numBytes; #ifndef __CUDA_ARCH__ CV_CUDEV_SAFE_CALL(hipFree(ptr)); #endif } }; DefaultThrustAllocator defaultThrustAllocator; cv::cuda::device::ThrustAllocator* g_thrustAllocator = &defaultThrustAllocator; } cv::cuda::device::ThrustAllocator& cv::cuda::device::ThrustAllocator::getAllocator() { return *g_thrustAllocator; } void cv::cuda::device::ThrustAllocator::setAllocator(cv::cuda::device::ThrustAllocator* allocator) { if(allocator == NULL) g_thrustAllocator = &defaultThrustAllocator; else g_thrustAllocator = allocator; } namespace { class DefaultAllocator : public GpuMat::Allocator { public: bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize); void free(GpuMat* mat); }; bool DefaultAllocator::allocate(GpuMat* mat, int rows, int cols, size_t elemSize) { if (rows > 1 && cols > 1) { CV_CUDEV_SAFE_CALL( hipMallocPitch(&mat->data, &mat->step, elemSize * cols, rows) ); } else { // Single row or single column must be continuous CV_CUDEV_SAFE_CALL( hipMalloc(&mat->data, elemSize * cols * rows) ); mat->step = elemSize * cols; } mat->refcount = (int*) fastMalloc(sizeof(int)); return true; } void DefaultAllocator::free(GpuMat* mat) { hipFree(mat->datastart); fastFree(mat->refcount); } DefaultAllocator cudaDefaultAllocator; GpuMat::Allocator* g_defaultAllocator = &cudaDefaultAllocator; } GpuMat::Allocator* cv::cuda::GpuMat::defaultAllocator() { return g_defaultAllocator; } void cv::cuda::GpuMat::setDefaultAllocator(Allocator* allocator) { CV_Assert( allocator != 0 ); g_defaultAllocator = allocator; } ///////////////////////////////////////////////////// /// create void cv::cuda::GpuMat::create(int _rows, int _cols, int _type) { CV_DbgAssert( _rows >= 0 && _cols >= 0 ); _type &= Mat::TYPE_MASK; if (rows == _rows && cols == _cols && type() == _type && data) return; if (data) release(); if (_rows > 0 && _cols > 0) { flags = Mat::MAGIC_VAL + _type; rows = _rows; cols = _cols; const size_t esz = elemSize(); bool allocSuccess = allocator->allocate(this, rows, cols, esz); if (!allocSuccess) { // custom allocator fails, try default allocator allocator = defaultAllocator(); allocSuccess = allocator->allocate(this, rows, cols, esz); CV_Assert( allocSuccess ); } if (esz * cols == step) flags |= Mat::CONTINUOUS_FLAG; int64 _nettosize = static_cast<int64>(step) * rows; size_t nettosize = static_cast<size_t>(_nettosize); datastart = data; dataend = data + nettosize; if (refcount) *refcount = 1; } } ///////////////////////////////////////////////////// /// release void cv::cuda::GpuMat::release() { CV_DbgAssert( allocator != 0 ); if (refcount && CV_XADD(refcount, -1) == 1) allocator->free(this); dataend = data = datastart = 0; step = rows = cols = 0; refcount = 0; } ///////////////////////////////////////////////////// /// upload void cv::cuda::GpuMat::upload(InputArray arr) { Mat mat = arr.getMat(); CV_DbgAssert( !mat.empty() ); 
create(mat.size(), mat.type()); CV_CUDEV_SAFE_CALL( hipMemcpy2D(data, step, mat.data, mat.step, cols * elemSize(), rows, hipMemcpyHostToDevice) ); } void cv::cuda::GpuMat::upload(InputArray arr, Stream& _stream) { Mat mat = arr.getMat(); CV_DbgAssert( !mat.empty() ); create(mat.size(), mat.type()); hipStream_t stream = StreamAccessor::getStream(_stream); CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(data, step, mat.data, mat.step, cols * elemSize(), rows, hipMemcpyHostToDevice, stream) ); } ///////////////////////////////////////////////////// /// download void cv::cuda::GpuMat::download(OutputArray _dst) const { CV_DbgAssert( !empty() ); _dst.create(size(), type()); Mat dst = _dst.getMat(); CV_CUDEV_SAFE_CALL( hipMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToHost) ); } void cv::cuda::GpuMat::download(OutputArray _dst, Stream& _stream) const { CV_DbgAssert( !empty() ); _dst.create(size(), type()); Mat dst = _dst.getMat(); hipStream_t stream = StreamAccessor::getStream(_stream); CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToHost, stream) ); } ///////////////////////////////////////////////////// /// copyTo void cv::cuda::GpuMat::copyTo(OutputArray _dst) const { CV_DbgAssert( !empty() ); _dst.create(size(), type()); GpuMat dst = _dst.getGpuMat(); CV_CUDEV_SAFE_CALL( hipMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToDevice) ); } void cv::cuda::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const { CV_DbgAssert( !empty() ); _dst.create(size(), type()); GpuMat dst = _dst.getGpuMat(); hipStream_t stream = StreamAccessor::getStream(_stream); CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToDevice, stream) ); } namespace { template <size_t size> struct CopyToPolicy : DefaultTransformPolicy { }; template <> struct CopyToPolicy<4> : DefaultTransformPolicy { enum { shift = 2 }; }; template <> struct CopyToPolicy<8> : DefaultTransformPolicy { enum { shift = 1 }; }; template <typename T> void copyWithMask(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream) { gridTransformUnary_< CopyToPolicy<sizeof(typename VecTraits<T>::elem_type)> >(globPtr<T>(src), globPtr<T>(dst), identity<T>(), globPtr<uchar>(mask), stream); } } void cv::cuda::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const { CV_DbgAssert( !empty() ); CV_DbgAssert( depth() <= CV_64F && channels() <= 4 ); GpuMat mask = _mask.getGpuMat(); CV_DbgAssert( size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == channels()) ); uchar* data0 = _dst.getGpuMat().data; _dst.create(size(), type()); GpuMat dst = _dst.getGpuMat(); // do not leave dst uninitialized if (dst.data != data0) dst.setTo(Scalar::all(0), stream); typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream); static const func_t funcs[9][4] = { {0,0,0,0}, {copyWithMask<uchar>, copyWithMask<uchar2>, copyWithMask<uchar3>, copyWithMask<uchar4>}, {copyWithMask<ushort>, copyWithMask<ushort2>, copyWithMask<ushort3>, copyWithMask<ushort4>}, {0,0,0,0}, {copyWithMask<int>, copyWithMask<int2>, copyWithMask<int3>, copyWithMask<int4>}, {0,0,0,0}, {0,0,0,0}, {0,0,0,0}, {copyWithMask<double>, copyWithMask<double2>, copyWithMask<double3>, copyWithMask<double4>} }; if (mask.channels() == channels()) { const func_t func = funcs[elemSize1()][0]; CV_DbgAssert( func != 0 ); func(reshape(1), 
dst.reshape(1), mask.reshape(1), stream); } else { const func_t func = funcs[elemSize1()][channels() - 1]; CV_DbgAssert( func != 0 ); func(*this, dst, mask, stream); } } ///////////////////////////////////////////////////// /// setTo namespace { template <typename T> void setToWithOutMask(const GpuMat& mat, Scalar _scalar, Stream& stream) { Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar; gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), stream); } template <typename T> void setToWithMask(const GpuMat& mat, const GpuMat& mask, Scalar _scalar, Stream& stream) { Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar; gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), globPtr<uchar>(mask), stream); } } GpuMat& cv::cuda::GpuMat::setTo(Scalar value, Stream& stream) { CV_DbgAssert( !empty() ); CV_DbgAssert( depth() <= CV_64F && channels() <= 4 ); if (value[0] == 0.0 && value[1] == 0.0 && value[2] == 0.0 && value[3] == 0.0) { // Zero fill if (stream) CV_CUDEV_SAFE_CALL( hipMemset2DAsync(data, step, 0, cols * elemSize(), rows, StreamAccessor::getStream(stream)) ); else CV_CUDEV_SAFE_CALL( hipMemset2D(data, step, 0, cols * elemSize(), rows) ); return *this; } if (depth() == CV_8U) { const int cn = channels(); if (cn == 1 || (cn == 2 && value[0] == value[1]) || (cn == 3 && value[0] == value[1] && value[0] == value[2]) || (cn == 4 && value[0] == value[1] && value[0] == value[2] && value[0] == value[3])) { const int val = cv::saturate_cast<uchar>(value[0]); if (stream) CV_CUDEV_SAFE_CALL( hipMemset2DAsync(data, step, val, cols * elemSize(), rows, StreamAccessor::getStream(stream)) ); else CV_CUDEV_SAFE_CALL( hipMemset2D(data, step, val, cols * elemSize(), rows) ); return *this; } } typedef void (*func_t)(const GpuMat& mat, Scalar scalar, Stream& stream); static const func_t funcs[7][4] = { {setToWithOutMask<uchar>,setToWithOutMask<uchar2>,setToWithOutMask<uchar3>,setToWithOutMask<uchar4>}, {setToWithOutMask<schar>,setToWithOutMask<char2>,setToWithOutMask<char3>,setToWithOutMask<char4>}, {setToWithOutMask<ushort>,setToWithOutMask<ushort2>,setToWithOutMask<ushort3>,setToWithOutMask<ushort4>}, {setToWithOutMask<short>,setToWithOutMask<short2>,setToWithOutMask<short3>,setToWithOutMask<short4>}, {setToWithOutMask<int>,setToWithOutMask<int2>,setToWithOutMask<int3>,setToWithOutMask<int4>}, {setToWithOutMask<float>,setToWithOutMask<float2>,setToWithOutMask<float3>,setToWithOutMask<float4>}, {setToWithOutMask<double>,setToWithOutMask<double2>,setToWithOutMask<double3>,setToWithOutMask<double4>} }; funcs[depth()][channels() - 1](*this, value, stream); return *this; } GpuMat& cv::cuda::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream) { CV_DbgAssert( !empty() ); CV_DbgAssert( depth() <= CV_64F && channels() <= 4 ); GpuMat mask = _mask.getGpuMat(); if (mask.empty()) { return setTo(value, stream); } CV_DbgAssert( size() == mask.size() && mask.type() == CV_8UC1 ); typedef void (*func_t)(const GpuMat& mat, const GpuMat& mask, Scalar scalar, Stream& stream); static const func_t funcs[7][4] = { {setToWithMask<uchar>,setToWithMask<uchar2>,setToWithMask<uchar3>,setToWithMask<uchar4>}, {setToWithMask<schar>,setToWithMask<char2>,setToWithMask<char3>,setToWithMask<char4>}, {setToWithMask<ushort>,setToWithMask<ushort2>,setToWithMask<ushort3>,setToWithMask<ushort4>}, {setToWithMask<short>,setToWithMask<short2>,setToWithMask<short3>,setToWithMask<short4>}, 
{setToWithMask<int>,setToWithMask<int2>,setToWithMask<int3>,setToWithMask<int4>}, {setToWithMask<float>,setToWithMask<float2>,setToWithMask<float3>,setToWithMask<float4>}, {setToWithMask<double>,setToWithMask<double2>,setToWithMask<double3>,setToWithMask<double4>} }; funcs[depth()][channels() - 1](*this, mask, value, stream); return *this; } ///////////////////////////////////////////////////// /// convertTo namespace { template <typename T> struct ConvertToPolicy : DefaultTransformPolicy { }; template <> struct ConvertToPolicy<double> : DefaultTransformPolicy { enum { shift = 1 }; }; template <typename T, typename D> void convertToNoScale(const GpuMat& src, const GpuMat& dst, Stream& stream) { typedef typename VecTraits<T>::elem_type src_elem_type; typedef typename VecTraits<D>::elem_type dst_elem_type; typedef typename LargerType<src_elem_type, float>::type larger_elem_type; typedef typename LargerType<float, dst_elem_type>::type scalar_type; gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_func<T, D>(), stream); } template <typename T, typename D, typename S> struct Convertor : unary_function<T, D> { S alpha; S beta; __device__ __forceinline__ D operator ()(typename TypeTraits<T>::parameter_type src) const { return cudev::saturate_cast<D>(alpha * src + beta); } }; template <typename T, typename D> void convertToScale(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream) { typedef typename VecTraits<T>::elem_type src_elem_type; typedef typename VecTraits<D>::elem_type dst_elem_type; typedef typename LargerType<src_elem_type, float>::type larger_elem_type; typedef typename LargerType<float, dst_elem_type>::type scalar_type; Convertor<T, D, scalar_type> op; op.alpha = cv::saturate_cast<scalar_type>(alpha); op.beta = cv::saturate_cast<scalar_type>(beta); gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), op, stream); } template <typename T, typename D> void convertScaleHalf(const GpuMat& src, const GpuMat& dst, Stream& stream) { typedef typename VecTraits<T>::elem_type src_elem_type; typedef typename VecTraits<D>::elem_type dst_elem_type; typedef typename LargerType<src_elem_type, float>::type larger_elem_type; typedef typename LargerType<float, dst_elem_type>::type scalar_type; gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_fp16_func<T,D>(), stream); } } void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const { if (rtype < 0) rtype = type(); else rtype = CV_MAKE_TYPE(CV_MAT_DEPTH(rtype), channels()); const int sdepth = depth(); const int ddepth = CV_MAT_DEPTH(rtype); if (sdepth == ddepth) { if (stream) copyTo(_dst, stream); else copyTo(_dst); return; } CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F ); GpuMat src = *this; _dst.create(size(), rtype); GpuMat dst = _dst.getGpuMat(); typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream); static const func_t funcs[7][7] = { {0, convertToNoScale<uchar, schar>, convertToNoScale<uchar, ushort>, convertToNoScale<uchar, short>, convertToNoScale<uchar, int>, convertToNoScale<uchar, float>, convertToNoScale<uchar, double>}, {convertToNoScale<schar, uchar>, 0, convertToNoScale<schar, ushort>, convertToNoScale<schar, short>, convertToNoScale<schar, int>, convertToNoScale<schar, float>, convertToNoScale<schar, double>}, {convertToNoScale<ushort, uchar>, convertToNoScale<ushort, schar>, 0, convertToNoScale<ushort, short>, 
convertToNoScale<ushort, int>, convertToNoScale<ushort, float>, convertToNoScale<ushort, double>}, {convertToNoScale<short, uchar>, convertToNoScale<short, schar>, convertToNoScale<short, ushort>, 0, convertToNoScale<short, int>, convertToNoScale<short, float>, convertToNoScale<short, double>}, {convertToNoScale<int, uchar>, convertToNoScale<int, schar>, convertToNoScale<int, ushort>, convertToNoScale<int, short>, 0, convertToNoScale<int, float>, convertToNoScale<int, double>}, {convertToNoScale<float, uchar>, convertToNoScale<float, schar>, convertToNoScale<float, ushort>, convertToNoScale<float, short>, convertToNoScale<float, int>, 0, convertToNoScale<float, double>}, {convertToNoScale<double, uchar>, convertToNoScale<double, schar>, convertToNoScale<double, ushort>, convertToNoScale<double, short>, convertToNoScale<double, int>, convertToNoScale<double, float>, 0} }; funcs[sdepth][ddepth](reshape(1), dst.reshape(1), stream); } void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const { if (rtype < 0) rtype = type(); else rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels()); const int sdepth = depth(); const int ddepth = CV_MAT_DEPTH(rtype); GpuMat src = *this; _dst.create(size(), rtype); GpuMat dst = _dst.getGpuMat(); typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream); static const func_t funcs[7][7] = { {convertToScale<uchar, uchar>, convertToScale<uchar, schar>, convertToScale<uchar, ushort>, convertToScale<uchar, short>, convertToScale<uchar, int>, convertToScale<uchar, float>, convertToScale<uchar, double>}, {convertToScale<schar, uchar>, convertToScale<schar, schar>, convertToScale<schar, ushort>, convertToScale<schar, short>, convertToScale<schar, int>, convertToScale<schar, float>, convertToScale<schar, double>}, {convertToScale<ushort, uchar>, convertToScale<ushort, schar>, convertToScale<ushort, ushort>, convertToScale<ushort, short>, convertToScale<ushort, int>, convertToScale<ushort, float>, convertToScale<ushort, double>}, {convertToScale<short, uchar>, convertToScale<short, schar>, convertToScale<short, ushort>, convertToScale<short, short>, convertToScale<short, int>, convertToScale<short, float>, convertToScale<short, double>}, {convertToScale<int, uchar>, convertToScale<int, schar>, convertToScale<int, ushort>, convertToScale<int, short>, convertToScale<int, int>, convertToScale<int, float>, convertToScale<int, double>}, {convertToScale<float, uchar>, convertToScale<float, schar>, convertToScale<float, ushort>, convertToScale<float, short>, convertToScale<float, int>, convertToScale<float, float>, convertToScale<float, double>}, {convertToScale<double, uchar>, convertToScale<double, schar>, convertToScale<double, ushort>, convertToScale<double, short>, convertToScale<double, int>, convertToScale<double, float>, convertToScale<double, double>} }; funcs[sdepth][ddepth](reshape(1), dst.reshape(1), alpha, beta, stream); } void cv::cuda::convertFp16(InputArray _src, OutputArray _dst, Stream& stream) { GpuMat src = _src.getGpuMat(); int ddepth = 0; switch(src.depth()) { case CV_32F: ddepth = CV_16S; break; case CV_16S: ddepth = CV_32F; break; default: CV_Error(Error::StsUnsupportedFormat, "Unsupported input depth"); return; } int type = CV_MAKE_TYPE(CV_MAT_DEPTH(ddepth), src.channels()); _dst.create(src.size(), type); GpuMat dst = _dst.getGpuMat(); typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream); static const func_t funcs[] = { 0, 0, 0, 
convertScaleHalf<float, short>, 0, convertScaleHalf<short, float>, 0, 0, }; funcs[ddepth](src.reshape(1), dst.reshape(1), stream); } #endif
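// The setTo / copyTo / convertTo paths above all share one dispatch idiom --
// a static 2-D table of function pointers indexed by [depth][channels - 1].
// The snippet below is a minimal, host-only sketch of that idiom for
// illustration; it is not OpenCV code, and the names (Depth, fill_impl,
// dispatch) are invented for this example.
#include <cstdio>

namespace dispatch_sketch {

enum Depth { DEPTH_8U = 0, DEPTH_32F = 1, DEPTH_COUNT = 2 };

using fill_func_t = void (*)(int);

template <typename T, int CN>
void fill_impl(int elems) {
  // A real implementation would launch a kernel over `elems` pixels of T[CN].
  std::printf("fill %d elements, %zu bytes per channel, %d channels\n",
              elems, sizeof(T), CN);
}

void dispatch(Depth depth, int channels, int elems) {
  // One row per depth, one column per channel count (1..4), mirroring the
  // funcs[depth()][channels() - 1] lookups performed by GpuMat above.
  static const fill_func_t table[DEPTH_COUNT][4] = {
    { fill_impl<unsigned char, 1>, fill_impl<unsigned char, 2>,
      fill_impl<unsigned char, 3>, fill_impl<unsigned char, 4> },
    { fill_impl<float, 1>, fill_impl<float, 2>,
      fill_impl<float, 3>, fill_impl<float, 4> },
  };
  table[depth][channels - 1](elems);
}

} // namespace dispatch_sketch

int main() {
  dispatch_sketch::dispatch(dispatch_sketch::DEPTH_32F, 3, 1024);
  return 0;
}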
fc6c4ed928fd4b9d7322b831f7f3ff27ee8f67c5.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #include "opencv2/opencv_modules.hpp" #ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/core/cuda.hpp" #include "opencv2/cudev.hpp" #include "opencv2/core/cuda/utility.hpp" using namespace cv; using namespace cv::cuda; using namespace cv::cudev; device::ThrustAllocator::~ThrustAllocator() { } namespace { class DefaultThrustAllocator: public cv::cuda::device::ThrustAllocator { public: __device__ __host__ uchar* allocate(size_t numBytes) { #ifndef __CUDA_ARCH__ uchar* ptr; CV_CUDEV_SAFE_CALL(cudaMalloc(&ptr, numBytes)); return ptr; #else return NULL; #endif } __device__ __host__ void deallocate(uchar* ptr, size_t numBytes) { (void)numBytes; #ifndef __CUDA_ARCH__ CV_CUDEV_SAFE_CALL(cudaFree(ptr)); #endif } }; DefaultThrustAllocator defaultThrustAllocator; cv::cuda::device::ThrustAllocator* g_thrustAllocator = &defaultThrustAllocator; } cv::cuda::device::ThrustAllocator& cv::cuda::device::ThrustAllocator::getAllocator() { return *g_thrustAllocator; } void cv::cuda::device::ThrustAllocator::setAllocator(cv::cuda::device::ThrustAllocator* allocator) { if(allocator == NULL) g_thrustAllocator = &defaultThrustAllocator; else g_thrustAllocator = allocator; } namespace { class DefaultAllocator : public GpuMat::Allocator { public: bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize); void free(GpuMat* mat); }; bool DefaultAllocator::allocate(GpuMat* mat, int rows, int cols, size_t elemSize) { if (rows > 1 && cols > 1) { CV_CUDEV_SAFE_CALL( cudaMallocPitch(&mat->data, &mat->step, elemSize * cols, rows) ); } else { // Single row or single column must be continuous CV_CUDEV_SAFE_CALL( cudaMalloc(&mat->data, elemSize * cols * rows) ); mat->step = elemSize * cols; } mat->refcount = (int*) fastMalloc(sizeof(int)); return true; } void DefaultAllocator::free(GpuMat* mat) { cudaFree(mat->datastart); fastFree(mat->refcount); } DefaultAllocator cudaDefaultAllocator; GpuMat::Allocator* g_defaultAllocator = &cudaDefaultAllocator; } GpuMat::Allocator* cv::cuda::GpuMat::defaultAllocator() { return g_defaultAllocator; } void cv::cuda::GpuMat::setDefaultAllocator(Allocator* allocator) { CV_Assert( allocator != 0 ); g_defaultAllocator = allocator; } ///////////////////////////////////////////////////// /// create void cv::cuda::GpuMat::create(int _rows, int _cols, int _type) { CV_DbgAssert( _rows >= 0 && _cols >= 0 ); _type &= Mat::TYPE_MASK; if (rows == _rows && cols == _cols && type() == _type && data) return; if (data) release(); if (_rows > 0 && _cols > 0) { flags = Mat::MAGIC_VAL + _type; rows = _rows; cols = _cols; const size_t esz = elemSize(); bool allocSuccess = allocator->allocate(this, rows, cols, esz); if (!allocSuccess) { // custom allocator fails, try default allocator allocator = defaultAllocator(); allocSuccess = allocator->allocate(this, rows, cols, esz); CV_Assert( allocSuccess ); } if (esz * cols == step) flags |= Mat::CONTINUOUS_FLAG; int64 _nettosize = static_cast<int64>(step) * rows; size_t nettosize = static_cast<size_t>(_nettosize); datastart = data; dataend = data + nettosize; if (refcount) *refcount = 1; } } ///////////////////////////////////////////////////// /// release void cv::cuda::GpuMat::release() { CV_DbgAssert( allocator != 0 ); if (refcount && CV_XADD(refcount, -1) == 1) allocator->free(this); dataend = data = datastart = 0; step = rows = cols = 0; refcount = 0; } ///////////////////////////////////////////////////// /// upload void cv::cuda::GpuMat::upload(InputArray arr) { Mat mat = arr.getMat(); CV_DbgAssert( !mat.empty() ); 
create(mat.size(), mat.type()); CV_CUDEV_SAFE_CALL( cudaMemcpy2D(data, step, mat.data, mat.step, cols * elemSize(), rows, cudaMemcpyHostToDevice) ); } void cv::cuda::GpuMat::upload(InputArray arr, Stream& _stream) { Mat mat = arr.getMat(); CV_DbgAssert( !mat.empty() ); create(mat.size(), mat.type()); cudaStream_t stream = StreamAccessor::getStream(_stream); CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(data, step, mat.data, mat.step, cols * elemSize(), rows, cudaMemcpyHostToDevice, stream) ); } ///////////////////////////////////////////////////// /// download void cv::cuda::GpuMat::download(OutputArray _dst) const { CV_DbgAssert( !empty() ); _dst.create(size(), type()); Mat dst = _dst.getMat(); CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost) ); } void cv::cuda::GpuMat::download(OutputArray _dst, Stream& _stream) const { CV_DbgAssert( !empty() ); _dst.create(size(), type()); Mat dst = _dst.getMat(); cudaStream_t stream = StreamAccessor::getStream(_stream); CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost, stream) ); } ///////////////////////////////////////////////////// /// copyTo void cv::cuda::GpuMat::copyTo(OutputArray _dst) const { CV_DbgAssert( !empty() ); _dst.create(size(), type()); GpuMat dst = _dst.getGpuMat(); CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice) ); } void cv::cuda::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const { CV_DbgAssert( !empty() ); _dst.create(size(), type()); GpuMat dst = _dst.getGpuMat(); cudaStream_t stream = StreamAccessor::getStream(_stream); CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice, stream) ); } namespace { template <size_t size> struct CopyToPolicy : DefaultTransformPolicy { }; template <> struct CopyToPolicy<4> : DefaultTransformPolicy { enum { shift = 2 }; }; template <> struct CopyToPolicy<8> : DefaultTransformPolicy { enum { shift = 1 }; }; template <typename T> void copyWithMask(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream) { gridTransformUnary_< CopyToPolicy<sizeof(typename VecTraits<T>::elem_type)> >(globPtr<T>(src), globPtr<T>(dst), identity<T>(), globPtr<uchar>(mask), stream); } } void cv::cuda::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const { CV_DbgAssert( !empty() ); CV_DbgAssert( depth() <= CV_64F && channels() <= 4 ); GpuMat mask = _mask.getGpuMat(); CV_DbgAssert( size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == channels()) ); uchar* data0 = _dst.getGpuMat().data; _dst.create(size(), type()); GpuMat dst = _dst.getGpuMat(); // do not leave dst uninitialized if (dst.data != data0) dst.setTo(Scalar::all(0), stream); typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream); static const func_t funcs[9][4] = { {0,0,0,0}, {copyWithMask<uchar>, copyWithMask<uchar2>, copyWithMask<uchar3>, copyWithMask<uchar4>}, {copyWithMask<ushort>, copyWithMask<ushort2>, copyWithMask<ushort3>, copyWithMask<ushort4>}, {0,0,0,0}, {copyWithMask<int>, copyWithMask<int2>, copyWithMask<int3>, copyWithMask<int4>}, {0,0,0,0}, {0,0,0,0}, {0,0,0,0}, {copyWithMask<double>, copyWithMask<double2>, copyWithMask<double3>, copyWithMask<double4>} }; if (mask.channels() == channels()) { const func_t func = funcs[elemSize1()][0]; CV_DbgAssert( func != 0 ); 
func(reshape(1), dst.reshape(1), mask.reshape(1), stream); } else { const func_t func = funcs[elemSize1()][channels() - 1]; CV_DbgAssert( func != 0 ); func(*this, dst, mask, stream); } } ///////////////////////////////////////////////////// /// setTo namespace { template <typename T> void setToWithOutMask(const GpuMat& mat, Scalar _scalar, Stream& stream) { Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar; gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), stream); } template <typename T> void setToWithMask(const GpuMat& mat, const GpuMat& mask, Scalar _scalar, Stream& stream) { Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar; gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), globPtr<uchar>(mask), stream); } } GpuMat& cv::cuda::GpuMat::setTo(Scalar value, Stream& stream) { CV_DbgAssert( !empty() ); CV_DbgAssert( depth() <= CV_64F && channels() <= 4 ); if (value[0] == 0.0 && value[1] == 0.0 && value[2] == 0.0 && value[3] == 0.0) { // Zero fill if (stream) CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, 0, cols * elemSize(), rows, StreamAccessor::getStream(stream)) ); else CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, 0, cols * elemSize(), rows) ); return *this; } if (depth() == CV_8U) { const int cn = channels(); if (cn == 1 || (cn == 2 && value[0] == value[1]) || (cn == 3 && value[0] == value[1] && value[0] == value[2]) || (cn == 4 && value[0] == value[1] && value[0] == value[2] && value[0] == value[3])) { const int val = cv::saturate_cast<uchar>(value[0]); if (stream) CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, val, cols * elemSize(), rows, StreamAccessor::getStream(stream)) ); else CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, val, cols * elemSize(), rows) ); return *this; } } typedef void (*func_t)(const GpuMat& mat, Scalar scalar, Stream& stream); static const func_t funcs[7][4] = { {setToWithOutMask<uchar>,setToWithOutMask<uchar2>,setToWithOutMask<uchar3>,setToWithOutMask<uchar4>}, {setToWithOutMask<schar>,setToWithOutMask<char2>,setToWithOutMask<char3>,setToWithOutMask<char4>}, {setToWithOutMask<ushort>,setToWithOutMask<ushort2>,setToWithOutMask<ushort3>,setToWithOutMask<ushort4>}, {setToWithOutMask<short>,setToWithOutMask<short2>,setToWithOutMask<short3>,setToWithOutMask<short4>}, {setToWithOutMask<int>,setToWithOutMask<int2>,setToWithOutMask<int3>,setToWithOutMask<int4>}, {setToWithOutMask<float>,setToWithOutMask<float2>,setToWithOutMask<float3>,setToWithOutMask<float4>}, {setToWithOutMask<double>,setToWithOutMask<double2>,setToWithOutMask<double3>,setToWithOutMask<double4>} }; funcs[depth()][channels() - 1](*this, value, stream); return *this; } GpuMat& cv::cuda::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream) { CV_DbgAssert( !empty() ); CV_DbgAssert( depth() <= CV_64F && channels() <= 4 ); GpuMat mask = _mask.getGpuMat(); if (mask.empty()) { return setTo(value, stream); } CV_DbgAssert( size() == mask.size() && mask.type() == CV_8UC1 ); typedef void (*func_t)(const GpuMat& mat, const GpuMat& mask, Scalar scalar, Stream& stream); static const func_t funcs[7][4] = { {setToWithMask<uchar>,setToWithMask<uchar2>,setToWithMask<uchar3>,setToWithMask<uchar4>}, {setToWithMask<schar>,setToWithMask<char2>,setToWithMask<char3>,setToWithMask<char4>}, {setToWithMask<ushort>,setToWithMask<ushort2>,setToWithMask<ushort3>,setToWithMask<ushort4>}, 
{setToWithMask<short>,setToWithMask<short2>,setToWithMask<short3>,setToWithMask<short4>}, {setToWithMask<int>,setToWithMask<int2>,setToWithMask<int3>,setToWithMask<int4>}, {setToWithMask<float>,setToWithMask<float2>,setToWithMask<float3>,setToWithMask<float4>}, {setToWithMask<double>,setToWithMask<double2>,setToWithMask<double3>,setToWithMask<double4>} }; funcs[depth()][channels() - 1](*this, mask, value, stream); return *this; } ///////////////////////////////////////////////////// /// convertTo namespace { template <typename T> struct ConvertToPolicy : DefaultTransformPolicy { }; template <> struct ConvertToPolicy<double> : DefaultTransformPolicy { enum { shift = 1 }; }; template <typename T, typename D> void convertToNoScale(const GpuMat& src, const GpuMat& dst, Stream& stream) { typedef typename VecTraits<T>::elem_type src_elem_type; typedef typename VecTraits<D>::elem_type dst_elem_type; typedef typename LargerType<src_elem_type, float>::type larger_elem_type; typedef typename LargerType<float, dst_elem_type>::type scalar_type; gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_func<T, D>(), stream); } template <typename T, typename D, typename S> struct Convertor : unary_function<T, D> { S alpha; S beta; __device__ __forceinline__ D operator ()(typename TypeTraits<T>::parameter_type src) const { return cudev::saturate_cast<D>(alpha * src + beta); } }; template <typename T, typename D> void convertToScale(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream) { typedef typename VecTraits<T>::elem_type src_elem_type; typedef typename VecTraits<D>::elem_type dst_elem_type; typedef typename LargerType<src_elem_type, float>::type larger_elem_type; typedef typename LargerType<float, dst_elem_type>::type scalar_type; Convertor<T, D, scalar_type> op; op.alpha = cv::saturate_cast<scalar_type>(alpha); op.beta = cv::saturate_cast<scalar_type>(beta); gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), op, stream); } template <typename T, typename D> void convertScaleHalf(const GpuMat& src, const GpuMat& dst, Stream& stream) { typedef typename VecTraits<T>::elem_type src_elem_type; typedef typename VecTraits<D>::elem_type dst_elem_type; typedef typename LargerType<src_elem_type, float>::type larger_elem_type; typedef typename LargerType<float, dst_elem_type>::type scalar_type; gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_fp16_func<T,D>(), stream); } } void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const { if (rtype < 0) rtype = type(); else rtype = CV_MAKE_TYPE(CV_MAT_DEPTH(rtype), channels()); const int sdepth = depth(); const int ddepth = CV_MAT_DEPTH(rtype); if (sdepth == ddepth) { if (stream) copyTo(_dst, stream); else copyTo(_dst); return; } CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F ); GpuMat src = *this; _dst.create(size(), rtype); GpuMat dst = _dst.getGpuMat(); typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream); static const func_t funcs[7][7] = { {0, convertToNoScale<uchar, schar>, convertToNoScale<uchar, ushort>, convertToNoScale<uchar, short>, convertToNoScale<uchar, int>, convertToNoScale<uchar, float>, convertToNoScale<uchar, double>}, {convertToNoScale<schar, uchar>, 0, convertToNoScale<schar, ushort>, convertToNoScale<schar, short>, convertToNoScale<schar, int>, convertToNoScale<schar, float>, convertToNoScale<schar, double>}, {convertToNoScale<ushort, 
uchar>, convertToNoScale<ushort, schar>, 0, convertToNoScale<ushort, short>, convertToNoScale<ushort, int>, convertToNoScale<ushort, float>, convertToNoScale<ushort, double>}, {convertToNoScale<short, uchar>, convertToNoScale<short, schar>, convertToNoScale<short, ushort>, 0, convertToNoScale<short, int>, convertToNoScale<short, float>, convertToNoScale<short, double>}, {convertToNoScale<int, uchar>, convertToNoScale<int, schar>, convertToNoScale<int, ushort>, convertToNoScale<int, short>, 0, convertToNoScale<int, float>, convertToNoScale<int, double>}, {convertToNoScale<float, uchar>, convertToNoScale<float, schar>, convertToNoScale<float, ushort>, convertToNoScale<float, short>, convertToNoScale<float, int>, 0, convertToNoScale<float, double>}, {convertToNoScale<double, uchar>, convertToNoScale<double, schar>, convertToNoScale<double, ushort>, convertToNoScale<double, short>, convertToNoScale<double, int>, convertToNoScale<double, float>, 0} }; funcs[sdepth][ddepth](reshape(1), dst.reshape(1), stream); } void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const { if (rtype < 0) rtype = type(); else rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels()); const int sdepth = depth(); const int ddepth = CV_MAT_DEPTH(rtype); GpuMat src = *this; _dst.create(size(), rtype); GpuMat dst = _dst.getGpuMat(); typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream); static const func_t funcs[7][7] = { {convertToScale<uchar, uchar>, convertToScale<uchar, schar>, convertToScale<uchar, ushort>, convertToScale<uchar, short>, convertToScale<uchar, int>, convertToScale<uchar, float>, convertToScale<uchar, double>}, {convertToScale<schar, uchar>, convertToScale<schar, schar>, convertToScale<schar, ushort>, convertToScale<schar, short>, convertToScale<schar, int>, convertToScale<schar, float>, convertToScale<schar, double>}, {convertToScale<ushort, uchar>, convertToScale<ushort, schar>, convertToScale<ushort, ushort>, convertToScale<ushort, short>, convertToScale<ushort, int>, convertToScale<ushort, float>, convertToScale<ushort, double>}, {convertToScale<short, uchar>, convertToScale<short, schar>, convertToScale<short, ushort>, convertToScale<short, short>, convertToScale<short, int>, convertToScale<short, float>, convertToScale<short, double>}, {convertToScale<int, uchar>, convertToScale<int, schar>, convertToScale<int, ushort>, convertToScale<int, short>, convertToScale<int, int>, convertToScale<int, float>, convertToScale<int, double>}, {convertToScale<float, uchar>, convertToScale<float, schar>, convertToScale<float, ushort>, convertToScale<float, short>, convertToScale<float, int>, convertToScale<float, float>, convertToScale<float, double>}, {convertToScale<double, uchar>, convertToScale<double, schar>, convertToScale<double, ushort>, convertToScale<double, short>, convertToScale<double, int>, convertToScale<double, float>, convertToScale<double, double>} }; funcs[sdepth][ddepth](reshape(1), dst.reshape(1), alpha, beta, stream); } void cv::cuda::convertFp16(InputArray _src, OutputArray _dst, Stream& stream) { GpuMat src = _src.getGpuMat(); int ddepth = 0; switch(src.depth()) { case CV_32F: ddepth = CV_16S; break; case CV_16S: ddepth = CV_32F; break; default: CV_Error(Error::StsUnsupportedFormat, "Unsupported input depth"); return; } int type = CV_MAKE_TYPE(CV_MAT_DEPTH(ddepth), src.channels()); _dst.create(src.size(), type); GpuMat dst = _dst.getGpuMat(); typedef void (*func_t)(const GpuMat& src, const 
GpuMat& dst, Stream& stream); static const func_t funcs[] = { 0, 0, 0, convertScaleHalf<float, short>, 0, convertScaleHalf<short, float>, 0, 0, }; funcs[ddepth](src.reshape(1), dst.reshape(1), stream); } #endif
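// GpuMat::setTo above short-circuits to a plain 2-D memset whenever the fill
// value is zero, or when every channel of a CV_8U image carries the same
// value, because a byte-wise memset over the pitched allocation avoids a
// kernel launch entirely. The program below is a standalone sketch of that
// fast path using only the CUDA runtime API; the buffer shape and names are
// illustrative, not OpenCV's.
#include <cuda_runtime.h>
#include <cstdio>

int main() {
  const int rows = 64;
  const int cols = 256;                    // 256 one-byte pixels per row
  size_t pitch = 0;
  unsigned char* d_data = nullptr;

  // Pitched allocation, as DefaultAllocator::allocate does for multi-row mats.
  if (cudaMallocPitch(reinterpret_cast<void**>(&d_data), &pitch, cols, rows) != cudaSuccess)
    return 1;

  // Zero-fill fast path: one memset of `cols` bytes per row, honouring the pitch.
  if (cudaMemset2D(d_data, pitch, 0, cols, rows) != cudaSuccess)
    return 1;

  // Copy one row back and spot-check it on the host.
  unsigned char h_row[cols];
  cudaMemcpy2D(h_row, cols, d_data, pitch, cols, 1, cudaMemcpyDeviceToHost);
  std::printf("first byte = %d (expected 0)\n", h_row[0]);

  cudaFree(d_data);
  return 0;
}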
5814ada8b4e81505d041ab7653084eb949b235a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit test for the PipelineTmaAsync class used in a WarpSpecialized Persistent loop */ #define KERNEL_DBG_TRACE false #include "../common/cutlass_unit_test.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> #include <cute/arch/cluster_sm90.hpp> #include <cutlass/util/reference/host/gemm.h> #include <cutlass/cluster_launch.hpp> #include "cutlass/core_io.h" #include "cutlass/util/print_error.hpp" #include "cutlass/util/GPU_Clock.hpp" #include "testbed.h" #include "cutlass/pipeline.hpp" #include "cutlass/arch/barrier.h" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/arch/barrier.h" #include "cutlass/arch/reg_reconfig.h" using namespace cute; using namespace cutlass; //////////////////// KERNEL ///////////////////////// template <uint32_t Stages, typename ClusterShape, typename PingPongBarrier> struct SharedStorage { typename cutlass::PipelineTmaAsync<Stages, ClusterShape>::SharedStorage pipeline_storage; typename PingPongBarrier::SharedStorage pingpong_storage; }; template <typename ClusterShape, uint32_t Stages> struct CollectiveSimulation { using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages, ClusterShape>; using PipelineState = typename cutlass::PipelineState<Stages>; CUTLASS_DEVICE static void dma_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe, uint32_t const num_iterations) { uint32_t const per_cta_bytes = sizeof(uint32_t); int warp_idx_in_warpgroup = __shfl_sync(0xffffffff, (threadIdx.x / 32) % 4, 0); int lane_predicate = cute::elect_one_sync(); if (warp_idx_in_warpgroup==0 && lane_predicate) { int tma_k_prologue = min(Stages, num_iterations); // Simulating Prologue TMA Loads CUTLASS_PRAGMA_UNROLL for(int i = 0; i < tma_k_prologue; ++i) { pipeline.producer_acquire(tile_start_state_pipe); // Simulating cp.async.bulk.tensor behavior pipeline.producer_commit(tile_start_state_pipe.index(), per_cta_bytes); ++tile_start_state_pipe; } int tma_k_iter = num_iterations - tma_k_prologue; PipelineState wr_pipe = tile_start_state_pipe; // Simulating Mainloop TMA Loads CUTE_NO_UNROLL for ( ; tma_k_iter > 0; --tma_k_iter){ pipeline.producer_acquire(wr_pipe); // Simulating cp.async.bulk.tensor behavior pipeline.producer_commit(wr_pipe.index(), per_cta_bytes); // Advance write stage ++wr_pipe; } } } CUTLASS_DEVICE static void math_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe, uint32_t const num_iterations, int* data_ptr) { PipelineState rd_pipe = tile_start_state_pipe; PipelineState release_pipe = rd_pipe; // simulates accumulators + extra reg. pressure int arr[168]; // Init Shared Memory read stages & PhaseBit static constexpr uint32_t K_PIPE_MMAS = 1; static_assert( K_PIPE_MMAS < Stages, "ERROR : Too many MMAs in flight"); // Total number of gemm iterations auto gemm_k_iterations = num_iterations; // Simulating Prologue MMAs int mma_k_prologue = min(K_PIPE_MMAS, gemm_k_iterations); CUTLASS_PRAGMA_UNROLL for (int iter = 0; iter < mma_k_prologue; ++iter) { pipeline.consumer_wait(rd_pipe); warpgroup_arrive(); // GMMA would typically happen here ++rd_pipe; } gemm_k_iterations -= mma_k_prologue; // Simulating Mainloop MMAs CUTLASS_PRAGMA_NO_UNROLL for ( ; gemm_k_iterations > 0; --gemm_k_iterations) { /// Wait on the rd_pipe stage / phase pipeline.consumer_wait(rd_pipe); warpgroup_arrive(); // GMMA would typically happen here // Dummy op - which will never happen // But simulates high register usage. 
CUTE_UNROLL for(int i = 0; i < 168; ++i){ if (threadIdx.x > 384){ arr[i] += data_ptr[i]; } } pipeline.consumer_release(release_pipe); // Advance stages ++rd_pipe; ++release_pipe; } // Dummy op - which will never happen CUTE_UNROLL for(int i = 0; i < 168; ++i){ if (threadIdx.x > 384){ data_ptr[i] = arr[i]; } } // Tail Loop for (int i = 0; i < K_PIPE_MMAS; ++i){ pipeline.consumer_release(release_pipe); ++release_pipe; } } }; struct KernelParams { uint32_t num_iterations; int tiles_per_cluster; int* data_ptr; }; // Goal of this kernel is to complete deadlock-free template <typename ClusterShape, uint32_t Stages> __launch_bounds__(384, 1) __global__ static void pipeline_device(KernelParams params) { extern __shared__ char shared_memory[]; using DispatchPolicy = cutlass::gemm::MainloopSm90TmaGmmaWarpSpecialized<Stages, ClusterShape, cutlass::gemm::KernelTmaWarpSpecializedPersistent>; using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages, ClusterShape>; using PipelineState = typename cutlass::PipelineState<Stages>; /* One for Mainloop and one for Epilogue */ constexpr int StagesPerMathWarpGroup = 2; constexpr int MathWarpGroupCountPersistent = 2; using PingPongBarrier = typename cutlass::OrderedSequenceBarrier<StagesPerMathWarpGroup, MathWarpGroupCountPersistent>; using SharedStorage = SharedStorage<Stages, ClusterShape, PingPongBarrier>; SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory); auto cta_layout = Layout<ClusterShape>{}; // (m,n) -> cta_id int warp_group_idx = __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarpGroup, 0); int warp_group_thread_idx = threadIdx.x % NumThreadsPerWarpGroup; dim3 block_id_in_cluster = cute::block_id_in_cluster(); auto cluster_shape = ClusterShape{}; // #Producers = #RowsInCluster + #ColsInCluster - 1 uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1; uint32_t const TmaTransactionBytes = static_cast<uint32_t>(sizeof(uint32_t) * NumProducers); // mbarrier.init typename MainloopPipeline::Params pipeline_params; pipeline_params.transaction_bytes = TmaTransactionBytes; if (warp_group_idx == 0) { pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; } else { pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; } pipeline_params.is_leader = warp_group_thread_idx == 0; pipeline_params.num_consumers = NumThreadsPerWarpGroup; MainloopPipeline pipeline(shared_storage.pipeline_storage, pipeline_params); PipelineState tile_start_state_pipe; int tiles_per_cluster = params.tiles_per_cluster; /* Offset pipeline start state for Math WG 2 */ if (warp_group_idx == 2) { // Update pipeline state for next persistent tile tile_start_state_pipe.advance(params.num_iterations); tiles_per_cluster--; } typename PingPongBarrier::Params pingpong_params; pingpong_params.group_id = warp_group_idx - 1; // Since DMA Warp Group Idx 0 will not participate pingpong_params.group_size = NumThreadsPerWarpGroup; // Number of threads / participants in a group PingPongBarrier math_wg_barrier(shared_storage.pingpong_storage, pingpong_params); __syncthreads(); // Ensure All CTAs in Cluster have completed init before issuing commits cute::cluster_arrive_relaxed(); cute::cluster_wait(); // Producer/DMA WarpGroup if (warp_group_idx == 0) { cutlass::arch::warpgroup_reg_dealloc<40>(); // For the DMA (prologue) - we start with an opposite phase - since we skip all waits // i.e., we know that the buffer is indeed empty PipelineState tile_prologue_state_pipe = 
make_producer_start_state<MainloopPipeline>(); while (tiles_per_cluster > 0) { CollectiveSimulation<ClusterShape,Stages>::dma_wg_simulation(pipeline, tile_prologue_state_pipe, params.num_iterations); // Update pipeline state for next persistent tile tile_prologue_state_pipe.advance(params.num_iterations); tiles_per_cluster--; } } // Math WarpGropups if(warp_group_idx == 1 || warp_group_idx == 2) { cutlass::arch::warpgroup_reg_alloc<232>(); while (tiles_per_cluster > 0) { // MMA math_wg_barrier.wait(); CollectiveSimulation<ClusterShape,Stages>::math_wg_simulation(pipeline, tile_start_state_pipe, params.num_iterations, params.data_ptr); math_wg_barrier.arrive(); // Epilogue math_wg_barrier.wait(); // Simulates long running stage #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) __nanosleep(100000); #endif math_wg_barrier.arrive(); // Update pipeline state for next persistent tile tile_start_state_pipe.advance(params.num_iterations * 2); tiles_per_cluster -= 2; } } // Makes sure remote SMEM doesn't get destroyed cute::cluster_arrive_relaxed(); cute::cluster_wait(); } ///////////////////////////////////////////////////// /// Device NT GMMA + TMA specialized template<uint32_t Stages_, typename ClusterShape_> struct PipelineTest { // // Data members // static constexpr uint32_t Stages = Stages_; static constexpr uint32_t kBlockSize = 128 * 3; using ClusterShape = ClusterShape_; // // Methods // // Run CuTe GEMM kernel hipError_t run(uint32_t const kNumIters, hipStream_t stream = 0) { float elapsed_ms = 0.0f; // Pipeline (multistage pipeline) auto num_stages = Int<Stages>{}; auto cluster_shape = Shape<Int<ClusterShape::kM>, Int<ClusterShape::kN>, _1>{}; // // Configure and launch // int iterations = 1; hipEvent_t events[2]; hipError_t result; for (hipEvent_t & event : events) { result = hipEventCreate(&event); if (result != hipSuccess) { std::cerr << "Error: Failed to create event."; return result; } } result = hipEventRecord(events[0]); if (result != hipSuccess) { std::cerr << "Error: Failed to record start event."; return result; } for (int iter = 0; iter < iterations; ++iter) { using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages, decltype(cluster_shape)>; constexpr int StagesPerMathWarpGroup = 2; constexpr int MathWarpGroupCountPersistent = 2; int smem_size = int(sizeof(SharedStorage<Stages, decltype(cluster_shape), typename cutlass::OrderedSequenceBarrier<StagesPerMathWarpGroup, MathWarpGroupCountPersistent>>)); result = hipFuncSetAttribute( pipeline_device<decltype(cluster_shape), Stages>, hipFuncAttributeMaxDynamicSharedMemorySize, smem_size); // Launch a single Cluster, with kBlockSize threads per CTA dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1); dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); dim3 dimBlock(kBlockSize,1,1); int tiles_per_cluster = (kNumIters % 10) + 1; printf("Persistent version: Tiles per Cluster = %d\n", tiles_per_cluster); const void* kernel = (const void*)pipeline_device<decltype(cluster_shape), Stages>; KernelParams params{kNumIters, tiles_per_cluster, nullptr}; void *kernel_params[] = {&params}; cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params); } result = hipEventRecord(events[1]); if (result != hipSuccess) { std::cerr << "Error: Failed to record stop event."; return result; } result = hipDeviceSynchronize(); if (result != hipSuccess) { std::cerr << "Error: hipDeviceSynchronize() failed" << std::endl; return result; } result = 
hipEventElapsedTime(&elapsed_ms, events[0], events[1]); if (result != hipSuccess) { std::cerr << "Failed to create event."; return result; } for (hipEvent_t & event : events) { (void)hipEventDestroy(event); } return hipSuccess; } }; #if CUDA_12_0_SM90_FEATURES_SUPPORTED TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage5) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 5; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage5) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 5; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); 
EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } #endif
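// The producer and consumer warp groups in the test above coordinate through a
// PipelineState that carries a circular stage index plus a phase bit which
// flips on every wrap-around, so a wait on "stage i" can distinguish the
// current pass over the buffers from the previous one. The struct below is a
// host-only analogue written only to illustrate that index/phase arithmetic;
// it is not the CUTLASS implementation and the names are invented.
#include <cstdio>
#include <cstdint>

template <uint32_t Stages>
struct ToyPipelineState {
  uint32_t index = 0;  // which buffer stage to use next
  uint32_t phase = 0;  // flips 0 <-> 1 each time `index` wraps past Stages - 1

  void operator++() {
    if (++index == Stages) {
      index = 0;
      phase ^= 1;
    }
  }

  // Advance the state by `count` iterations, as the test does with
  // tile_start_state_pipe.advance(num_iterations).
  void advance(uint32_t count) {
    for (uint32_t i = 0; i < count; ++i) ++(*this);
  }
};

int main() {
  ToyPipelineState<3> state;   // a 3-stage pipeline
  state.advance(7);            // 7 iterations: index = 7 % 3 = 1, two wrap-arounds
  std::printf("index = %u, phase = %u\n", state.index, state.phase);  // prints 1, 0
  return 0;
}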
5814ada8b4e81505d041ab7653084eb949b235a0.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit test for the PipelineTmaAsync class used in a WarpSpecialized Persistent loop */ #define KERNEL_DBG_TRACE false #include "../common/cutlass_unit_test.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> #include <cute/arch/cluster_sm90.hpp> #include <cutlass/util/reference/host/gemm.h> #include <cutlass/cluster_launch.hpp> #include "cutlass/core_io.h" #include "cutlass/util/print_error.hpp" #include "cutlass/util/GPU_Clock.hpp" #include "testbed.h" #include "cutlass/pipeline.hpp" #include "cutlass/arch/barrier.h" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/arch/barrier.h" #include "cutlass/arch/reg_reconfig.h" using namespace cute; using namespace cutlass; //////////////////// KERNEL ///////////////////////// template <uint32_t Stages, typename ClusterShape, typename PingPongBarrier> struct SharedStorage { typename cutlass::PipelineTmaAsync<Stages, ClusterShape>::SharedStorage pipeline_storage; typename PingPongBarrier::SharedStorage pingpong_storage; }; template <typename ClusterShape, uint32_t Stages> struct CollectiveSimulation { using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages, ClusterShape>; using PipelineState = typename cutlass::PipelineState<Stages>; CUTLASS_DEVICE static void dma_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe, uint32_t const num_iterations) { uint32_t const per_cta_bytes = sizeof(uint32_t); int warp_idx_in_warpgroup = __shfl_sync(0xffffffff, (threadIdx.x / 32) % 4, 0); int lane_predicate = cute::elect_one_sync(); if (warp_idx_in_warpgroup==0 && lane_predicate) { int tma_k_prologue = min(Stages, num_iterations); // Simulating Prologue TMA Loads CUTLASS_PRAGMA_UNROLL for(int i = 0; i < tma_k_prologue; ++i) { pipeline.producer_acquire(tile_start_state_pipe); // Simulating cp.async.bulk.tensor behavior pipeline.producer_commit(tile_start_state_pipe.index(), per_cta_bytes); ++tile_start_state_pipe; } int tma_k_iter = num_iterations - tma_k_prologue; PipelineState wr_pipe = tile_start_state_pipe; // Simulating Mainloop TMA Loads CUTE_NO_UNROLL for ( ; tma_k_iter > 0; --tma_k_iter){ pipeline.producer_acquire(wr_pipe); // Simulating cp.async.bulk.tensor behavior pipeline.producer_commit(wr_pipe.index(), per_cta_bytes); // Advance write stage ++wr_pipe; } } } CUTLASS_DEVICE static void math_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe, uint32_t const num_iterations, int* data_ptr) { PipelineState rd_pipe = tile_start_state_pipe; PipelineState release_pipe = rd_pipe; // simulates accumulators + extra reg. pressure int arr[168]; // Init Shared Memory read stages & PhaseBit static constexpr uint32_t K_PIPE_MMAS = 1; static_assert( K_PIPE_MMAS < Stages, "ERROR : Too many MMAs in flight"); // Total number of gemm iterations auto gemm_k_iterations = num_iterations; // Simulating Prologue MMAs int mma_k_prologue = min(K_PIPE_MMAS, gemm_k_iterations); CUTLASS_PRAGMA_UNROLL for (int iter = 0; iter < mma_k_prologue; ++iter) { pipeline.consumer_wait(rd_pipe); warpgroup_arrive(); // GMMA would typically happen here ++rd_pipe; } gemm_k_iterations -= mma_k_prologue; // Simulating Mainloop MMAs CUTLASS_PRAGMA_NO_UNROLL for ( ; gemm_k_iterations > 0; --gemm_k_iterations) { /// Wait on the rd_pipe stage / phase pipeline.consumer_wait(rd_pipe); warpgroup_arrive(); // GMMA would typically happen here // Dummy op - which will never happen // But simulates high register usage. 
CUTE_UNROLL for(int i = 0; i < 168; ++i){ if (threadIdx.x > 384){ arr[i] += data_ptr[i]; } } pipeline.consumer_release(release_pipe); // Advance stages ++rd_pipe; ++release_pipe; } // Dummy op - which will never happen CUTE_UNROLL for(int i = 0; i < 168; ++i){ if (threadIdx.x > 384){ data_ptr[i] = arr[i]; } } // Tail Loop for (int i = 0; i < K_PIPE_MMAS; ++i){ pipeline.consumer_release(release_pipe); ++release_pipe; } } }; struct KernelParams { uint32_t num_iterations; int tiles_per_cluster; int* data_ptr; }; // Goal of this kernel is to complete deadlock-free template <typename ClusterShape, uint32_t Stages> __launch_bounds__(384, 1) __global__ static void pipeline_device(KernelParams params) { extern __shared__ char shared_memory[]; using DispatchPolicy = cutlass::gemm::MainloopSm90TmaGmmaWarpSpecialized<Stages, ClusterShape, cutlass::gemm::KernelTmaWarpSpecializedPersistent>; using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages, ClusterShape>; using PipelineState = typename cutlass::PipelineState<Stages>; /* One for Mainloop and one for Epilogue */ constexpr int StagesPerMathWarpGroup = 2; constexpr int MathWarpGroupCountPersistent = 2; using PingPongBarrier = typename cutlass::OrderedSequenceBarrier<StagesPerMathWarpGroup, MathWarpGroupCountPersistent>; using SharedStorage = SharedStorage<Stages, ClusterShape, PingPongBarrier>; SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory); auto cta_layout = Layout<ClusterShape>{}; // (m,n) -> cta_id int warp_group_idx = __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarpGroup, 0); int warp_group_thread_idx = threadIdx.x % NumThreadsPerWarpGroup; dim3 block_id_in_cluster = cute::block_id_in_cluster(); auto cluster_shape = ClusterShape{}; // #Producers = #RowsInCluster + #ColsInCluster - 1 uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1; uint32_t const TmaTransactionBytes = static_cast<uint32_t>(sizeof(uint32_t) * NumProducers); // mbarrier.init typename MainloopPipeline::Params pipeline_params; pipeline_params.transaction_bytes = TmaTransactionBytes; if (warp_group_idx == 0) { pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; } else { pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; } pipeline_params.is_leader = warp_group_thread_idx == 0; pipeline_params.num_consumers = NumThreadsPerWarpGroup; MainloopPipeline pipeline(shared_storage.pipeline_storage, pipeline_params); PipelineState tile_start_state_pipe; int tiles_per_cluster = params.tiles_per_cluster; /* Offset pipeline start state for Math WG 2 */ if (warp_group_idx == 2) { // Update pipeline state for next persistent tile tile_start_state_pipe.advance(params.num_iterations); tiles_per_cluster--; } typename PingPongBarrier::Params pingpong_params; pingpong_params.group_id = warp_group_idx - 1; // Since DMA Warp Group Idx 0 will not participate pingpong_params.group_size = NumThreadsPerWarpGroup; // Number of threads / participants in a group PingPongBarrier math_wg_barrier(shared_storage.pingpong_storage, pingpong_params); __syncthreads(); // Ensure All CTAs in Cluster have completed init before issuing commits cute::cluster_arrive_relaxed(); cute::cluster_wait(); // Producer/DMA WarpGroup if (warp_group_idx == 0) { cutlass::arch::warpgroup_reg_dealloc<40>(); // For the DMA (prologue) - we start with an opposite phase - since we skip all waits // i.e., we know that the buffer is indeed empty PipelineState tile_prologue_state_pipe = 
make_producer_start_state<MainloopPipeline>(); while (tiles_per_cluster > 0) { CollectiveSimulation<ClusterShape,Stages>::dma_wg_simulation(pipeline, tile_prologue_state_pipe, params.num_iterations); // Update pipeline state for next persistent tile tile_prologue_state_pipe.advance(params.num_iterations); tiles_per_cluster--; } } // Math WarpGropups if(warp_group_idx == 1 || warp_group_idx == 2) { cutlass::arch::warpgroup_reg_alloc<232>(); while (tiles_per_cluster > 0) { // MMA math_wg_barrier.wait(); CollectiveSimulation<ClusterShape,Stages>::math_wg_simulation(pipeline, tile_start_state_pipe, params.num_iterations, params.data_ptr); math_wg_barrier.arrive(); // Epilogue math_wg_barrier.wait(); // Simulates long running stage #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) __nanosleep(100000); #endif math_wg_barrier.arrive(); // Update pipeline state for next persistent tile tile_start_state_pipe.advance(params.num_iterations * 2); tiles_per_cluster -= 2; } } // Makes sure remote SMEM doesn't get destroyed cute::cluster_arrive_relaxed(); cute::cluster_wait(); } ///////////////////////////////////////////////////// /// Device NT GMMA + TMA specialized template<uint32_t Stages_, typename ClusterShape_> struct PipelineTest { // // Data members // static constexpr uint32_t Stages = Stages_; static constexpr uint32_t kBlockSize = 128 * 3; using ClusterShape = ClusterShape_; // // Methods // // Run CuTe GEMM kernel cudaError_t run(uint32_t const kNumIters, cudaStream_t stream = 0) { float elapsed_ms = 0.0f; // Pipeline (multistage pipeline) auto num_stages = Int<Stages>{}; auto cluster_shape = Shape<Int<ClusterShape::kM>, Int<ClusterShape::kN>, _1>{}; // // Configure and launch // int iterations = 1; cudaEvent_t events[2]; cudaError_t result; for (cudaEvent_t & event : events) { result = cudaEventCreate(&event); if (result != cudaSuccess) { std::cerr << "Error: Failed to create event."; return result; } } result = cudaEventRecord(events[0]); if (result != cudaSuccess) { std::cerr << "Error: Failed to record start event."; return result; } for (int iter = 0; iter < iterations; ++iter) { using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages, decltype(cluster_shape)>; constexpr int StagesPerMathWarpGroup = 2; constexpr int MathWarpGroupCountPersistent = 2; int smem_size = int(sizeof(SharedStorage<Stages, decltype(cluster_shape), typename cutlass::OrderedSequenceBarrier<StagesPerMathWarpGroup, MathWarpGroupCountPersistent>>)); result = cudaFuncSetAttribute( pipeline_device<decltype(cluster_shape), Stages>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); // Launch a single Cluster, with kBlockSize threads per CTA dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1); dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); dim3 dimBlock(kBlockSize,1,1); int tiles_per_cluster = (kNumIters % 10) + 1; printf("Persistent version: Tiles per Cluster = %d\n", tiles_per_cluster); const void* kernel = (const void*)pipeline_device<decltype(cluster_shape), Stages>; KernelParams params{kNumIters, tiles_per_cluster, nullptr}; void *kernel_params[] = {&params}; cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params); } result = cudaEventRecord(events[1]); if (result != cudaSuccess) { std::cerr << "Error: Failed to record stop event."; return result; } result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl; return result; } result = 
cudaEventElapsedTime(&elapsed_ms, events[0], events[1]); if (result != cudaSuccess) { std::cerr << "Failed to create event."; return result; } for (cudaEvent_t & event : events) { (void)cudaEventDestroy(event); } return cudaSuccess; } }; #if CUDA_12_0_SM90_FEATURES_SUPPORTED TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage5) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 5; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage5) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 5; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); 
EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } #endif
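The PipelineTest::run() method above brackets the cluster launch with CUDA events to measure elapsed time. For reference, here is a minimal, self-contained sketch of that event-timing pattern; the kernel name dummy_kernel, the problem size, and the launch shape are placeholders for illustration only.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float *x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;  // arbitrary work to time
}

int main() {
  const int n = 1 << 20;
  float *d_x = nullptr;
  cudaMalloc((void**)&d_x, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);                          // enqueue start marker
  dummy_kernel<<<(n + 255) / 256, 256>>>(d_x, n);
  cudaEventRecord(stop);                           // enqueue stop marker
  cudaEventSynchronize(stop);                      // wait until the stop marker has completed

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);          // elapsed time between the two markers
  printf("kernel time: %f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_x);
  return 0;
}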
8a15c18a4f7f07bb3086f97c2eb273b05a208a86.hip
// !!! This is a file automatically generated by hipify!!! /* ----------------------------------------------------------------- * Programmer(s): Slaven Peles, and Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the testing routine to check the MPIPlusX NVECTOR where * the X is the CUDA NVECTOR. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <sundials/sundials_types.h> #include <nvector/nvector_cuda.h> #include <nvector/nvector_mpiplusx.h> #include <sundials/sundials_math.h> #include "test_nvector.h" #include <mpi.h> /* CUDA vector can use unmanaged or managed memory */ enum mem_type { UNMANAGED, MANAGED }; /* ---------------------------------------------------------------------- * Main NVector Testing Routine * --------------------------------------------------------------------*/ int main(int argc, char *argv[]) { int fails = 0; /* counter for test failures */ int globfails = 0; /* counter for test failures */ int retval; /* function return value */ sunindextype local_length; /* local vector length */ sunindextype global_length; /* global vector length */ N_Vector U, V, X; /* local test vectors */ N_Vector plusU, plusV, plusX; /* MPIPlusX test vectors */ N_Vector plusY, plusZ; /* MPIPlusX test vectors */ int print_timing; /* turn timing on/off */ MPI_Comm comm; /* MPI Communicator */ int nprocs, myid; /* Number of procs, proc id */ int i; /* Get processor number and total number of processes */ MPI_Init(&argc, &argv); comm = MPI_COMM_WORLD; MPI_Comm_size(comm, &nprocs); MPI_Comm_rank(comm, &myid); /* check inputs */ if (argc < 3) { if (myid == 0) printf("ERROR: TWO (2) Inputs required: vector length, print timing \n"); MPI_Abort(comm, -1); } local_length = (sunindextype) atol(argv[1]); if (local_length < 1) { if (myid == 0) printf("ERROR: local vector length must be a positive integer \n"); MPI_Abort(comm, -1); } print_timing = atoi(argv[2]); SetTiming(print_timing, myid); /* global length */ global_length = nprocs*local_length; for (i=UNMANAGED; i<=MANAGED; ++i) { if (myid == 0) { if (i==UNMANAGED) { printf("Testing CUDA N_Vector \n"); } else { printf("\nTesting CUDA N_Vector with managed memory \n"); } printf("Vector global length %ld \n", (long int) global_length); printf("MPI processes %d \n", nprocs); } /* Create new local vectors */ X = (i==UNMANAGED) ? 
N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length); if (X == NULL) { if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n"); MPI_Abort(comm, 1); } /* Create the MPI+X vector */ plusX = N_VMake_MPIPlusX(comm, X); if (plusX == NULL) { N_VDestroy(X); if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n"); MPI_Abort(comm, 1); } /* Check vector ID */ fails += Test_N_VGetVectorID(plusX, SUNDIALS_NVEC_MPIPLUSX, myid); /* Check vector length */ fails += Test_N_VGetLength(plusX, myid); /* Check vector communicator */ fails += Test_N_VGetCommunicatorMPI(plusX, &comm, myid); /* Test clone functions */ fails += Test_N_VCloneEmpty(plusX, myid); fails += Test_N_VClone(plusX, local_length, myid); fails += Test_N_VCloneEmptyVectorArray(5, plusX, myid); fails += Test_N_VCloneVectorArray(5, plusX, local_length, myid); /* Clone additional vectors for testing */ plusY = N_VClone(plusX); if (plusY == NULL) { N_VDestroy(X); N_VDestroy(plusX); if (myid == 0) printf("FAIL: Unable to create a new vector \n\n"); MPI_Abort(comm, 1); } plusZ = N_VClone(plusX); if (plusZ == NULL) { N_VDestroy(X); N_VDestroy(plusX); N_VDestroy(plusY); if (myid == 0) printf("FAIL: Unable to create a new vector \n\n"); MPI_Abort(comm, 1); } /* Standard vector operation tests */ if (myid == 0) printf("\nTesting standard vector operations:\n\n"); fails += Test_N_VConst(plusX, local_length, myid); fails += Test_N_VLinearSum(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VProd(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VDiv(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VScale(plusX, plusZ, local_length, myid); fails += Test_N_VAbs(plusX, plusZ, local_length, myid); fails += Test_N_VInv(plusX, plusZ, local_length, myid); fails += Test_N_VAddConst(plusX, plusZ, local_length, myid); fails += Test_N_VDotProd(plusX, plusY, local_length, myid); fails += Test_N_VMaxNorm(plusX, local_length, myid); fails += Test_N_VWrmsNorm(plusX, plusY, local_length, myid); fails += Test_N_VWrmsNormMask(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VMin(plusX, local_length, myid); fails += Test_N_VWL2Norm(plusX, plusY, local_length, myid); fails += Test_N_VL1Norm(plusX, local_length, myid); fails += Test_N_VCompare(plusX, plusZ, local_length, myid); fails += Test_N_VInvTest(plusX, plusZ, local_length, myid); fails += Test_N_VConstrMask(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VMinQuotient(plusX, plusY, local_length, myid); /* Fused and vector array operations tests (disabled) */ if (myid == 0) printf("\nTesting fused and vector array operations (disabled):\n\n"); /* create vector and disable all fused and vector array operations */ U = (i==UNMANAGED) ? 
N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length); retval = N_VEnableFusedOps_Cuda(U, SUNFALSE); if (U == NULL || retval != 0) { N_VDestroy(X); N_VDestroy(plusX); N_VDestroy(plusY); N_VDestroy(plusZ); if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n"); MPI_Abort(comm, 1); } /* create the MPIPlusX vector */ plusU = N_VMake_MPIPlusX(comm, U); if (U == NULL || retval != 0) { N_VDestroy(X); N_VDestroy(U); N_VDestroy(plusX); N_VDestroy(plusY); N_VDestroy(plusZ); if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n"); MPI_Abort(comm, 1); } /* fused operations */ fails += Test_N_VLinearCombination(plusU, local_length, myid); fails += Test_N_VScaleAddMulti(plusU, local_length, myid); fails += Test_N_VDotProdMulti(plusU, local_length, myid); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(plusU, local_length, myid); fails += Test_N_VScaleVectorArray(plusU, local_length, myid); fails += Test_N_VConstVectorArray(plusU, local_length, myid); fails += Test_N_VWrmsNormVectorArray(plusU, local_length, myid); fails += Test_N_VWrmsNormMaskVectorArray(plusU, local_length, myid); fails += Test_N_VScaleAddMultiVectorArray(plusU, local_length, myid); fails += Test_N_VLinearCombinationVectorArray(plusU, local_length, myid); /* Fused and vector array operations tests (enabled) */ if (myid == 0) printf("\nTesting fused and vector array operations (enabled):\n\n"); /* create vector and enable all fused and vector array operations */ V = (i==UNMANAGED) ? N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length); retval = N_VEnableFusedOps_Cuda(V, SUNTRUE); if (V == NULL || retval != 0) { N_VDestroy(X); N_VDestroy(U); N_VDestroy(plusX); N_VDestroy(plusY); N_VDestroy(plusZ); N_VDestroy(plusU); if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n"); MPI_Abort(comm, 1); } /* create the MPIPlusX vector */ plusV = N_VMake_MPIPlusX(comm, V); if (V == NULL || retval != 0) { N_VDestroy(X); N_VDestroy(U); N_VDestroy(V); N_VDestroy(plusU); N_VDestroy(plusX); N_VDestroy(plusY); N_VDestroy(plusZ); if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n"); MPI_Abort(comm, 1); } /* fused operations */ fails += Test_N_VLinearCombination(plusV, local_length, myid); fails += Test_N_VScaleAddMulti(plusV, local_length, myid); fails += Test_N_VDotProdMulti(plusV, local_length, myid); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(plusV, local_length, myid); fails += Test_N_VScaleVectorArray(plusV, local_length, myid); fails += Test_N_VConstVectorArray(plusV, local_length, myid); fails += Test_N_VWrmsNormVectorArray(plusV, local_length, myid); fails += Test_N_VWrmsNormMaskVectorArray(plusV, local_length, myid); fails += Test_N_VScaleAddMultiVectorArray(plusV, local_length, myid); fails += Test_N_VLinearCombinationVectorArray(plusV, local_length, myid); /* local reduction operations */ printf("\nTesting local reduction operations:\n\n"); fails += Test_N_VDotProdLocal(plusX, plusY, local_length, myid); fails += Test_N_VMaxNormLocal(plusX, local_length, myid); fails += Test_N_VMinLocal(plusX, local_length, myid); fails += Test_N_VL1NormLocal(plusX, local_length, myid); fails += Test_N_VWSqrSumLocal(plusX, plusY, local_length, myid); fails += Test_N_VWSqrSumMaskLocal(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VInvTestLocal(plusX, plusZ, local_length, myid); fails += Test_N_VConstrMaskLocal(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VMinQuotientLocal(plusX, plusY, local_length, myid); /* 
XBraid interface operations */ printf("\nTesting XBraid interface operations:\n\n"); fails += Test_N_VBufSize(plusX, local_length, myid); fails += Test_N_VBufPack(plusX, local_length, myid); fails += Test_N_VBufUnpack(plusX, local_length, myid); /* Free vectors */ N_VDestroy(X); N_VDestroy(U); N_VDestroy(V); N_VDestroy(plusX); N_VDestroy(plusY); N_VDestroy(plusZ); N_VDestroy(plusU); N_VDestroy(plusV); } /* Print result */ if (fails) { printf("FAIL: NVector module failed %i tests, Proc %d \n\n", fails, myid); } else { if (myid == 0) printf("SUCCESS: NVector module passed all tests \n\n"); } /* check if any other process failed */ (void) MPI_Allreduce(&fails, &globfails, 1, MPI_INT, MPI_MAX, comm); MPI_Finalize(); return(globfails); } /* ---------------------------------------------------------------------- * Implementation specific utility functions for vector tests * --------------------------------------------------------------------*/ int check_ans(realtype ans, N_Vector plusX, sunindextype local_length) { int failure = 0; sunindextype i; realtype *Xdata; N_Vector X; X = N_VGetLocalVector_MPIPlusX(plusX); N_VCopyFromDevice_Cuda(X); Xdata = N_VGetHostArrayPointer_Cuda(X); /* check vector data */ for (i = 0; i < local_length; i++) { failure += FNEQ(Xdata[i], ans); } return (failure > ZERO) ? (1) : (0); } booleantype has_data(N_Vector plusX) { N_Vector X = N_VGetLocalVector_MPIPlusX(plusX); /* check if vector data is non-null */ if ((N_VGetHostArrayPointer_Cuda(X) == NULL) && (N_VGetDeviceArrayPointer_Cuda(X) == NULL)) return SUNFALSE; return SUNTRUE; } void set_element(N_Vector plusX, sunindextype i, realtype val) { /* set i-th element of data array */ set_element_range(plusX, i, i, val); } void set_element_range(N_Vector plusX, sunindextype is, sunindextype ie, realtype val) { sunindextype i; realtype* xd; N_Vector X; X = N_VGetLocalVector_MPIPlusX(plusX); /* set elements [is,ie] of the data array */ N_VCopyFromDevice_Cuda(X); xd = N_VGetHostArrayPointer_Cuda(X); for(i = is; i <= ie; i++) xd[i] = val; N_VCopyToDevice_Cuda(X); } realtype get_element(N_Vector plusX, sunindextype i) { N_Vector X = N_VGetLocalVector_MPIPlusX(plusX); /* get i-th element of data array */ N_VCopyFromDevice_Cuda(X); return (N_VGetHostArrayPointer_Cuda(X))[i]; } double max_time(N_Vector plusX, double time) { MPI_Comm *comm; double maxt; comm = (MPI_Comm*) N_VGetCommunicator(plusX); /* get max time across all MPI ranks */ (void) MPI_Reduce(&time, &maxt, 1, MPI_DOUBLE, MPI_MAX, 0, *comm); return(maxt); } void sync_device(N_Vector x) { /* sync with GPU */ hipDeviceSynchronize(); return; }
8a15c18a4f7f07bb3086f97c2eb273b05a208a86.cu
/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles, and Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the testing routine to check the MPIPlusX NVECTOR where * the X is the CUDA NVECTOR. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <sundials/sundials_types.h> #include <nvector/nvector_cuda.h> #include <nvector/nvector_mpiplusx.h> #include <sundials/sundials_math.h> #include "test_nvector.h" #include <mpi.h> /* CUDA vector can use unmanaged or managed memory */ enum mem_type { UNMANAGED, MANAGED }; /* ---------------------------------------------------------------------- * Main NVector Testing Routine * --------------------------------------------------------------------*/ int main(int argc, char *argv[]) { int fails = 0; /* counter for test failures */ int globfails = 0; /* counter for test failures */ int retval; /* function return value */ sunindextype local_length; /* local vector length */ sunindextype global_length; /* global vector length */ N_Vector U, V, X; /* local test vectors */ N_Vector plusU, plusV, plusX; /* MPIPlusX test vectors */ N_Vector plusY, plusZ; /* MPIPlusX test vectors */ int print_timing; /* turn timing on/off */ MPI_Comm comm; /* MPI Communicator */ int nprocs, myid; /* Number of procs, proc id */ int i; /* Get processor number and total number of processes */ MPI_Init(&argc, &argv); comm = MPI_COMM_WORLD; MPI_Comm_size(comm, &nprocs); MPI_Comm_rank(comm, &myid); /* check inputs */ if (argc < 3) { if (myid == 0) printf("ERROR: TWO (2) Inputs required: vector length, print timing \n"); MPI_Abort(comm, -1); } local_length = (sunindextype) atol(argv[1]); if (local_length < 1) { if (myid == 0) printf("ERROR: local vector length must be a positive integer \n"); MPI_Abort(comm, -1); } print_timing = atoi(argv[2]); SetTiming(print_timing, myid); /* global length */ global_length = nprocs*local_length; for (i=UNMANAGED; i<=MANAGED; ++i) { if (myid == 0) { if (i==UNMANAGED) { printf("Testing CUDA N_Vector \n"); } else { printf("\nTesting CUDA N_Vector with managed memory \n"); } printf("Vector global length %ld \n", (long int) global_length); printf("MPI processes %d \n", nprocs); } /* Create new local vectors */ X = (i==UNMANAGED) ? 
N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length); if (X == NULL) { if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n"); MPI_Abort(comm, 1); } /* Create the MPI+X vector */ plusX = N_VMake_MPIPlusX(comm, X); if (plusX == NULL) { N_VDestroy(X); if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n"); MPI_Abort(comm, 1); } /* Check vector ID */ fails += Test_N_VGetVectorID(plusX, SUNDIALS_NVEC_MPIPLUSX, myid); /* Check vector length */ fails += Test_N_VGetLength(plusX, myid); /* Check vector communicator */ fails += Test_N_VGetCommunicatorMPI(plusX, &comm, myid); /* Test clone functions */ fails += Test_N_VCloneEmpty(plusX, myid); fails += Test_N_VClone(plusX, local_length, myid); fails += Test_N_VCloneEmptyVectorArray(5, plusX, myid); fails += Test_N_VCloneVectorArray(5, plusX, local_length, myid); /* Clone additional vectors for testing */ plusY = N_VClone(plusX); if (plusY == NULL) { N_VDestroy(X); N_VDestroy(plusX); if (myid == 0) printf("FAIL: Unable to create a new vector \n\n"); MPI_Abort(comm, 1); } plusZ = N_VClone(plusX); if (plusZ == NULL) { N_VDestroy(X); N_VDestroy(plusX); N_VDestroy(plusY); if (myid == 0) printf("FAIL: Unable to create a new vector \n\n"); MPI_Abort(comm, 1); } /* Standard vector operation tests */ if (myid == 0) printf("\nTesting standard vector operations:\n\n"); fails += Test_N_VConst(plusX, local_length, myid); fails += Test_N_VLinearSum(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VProd(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VDiv(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VScale(plusX, plusZ, local_length, myid); fails += Test_N_VAbs(plusX, plusZ, local_length, myid); fails += Test_N_VInv(plusX, plusZ, local_length, myid); fails += Test_N_VAddConst(plusX, plusZ, local_length, myid); fails += Test_N_VDotProd(plusX, plusY, local_length, myid); fails += Test_N_VMaxNorm(plusX, local_length, myid); fails += Test_N_VWrmsNorm(plusX, plusY, local_length, myid); fails += Test_N_VWrmsNormMask(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VMin(plusX, local_length, myid); fails += Test_N_VWL2Norm(plusX, plusY, local_length, myid); fails += Test_N_VL1Norm(plusX, local_length, myid); fails += Test_N_VCompare(plusX, plusZ, local_length, myid); fails += Test_N_VInvTest(plusX, plusZ, local_length, myid); fails += Test_N_VConstrMask(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VMinQuotient(plusX, plusY, local_length, myid); /* Fused and vector array operations tests (disabled) */ if (myid == 0) printf("\nTesting fused and vector array operations (disabled):\n\n"); /* create vector and disable all fused and vector array operations */ U = (i==UNMANAGED) ? 
N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length); retval = N_VEnableFusedOps_Cuda(U, SUNFALSE); if (U == NULL || retval != 0) { N_VDestroy(X); N_VDestroy(plusX); N_VDestroy(plusY); N_VDestroy(plusZ); if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n"); MPI_Abort(comm, 1); } /* create the MPIPlusX vector */ plusU = N_VMake_MPIPlusX(comm, U); if (U == NULL || retval != 0) { N_VDestroy(X); N_VDestroy(U); N_VDestroy(plusX); N_VDestroy(plusY); N_VDestroy(plusZ); if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n"); MPI_Abort(comm, 1); } /* fused operations */ fails += Test_N_VLinearCombination(plusU, local_length, myid); fails += Test_N_VScaleAddMulti(plusU, local_length, myid); fails += Test_N_VDotProdMulti(plusU, local_length, myid); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(plusU, local_length, myid); fails += Test_N_VScaleVectorArray(plusU, local_length, myid); fails += Test_N_VConstVectorArray(plusU, local_length, myid); fails += Test_N_VWrmsNormVectorArray(plusU, local_length, myid); fails += Test_N_VWrmsNormMaskVectorArray(plusU, local_length, myid); fails += Test_N_VScaleAddMultiVectorArray(plusU, local_length, myid); fails += Test_N_VLinearCombinationVectorArray(plusU, local_length, myid); /* Fused and vector array operations tests (enabled) */ if (myid == 0) printf("\nTesting fused and vector array operations (enabled):\n\n"); /* create vector and enable all fused and vector array operations */ V = (i==UNMANAGED) ? N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length); retval = N_VEnableFusedOps_Cuda(V, SUNTRUE); if (V == NULL || retval != 0) { N_VDestroy(X); N_VDestroy(U); N_VDestroy(plusX); N_VDestroy(plusY); N_VDestroy(plusZ); N_VDestroy(plusU); if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n"); MPI_Abort(comm, 1); } /* create the MPIPlusX vector */ plusV = N_VMake_MPIPlusX(comm, V); if (V == NULL || retval != 0) { N_VDestroy(X); N_VDestroy(U); N_VDestroy(V); N_VDestroy(plusU); N_VDestroy(plusX); N_VDestroy(plusY); N_VDestroy(plusZ); if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n"); MPI_Abort(comm, 1); } /* fused operations */ fails += Test_N_VLinearCombination(plusV, local_length, myid); fails += Test_N_VScaleAddMulti(plusV, local_length, myid); fails += Test_N_VDotProdMulti(plusV, local_length, myid); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(plusV, local_length, myid); fails += Test_N_VScaleVectorArray(plusV, local_length, myid); fails += Test_N_VConstVectorArray(plusV, local_length, myid); fails += Test_N_VWrmsNormVectorArray(plusV, local_length, myid); fails += Test_N_VWrmsNormMaskVectorArray(plusV, local_length, myid); fails += Test_N_VScaleAddMultiVectorArray(plusV, local_length, myid); fails += Test_N_VLinearCombinationVectorArray(plusV, local_length, myid); /* local reduction operations */ printf("\nTesting local reduction operations:\n\n"); fails += Test_N_VDotProdLocal(plusX, plusY, local_length, myid); fails += Test_N_VMaxNormLocal(plusX, local_length, myid); fails += Test_N_VMinLocal(plusX, local_length, myid); fails += Test_N_VL1NormLocal(plusX, local_length, myid); fails += Test_N_VWSqrSumLocal(plusX, plusY, local_length, myid); fails += Test_N_VWSqrSumMaskLocal(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VInvTestLocal(plusX, plusZ, local_length, myid); fails += Test_N_VConstrMaskLocal(plusX, plusY, plusZ, local_length, myid); fails += Test_N_VMinQuotientLocal(plusX, plusY, local_length, myid); /* 
XBraid interface operations */ printf("\nTesting XBraid interface operations:\n\n"); fails += Test_N_VBufSize(plusX, local_length, myid); fails += Test_N_VBufPack(plusX, local_length, myid); fails += Test_N_VBufUnpack(plusX, local_length, myid); /* Free vectors */ N_VDestroy(X); N_VDestroy(U); N_VDestroy(V); N_VDestroy(plusX); N_VDestroy(plusY); N_VDestroy(plusZ); N_VDestroy(plusU); N_VDestroy(plusV); } /* Print result */ if (fails) { printf("FAIL: NVector module failed %i tests, Proc %d \n\n", fails, myid); } else { if (myid == 0) printf("SUCCESS: NVector module passed all tests \n\n"); } /* check if any other process failed */ (void) MPI_Allreduce(&fails, &globfails, 1, MPI_INT, MPI_MAX, comm); MPI_Finalize(); return(globfails); } /* ---------------------------------------------------------------------- * Implementation specific utility functions for vector tests * --------------------------------------------------------------------*/ int check_ans(realtype ans, N_Vector plusX, sunindextype local_length) { int failure = 0; sunindextype i; realtype *Xdata; N_Vector X; X = N_VGetLocalVector_MPIPlusX(plusX); N_VCopyFromDevice_Cuda(X); Xdata = N_VGetHostArrayPointer_Cuda(X); /* check vector data */ for (i = 0; i < local_length; i++) { failure += FNEQ(Xdata[i], ans); } return (failure > ZERO) ? (1) : (0); } booleantype has_data(N_Vector plusX) { N_Vector X = N_VGetLocalVector_MPIPlusX(plusX); /* check if vector data is non-null */ if ((N_VGetHostArrayPointer_Cuda(X) == NULL) && (N_VGetDeviceArrayPointer_Cuda(X) == NULL)) return SUNFALSE; return SUNTRUE; } void set_element(N_Vector plusX, sunindextype i, realtype val) { /* set i-th element of data array */ set_element_range(plusX, i, i, val); } void set_element_range(N_Vector plusX, sunindextype is, sunindextype ie, realtype val) { sunindextype i; realtype* xd; N_Vector X; X = N_VGetLocalVector_MPIPlusX(plusX); /* set elements [is,ie] of the data array */ N_VCopyFromDevice_Cuda(X); xd = N_VGetHostArrayPointer_Cuda(X); for(i = is; i <= ie; i++) xd[i] = val; N_VCopyToDevice_Cuda(X); } realtype get_element(N_Vector plusX, sunindextype i) { N_Vector X = N_VGetLocalVector_MPIPlusX(plusX); /* get i-th element of data array */ N_VCopyFromDevice_Cuda(X); return (N_VGetHostArrayPointer_Cuda(X))[i]; } double max_time(N_Vector plusX, double time) { MPI_Comm *comm; double maxt; comm = (MPI_Comm*) N_VGetCommunicator(plusX); /* get max time across all MPI ranks */ (void) MPI_Reduce(&time, &maxt, 1, MPI_DOUBLE, MPI_MAX, 0, *comm); return(maxt); } void sync_device(N_Vector x) { /* sync with GPU */ cudaDeviceSynchronize(); return; }
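The test above runs every vector operation twice, once with an unmanaged CUDA vector (N_VNew_Cuda) and once with a managed one (N_VNewManaged_Cuda). As a standalone illustration of the unified (managed) memory pattern that the managed variant refers to, independent of SUNDIALS, the following minimal sketch (the kernel add_one and the length are placeholders) shows why no explicit host/device copies are needed in that path:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void add_one(double *v, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] += 1.0;
}

int main() {
  const int n = 1000;

  // Managed (unified) memory: host and device share one pointer,
  // so no explicit cudaMemcpy is needed around the launch.
  double *v = nullptr;
  cudaMallocManaged((void**)&v, n * sizeof(double));
  for (int i = 0; i < n; ++i) v[i] = 1.0;

  add_one<<<(n + 127) / 128, 128>>>(v, n);
  cudaDeviceSynchronize();      // make device writes visible to the host

  printf("v[0] = %f\n", v[0]);  // expect 2.0
  cudaFree(v);
  return 0;
}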
cmp_cmp_ptx.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include "cuda_intrinsics.h" #include "cuda_warptrace.h" #define FLTSIZE sizeof(float) inline int BLK(int data, int blocksize) { return (data + blocksize - 1) / blocksize; } __global__ void kernel_vectorAdd (const float* __restrict__ a_d, const float* __restrict__ b_d, const int N, const int offset, float *c_d, uint *trace_sm, uint *trace_blk, double *trace_start, double *trace_end ) { int tid = threadIdx.x + __mul24(blockIdx.x, blockDim.x); // start trace_sm[tid] = get_smid(); trace_blk[tid] = get_global_blkid(); double start_time = double(timer()) * 1e-6; trace_start[tid] = start_time; float sum = 0.f; if(tid < N) { float a = a_d[tid + offset]; float b = b_d[tid + offset]; for(int i=0; i<64; i++) { sum += a + b; sum += a * b; sum += sum * b; sum += sum * a; sum += a / b; sum += log(a) + log(b); } c_d[tid + offset] = sum; } // end double end_time = double(timer()) * 1e-6; trace_end[tid] = end_time; } int main( int argc, char **argv) { int devid = 0 ; int num_streams = 8; int N = 1 << 20; int dump_trace = 0; if(argc >= 2) num_streams = atoi(argv[1]); if(argc >= 3) devid = atoi(argv[2]); if(argc >= 4) N = atoi(argv[3]); if(argc >= 5) dump_trace = atoi(argv[4]); hipSetDevice(devid); // allocate streams hipStream_t *streams = (hipStream_t *) malloc(num_streams * sizeof(hipStream_t)); // init for (int i = 0; i < num_streams; i++) { checkCudaErrors(hipStreamCreate(&(streams[i]))); } //------------------------------------------------------------------------// // allocate data on the host //------------------------------------------------------------------------// size_t databytes = N * FLTSIZE; float *a_h = NULL; checkCudaErrors(hipHostMalloc((void **)&a_h, N * num_streams * FLTSIZE)); float *b_h = NULL; checkCudaErrors(hipHostMalloc((void **)&b_h, N * num_streams * FLTSIZE)); float *c_h = NULL; checkCudaErrors(hipHostMalloc((void **)&c_h, N * num_streams * FLTSIZE)); for(int i=0; i< N * num_streams; i++) { a_h[i] = 1.1f; b_h[i] = 2.2f; } //------------------------------------------------------------------------// // allocate data on the device //------------------------------------------------------------------------// float *a_d; float *b_d; float *c_d; hipMalloc((void**)&a_d, N * num_streams * FLTSIZE); hipMalloc((void**)&b_d, N * num_streams * FLTSIZE); hipMalloc((void**)&c_d, N * num_streams * FLTSIZE); // kernel configuration dim3 threads = dim3(256, 1, 1); dim3 blocks = dim3(BLK(N, threads.x), 1, 1); // set up warp tracer WarpTrace *streams_trace = new WarpTrace[num_streams]; for(int i=0; i<num_streams; i++) { streams_trace[i].compute_totalthreads(blocks, threads); streams_trace[i].allocate_data(); } // create cuda event handles hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); hipEventRecord(start,0); // copy data to deivce for (int i = 0; i < num_streams; i++) { int offset = i * N; hipMemcpyAsync(&a_d[offset], &a_h[offset], databytes, hipMemcpyHostToDevice, streams[i]); hipMemcpyAsync(&b_d[offset], &b_h[offset], databytes, hipMemcpyHostToDevice, streams[i]); } // launch one worker kernel per stream for (int i = 0; i < num_streams; i++) { int offset = i * N; hipLaunchKernelGGL(( kernel_vectorAdd) , dim3(blocks), dim3(threads), 0, streams[i] , a_d, b_d, N, offset, c_d, streams_trace[i].trace_sm, streams_trace[i].trace_blk, 
streams_trace[i].trace_start, streams_trace[i].trace_end ); } // copy data back to host for (int i = 0; i < num_streams; i++) { int offset = i * N; hipMemcpyAsync(&c_h[offset], &c_d[offset], databytes, hipMemcpyDeviceToHost, streams[i]); } hipDeviceSynchronize(); float gpuTime_ms= 0; hipEventElapsedTime(&gpuTime_ms, start, stop); //printf("runtime (ms) : %f\n", gpuTime_ms); if(dump_trace == 1) { printf("thread_id,stream_id,sm_id,block_id,start,end\n"); for (int i = 0; i < num_streams; i++) { streams_trace[i].print_log(i); } } //------------------------------------------------------------------------// // free //------------------------------------------------------------------------// for (int i = 0; i < num_streams; i++) { checkCudaErrors(hipStreamDestroy(streams[i])); } checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); hipHostFree(a_h); hipHostFree(b_h); hipHostFree(c_h); hipFree(a_d); hipFree(b_d); hipFree(c_d); delete [] streams_trace; hipDeviceReset(); return 0; }
cmp_cmp_ptx.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include "cuda_intrinsics.h" #include "cuda_warptrace.h" #define FLTSIZE sizeof(float) inline int BLK(int data, int blocksize) { return (data + blocksize - 1) / blocksize; } __global__ void kernel_vectorAdd (const float* __restrict__ a_d, const float* __restrict__ b_d, const int N, const int offset, float *c_d, uint *trace_sm, uint *trace_blk, double *trace_start, double *trace_end ) { int tid = threadIdx.x + __mul24(blockIdx.x, blockDim.x); // start trace_sm[tid] = get_smid(); trace_blk[tid] = get_global_blkid(); double start_time = double(timer()) * 1e-6; trace_start[tid] = start_time; float sum = 0.f; if(tid < N) { float a = a_d[tid + offset]; float b = b_d[tid + offset]; for(int i=0; i<64; i++) { sum += a + b; sum += a * b; sum += sum * b; sum += sum * a; sum += a / b; sum += log(a) + log(b); } c_d[tid + offset] = sum; } // end double end_time = double(timer()) * 1e-6; trace_end[tid] = end_time; } int main( int argc, char **argv) { int devid = 0 ; int num_streams = 8; int N = 1 << 20; int dump_trace = 0; if(argc >= 2) num_streams = atoi(argv[1]); if(argc >= 3) devid = atoi(argv[2]); if(argc >= 4) N = atoi(argv[3]); if(argc >= 5) dump_trace = atoi(argv[4]); cudaSetDevice(devid); // allocate streams cudaStream_t *streams = (cudaStream_t *) malloc(num_streams * sizeof(cudaStream_t)); // init for (int i = 0; i < num_streams; i++) { checkCudaErrors(cudaStreamCreate(&(streams[i]))); } //------------------------------------------------------------------------// // allocate data on the host //------------------------------------------------------------------------// size_t databytes = N * FLTSIZE; float *a_h = NULL; checkCudaErrors(cudaMallocHost((void **)&a_h, N * num_streams * FLTSIZE)); float *b_h = NULL; checkCudaErrors(cudaMallocHost((void **)&b_h, N * num_streams * FLTSIZE)); float *c_h = NULL; checkCudaErrors(cudaMallocHost((void **)&c_h, N * num_streams * FLTSIZE)); for(int i=0; i< N * num_streams; i++) { a_h[i] = 1.1f; b_h[i] = 2.2f; } //------------------------------------------------------------------------// // allocate data on the device //------------------------------------------------------------------------// float *a_d; float *b_d; float *c_d; cudaMalloc((void**)&a_d, N * num_streams * FLTSIZE); cudaMalloc((void**)&b_d, N * num_streams * FLTSIZE); cudaMalloc((void**)&c_d, N * num_streams * FLTSIZE); // kernel configuration dim3 threads = dim3(256, 1, 1); dim3 blocks = dim3(BLK(N, threads.x), 1, 1); // set up warp tracer WarpTrace *streams_trace = new WarpTrace[num_streams]; for(int i=0; i<num_streams; i++) { streams_trace[i].compute_totalthreads(blocks, threads); streams_trace[i].allocate_data(); } // create cuda event handles cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); cudaEventRecord(start,0); // copy data to deivce for (int i = 0; i < num_streams; i++) { int offset = i * N; cudaMemcpyAsync(&a_d[offset], &a_h[offset], databytes, cudaMemcpyHostToDevice, streams[i]); cudaMemcpyAsync(&b_d[offset], &b_h[offset], databytes, cudaMemcpyHostToDevice, streams[i]); } // launch one worker kernel per stream for (int i = 0; i < num_streams; i++) { int offset = i * N; kernel_vectorAdd <<< blocks, threads, 0, streams[i] >>> (a_d, b_d, N, offset, c_d, streams_trace[i].trace_sm, streams_trace[i].trace_blk, streams_trace[i].trace_start, streams_trace[i].trace_end ); } // copy data back to host 
for (int i = 0; i < num_streams; i++) { int offset = i * N; cudaMemcpyAsync(&c_h[offset], &c_d[offset], databytes, cudaMemcpyDeviceToHost, streams[i]); } cudaDeviceSynchronize(); float gpuTime_ms= 0; cudaEventElapsedTime(&gpuTime_ms, start, stop); //printf("runtime (ms) : %f\n", gpuTime_ms); if(dump_trace == 1) { printf("thread_id,stream_id,sm_id,block_id,start,end\n"); for (int i = 0; i < num_streams; i++) { streams_trace[i].print_log(i); } } //------------------------------------------------------------------------// // free //------------------------------------------------------------------------// for (int i = 0; i < num_streams; i++) { checkCudaErrors(cudaStreamDestroy(streams[i])); } checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); cudaFreeHost(a_h); cudaFreeHost(b_h); cudaFreeHost(c_h); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); delete [] streams_trace; cudaDeviceReset(); return 0; }
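cmp_cmp_ptx launches one copy/kernel/copy chain per stream and uses pinned host buffers so the asynchronous copies can actually overlap with work in other streams. Below is a stripped-down sketch of that multi-stream pattern; the kernel scale, the stream count, and the sizes are illustrative placeholders.

#include <cuda_runtime.h>

__global__ void scale(float *x, int n, float s) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= s;
}

int main() {
  const int num_streams = 4;
  const int n = 1 << 20;
  const size_t bytes = n * sizeof(float);

  float *h = nullptr, *d = nullptr;
  cudaMallocHost((void**)&h, num_streams * bytes);  // pinned host memory enables true async copies
  cudaMalloc((void**)&d, num_streams * bytes);
  for (int i = 0; i < num_streams * n; ++i) h[i] = 1.0f;

  cudaStream_t streams[num_streams];
  for (int i = 0; i < num_streams; ++i) cudaStreamCreate(&streams[i]);

  for (int i = 0; i < num_streams; ++i) {
    int off = i * n;
    cudaMemcpyAsync(d + off, h + off, bytes, cudaMemcpyHostToDevice, streams[i]);
    scale<<<(n + 255) / 256, 256, 0, streams[i]>>>(d + off, n, 2.0f);
    cudaMemcpyAsync(h + off, d + off, bytes, cudaMemcpyDeviceToHost, streams[i]);
  }
  cudaDeviceSynchronize();  // wait for all streams to finish

  for (int i = 0; i < num_streams; ++i) cudaStreamDestroy(streams[i]);
  cudaFree(d);
  cudaFreeHost(h);
  return 0;
}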
39a52ade80f9441dc9da9b21cee122205a8e50e7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample,
                                     const float *grad_out, const int *idx,
                                     float *grad_points) {
    int batch_index = blockIdx.x;
    idx += m*nsample*batch_index;
    grad_out += m*nsample*c*batch_index;
    grad_points += n*c*batch_index;

    int index = threadIdx.x;
    int stride = blockDim.x;
    for (int j=index;j<m;j+=stride) {
        for (int k=0;k<nsample;++k) {
            int ii = idx[j*nsample+k];
            for (int l=0;l<c;++l) {
                // Use atomic add to avoid race condition
                atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]);
            }
        }
    }
}
39a52ade80f9441dc9da9b21cee122205a8e50e7.cu
#include "includes.h" __global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) { int batch_index = blockIdx.x; idx += m*nsample*batch_index; grad_out += m*nsample*c*batch_index; grad_points += n*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { // Use atomic add to avoid race condition atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]); } } } }
28c168561fba9748350c7bbf6db6320cd4bbf893.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" // input: points(b, c, n) idx(b, npoints, nsample) // output: out(b, c, npoints, nsample) __global__ void group_points_kernel(int b, int c, int n, int npoints, int nsample, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { int batch_index = blockIdx.x; points += batch_index * n * c; idx += batch_index * npoints * nsample; out += batch_index * npoints * nsample * c; const int index = threadIdx.y * blockDim.x + threadIdx.x; const int stride = blockDim.y * blockDim.x; for (int i = index; i < c * npoints; i += stride) { const int l = i / npoints; const int j = i % npoints; for (int k = 0; k < nsample; ++k) { int ii = idx[j * nsample + k]; out[(l * npoints + j) * nsample + k] = points[l * n + ii]; } } } void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample, const float *points, const int *idx, float *out) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( group_points_kernel), dim3(b), dim3(opt_block_config(npoints, c)), 0, stream, b, c, n, npoints, nsample, points, idx, out); // CUDA_CHECK_ERRORS(); } // input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample) // output: grad_points(b, c, n) __global__ void group_points_grad_kernel(int b, int c, int n, int npoints, int nsample, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { int batch_index = blockIdx.x; grad_out += batch_index * npoints * nsample * c; idx += batch_index * npoints * nsample; grad_points += batch_index * n * c; const int index = threadIdx.y * blockDim.x + threadIdx.x; const int stride = blockDim.y * blockDim.x; for (int i = index; i < c * npoints; i += stride) { const int l = i / npoints; const int j = i % npoints; for (int k = 0; k < nsample; ++k) { int ii = idx[j * nsample + k]; atomicAdd(grad_points + l * n + ii, grad_out[(l * npoints + j) * nsample + k]); } } } void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints, int nsample, const float *grad_out, const int *idx, float *grad_points) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( group_points_grad_kernel), dim3(b), dim3(opt_block_config(npoints, c)), 0, stream, b, c, n, npoints, nsample, grad_out, idx, grad_points); //CUDA_CHECK_ERRORS(); }
28c168561fba9748350c7bbf6db6320cd4bbf893.cu
#include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" // input: points(b, c, n) idx(b, npoints, nsample) // output: out(b, c, npoints, nsample) __global__ void group_points_kernel(int b, int c, int n, int npoints, int nsample, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { int batch_index = blockIdx.x; points += batch_index * n * c; idx += batch_index * npoints * nsample; out += batch_index * npoints * nsample * c; const int index = threadIdx.y * blockDim.x + threadIdx.x; const int stride = blockDim.y * blockDim.x; for (int i = index; i < c * npoints; i += stride) { const int l = i / npoints; const int j = i % npoints; for (int k = 0; k < nsample; ++k) { int ii = idx[j * nsample + k]; out[(l * npoints + j) * nsample + k] = points[l * n + ii]; } } } void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample, const float *points, const int *idx, float *out) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); group_points_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>( b, c, n, npoints, nsample, points, idx, out); // CUDA_CHECK_ERRORS(); } // input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample) // output: grad_points(b, c, n) __global__ void group_points_grad_kernel(int b, int c, int n, int npoints, int nsample, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { int batch_index = blockIdx.x; grad_out += batch_index * npoints * nsample * c; idx += batch_index * npoints * nsample; grad_points += batch_index * n * c; const int index = threadIdx.y * blockDim.x + threadIdx.x; const int stride = blockDim.y * blockDim.x; for (int i = index; i < c * npoints; i += stride) { const int l = i / npoints; const int j = i % npoints; for (int k = 0; k < nsample; ++k) { int ii = idx[j * nsample + k]; atomicAdd(grad_points + l * n + ii, grad_out[(l * npoints + j) * nsample + k]); } } } void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints, int nsample, const float *grad_out, const int *idx, float *grad_points) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); group_points_grad_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>( b, c, n, npoints, nsample, grad_out, idx, grad_points); //CUDA_CHECK_ERRORS(); }
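Both group_points kernels above flatten the (channel, point) workload into a single index and stride over it with the block's full 2D thread count, one CUDA block per batch element. A minimal sketch of just that indexing scheme follows; the kernel sweep is hypothetical, and the fixed dim3(32, 8) block is only a stand-in for whatever opt_block_config(npoints, c) returns.

#include <cuda_runtime.h>

// Block-stride loop over a flattened (c x npoints) workload, mirroring the
// indexing in group_points_kernel: the block's threads sweep all
// (channel, point) pairs regardless of the block shape chosen at launch.
__global__ void sweep(float *out, int c, int npoints) {
  const int index  = threadIdx.y * blockDim.x + threadIdx.x;
  const int stride = blockDim.y * blockDim.x;
  for (int i = index; i < c * npoints; i += stride) {
    const int l = i / npoints;  // channel
    const int j = i % npoints;  // point
    out[l * npoints + j] = static_cast<float>(l * 1000 + j);
  }
}

int main() {
  int c = 16, npoints = 512;
  float *d_out;
  cudaMalloc((void**)&d_out, c * npoints * sizeof(float));
  dim3 block(32, 8);  // stand-in for opt_block_config(npoints, c)
  sweep<<<1, block>>>(d_out, c, npoints);
  cudaDeviceSynchronize();
  cudaFree(d_out);
  return 0;
}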
76cfdf11c74aa416878fec1b660745fb985263bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ********************* * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ // modified from // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu #include "common_cuda_helper.hpp" #include "trt_deform_conv_kernel.cuh" #include "trt_deform_conv_kernel.hpp" #include "trt_plugin_helper.hpp" template <typename scalar_t> void deform_conv_im2col(const scalar_t* input, const scalar_t* offset, scalar_t* column, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, hipStream_t stream) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; hipLaunchKernelGGL(( deformable_im2col_gpu_kernel<scalar_t>), dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, input, offset, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, column); cudaCheckError(); } template <typename scalar_t> void deform_conv(const scalar_t* input, const scalar_t* weight, const scalar_t* offset, scalar_t* output, void* workspace, int batchSize, int nInputPlane, int inputHeight, int inputWidth, int nOutputPlane, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group, int deformable_group, int im2col_step, hipblasHandle_t cublas_handle, hipStream_t stream) { size_t word_size = sizeof(scalar_t); im2col_step = ::min(int(batchSize), im2col_step); long outputWidth = (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; long outputHeight = (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; long outputHW = outputHeight * outputWidth; long kHW = kH * kW; long columns_size = mmdeploy::getAlignedSize(nInputPlane * kHW * im2col_step * outputHW * word_size); // column buffer for img2col char* workspace_ptr = reinterpret_cast<char*>(workspace); scalar_t* columns = reinterpret_cast<scalar_t*>(workspace_ptr); workspace_ptr = workspace_ptr + columns_size; scalar_t* output_buffer; if (im2col_step == 1) { output_buffer = output; } else { // output need permute when im2col_step!=1 output_buffer = reinterpret_cast<scalar_t*>(workspace_ptr); } long input_elt_step = im2col_step * nInputPlane * inputHeight * inputWidth; long offset_elt_step = im2col_step * deformable_group * 2 * kHW * outputHW; long out_buffer_step = nOutputPlane * im2col_step * outputHW; long col_g_step = nInputPlane * kHW * im2col_step * outputHW / group; long weight_g_step = nOutputPlane * nInputPlane * kHW / (group * group); long out_buffer_g_step = out_buffer_step / group; int m = nOutputPlane / group; int n = im2col_step * outputHW; int k = nInputPlane * kHW / group; scalar_t alpha = 1.f; scalar_t beta = 0.f; for (int elt = 0; elt < batchSize / im2col_step; elt++) { const scalar_t* input_start = input + elt * input_elt_step; const scalar_t* offset_start = offset + elt * offset_elt_step; deform_conv_im2col<scalar_t>(input_start, offset_start, columns, nInputPlane, inputHeight, inputWidth, kH, 
kW, padH, padW, dH, dW, dilationH, dilationW, im2col_step, deformable_group, stream); for (int g = 0; g < group; ++g) { const scalar_t* weight_start = weight + g * weight_g_step; scalar_t* col_start = columns + g * col_g_step; scalar_t* out_buffer_start = output_buffer + elt * out_buffer_step + g * out_buffer_g_step; cublasGemmWrap<scalar_t>(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, col_start, n, weight_start, k, &beta, out_buffer_start, n); cudaCheckError(); } } if (im2col_step != 1) { int output_buffer_shape[5] = {batchSize / im2col_step, nOutputPlane, im2col_step, static_cast<int>(outputHeight), static_cast<int>(outputWidth)}; int output_buffer_permute[5] = {0, 2, 1, 3, 4}; memcpyPermute<scalar_t>(output, output_buffer, &output_buffer_shape[0], &output_buffer_permute[0], 5, stream); } } template void deform_conv<float>(const float* input, const float* weight, const float* offset, float* output, void* workspace, int batchSize, int nInputPlane, int inputHeight, int inputWidth, int nOutputPlane, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group, int deformable_group, int im2col_step, hipblasHandle_t cublas_handle, hipStream_t stream); template void deform_conv<__half>(const __half* input, const __half* weight, const __half* offset, __half* output, void* workspace, int batchSize, int nInputPlane, int inputHeight, int inputWidth, int nOutputPlane, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group, int deformable_group, int im2col_step, hipblasHandle_t cublas_handle, hipStream_t stream);
76cfdf11c74aa416878fec1b660745fb985263bd.cu
/*! ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ********************* * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ // modified from // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu #include "common_cuda_helper.hpp" #include "trt_deform_conv_kernel.cuh" #include "trt_deform_conv_kernel.hpp" #include "trt_plugin_helper.hpp" template <typename scalar_t> void deform_conv_im2col(const scalar_t* input, const scalar_t* offset, scalar_t* column, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, cudaStream_t stream) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; deformable_im2col_gpu_kernel<scalar_t><<<GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, input, offset, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, column); cudaCheckError(); } template <typename scalar_t> void deform_conv(const scalar_t* input, const scalar_t* weight, const scalar_t* offset, scalar_t* output, void* workspace, int batchSize, int nInputPlane, int inputHeight, int inputWidth, int nOutputPlane, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group, int deformable_group, int im2col_step, cublasHandle_t cublas_handle, cudaStream_t stream) { size_t word_size = sizeof(scalar_t); im2col_step = std::min(int(batchSize), im2col_step); long outputWidth = (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; long outputHeight = (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; long outputHW = outputHeight * outputWidth; long kHW = kH * kW; long columns_size = mmdeploy::getAlignedSize(nInputPlane * kHW * im2col_step * outputHW * word_size); // column buffer for img2col char* workspace_ptr = reinterpret_cast<char*>(workspace); scalar_t* columns = reinterpret_cast<scalar_t*>(workspace_ptr); workspace_ptr = workspace_ptr + columns_size; scalar_t* output_buffer; if (im2col_step == 1) { output_buffer = output; } else { // output need permute when im2col_step!=1 output_buffer = reinterpret_cast<scalar_t*>(workspace_ptr); } long input_elt_step = im2col_step * nInputPlane * inputHeight * inputWidth; long offset_elt_step = im2col_step * deformable_group * 2 * kHW * outputHW; long out_buffer_step = nOutputPlane * im2col_step * outputHW; long col_g_step = nInputPlane * kHW * im2col_step * outputHW / group; long weight_g_step = nOutputPlane * nInputPlane * kHW / (group * group); long out_buffer_g_step = out_buffer_step / group; int m = nOutputPlane / group; int n = im2col_step * outputHW; int k = nInputPlane * kHW / group; scalar_t alpha = 1.f; scalar_t beta = 0.f; for (int elt = 0; elt < batchSize / im2col_step; elt++) { const scalar_t* input_start = input + elt * input_elt_step; const scalar_t* offset_start = offset + elt * offset_elt_step; deform_conv_im2col<scalar_t>(input_start, offset_start, columns, nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, 
dilationH, dilationW, im2col_step, deformable_group, stream); for (int g = 0; g < group; ++g) { const scalar_t* weight_start = weight + g * weight_g_step; scalar_t* col_start = columns + g * col_g_step; scalar_t* out_buffer_start = output_buffer + elt * out_buffer_step + g * out_buffer_g_step; cublasGemmWrap<scalar_t>(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, col_start, n, weight_start, k, &beta, out_buffer_start, n); cudaCheckError(); } } if (im2col_step != 1) { int output_buffer_shape[5] = {batchSize / im2col_step, nOutputPlane, im2col_step, static_cast<int>(outputHeight), static_cast<int>(outputWidth)}; int output_buffer_permute[5] = {0, 2, 1, 3, 4}; memcpyPermute<scalar_t>(output, output_buffer, &output_buffer_shape[0], &output_buffer_permute[0], 5, stream); } } template void deform_conv<float>(const float* input, const float* weight, const float* offset, float* output, void* workspace, int batchSize, int nInputPlane, int inputHeight, int inputWidth, int nOutputPlane, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group, int deformable_group, int im2col_step, cublasHandle_t cublas_handle, cudaStream_t stream); template void deform_conv<__half>(const __half* input, const __half* weight, const __half* offset, __half* output, void* workspace, int batchSize, int nInputPlane, int inputHeight, int inputWidth, int nOutputPlane, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group, int deformable_group, int im2col_step, cublasHandle_t cublas_handle, cudaStream_t stream);
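The cublasGemmWrap call above passes its GEMM dimensions as (n, m, k) with the column buffer ahead of the weights, which is the usual way to obtain a row-major product from column-major cuBLAS: compute C^T = B^T * A^T. Below is a minimal, self-contained sketch of that convention using plain cublasSgemm; the tiny matrices are made up for illustration.

#include <cstdio>
#include <cublas_v2.h>
#include <cuda_runtime.h>

// Row-major C (m x n) = A (m x k) * B (k x n) via column-major cuBLAS:
// interpret each row-major buffer as its transpose and swap the operands,
// so the call takes dimensions (n, m, k) and B before A.
int main() {
  const int m = 2, k = 3, n = 2;
  float hA[m * k] = {1, 2, 3,
                     4, 5, 6};   // row-major A
  float hB[k * n] = {1, 0,
                     0, 1,
                     1, 1};      // row-major B
  float hC[m * n] = {0};

  float *dA, *dB, *dC;
  cudaMalloc((void**)&dA, sizeof(hA));
  cudaMalloc((void**)&dB, sizeof(hB));
  cudaMalloc((void**)&dC, sizeof(hC));
  cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);
  const float alpha = 1.f, beta = 0.f;
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k,
              &alpha, dB, n, dA, k, &beta, dC, n);
  cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);

  printf("%g %g\n%g %g\n", hC[0], hC[1], hC[2], hC[3]);  // expect: 4 5 / 10 11
  cublasDestroy(handle);
  cudaFree(dA); cudaFree(dB); cudaFree(dC);
  return 0;
}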
d8f17b897899d4e4892379b9bc5766eef7c2a0f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 Nervana Systems Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ extern "C" __global__ void __launch_bounds__(128) sgemm_nt_32x128 ( short* param_Rand, const float* param_A, const float* param_B, float* param_C, int param_lda, int param_ldb, int param_ldc, int param_m, int param_n, int param_k, float param_alpha, float param_beta, int param_flags, int param_ldaz, int param_ldbz, int param_ldcz, int param_batch_loops ) { __shared__ float share[(128*16 + 32)*2 + (32*16 + 32)*2 + 4]; int tid = threadIdx.x; share[tid] = 1; param_C[tid] = share[127-tid]; }
d8f17b897899d4e4892379b9bc5766eef7c2a0f8.cu
/* * Copyright 2014 Nervana Systems Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ extern "C" __global__ void __launch_bounds__(128) sgemm_nt_32x128 ( short* param_Rand, const float* param_A, const float* param_B, float* param_C, int param_lda, int param_ldb, int param_ldc, int param_m, int param_n, int param_k, float param_alpha, float param_beta, int param_flags, int param_ldaz, int param_ldbz, int param_ldcz, int param_batch_loops ) { __shared__ float share[(128*16 + 32)*2 + (32*16 + 32)*2 + 4]; int tid = threadIdx.x; share[tid] = 1; param_C[tid] = share[127-tid]; }
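// In the sgemm_nt_32x128 pair above the kernel bodies are byte-for-byte identical: for
// device-only code (__global__, __launch_bounds__, __shared__) hipify only prepends its
// banner comment and the hip_runtime.h include. Note that the stub reads share[127 - tid],
// written by a different thread, with no __syncthreads() in between - presumably fine for a
// launch/occupancy test, but a race in real code. A minimal corrected sketch; copy_rev is a
// hypothetical kernel, not part of either file:

extern "C" __global__ void __launch_bounds__(128) copy_rev(float *out) {
  __shared__ float share[128];
  int tid = threadIdx.x;
  share[tid] = (float)tid;
  __syncthreads();             // make every write visible before any cross-thread read
  out[tid] = share[127 - tid];
}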
fc744308f297b8c73602738bc195538c98bf80d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cellularautomata_kernal.h" #include <hip/hip_vector_types.h> #include <cstdio> //extern "C" bool extern "C" float CUDATimeStep(int* pFlatGrid, int DIM) { int *dev_pFlatGrid; //Pointers to device allocated memory int *dev_DIM; hipEvent_t start,stop; //Events for timings //START: Record duration of GPGPU processing hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); size_t noCells = DIM * DIM * sizeof(int); //Might need to flatten the 2d array ormaybe try "int2" type //Allocate suitable size memory on device hipMalloc((void**) &dev_pFlatGrid, noCells); hipMalloc((void**) &dev_DIM, sizeof(int)); //Make our 2D grid of blocks & threads (DIM/No of threads) //One pixel is one thread. dim3 blocks (DIM/10, DIM/10); dim3 threads(10,10); //Copy our memory from Host to Device hipMemcpy(dev_pFlatGrid, pFlatGrid, noCells, hipMemcpyHostToDevice); hipMemcpy(dev_DIM, &DIM, sizeof(int), hipMemcpyHostToDevice); //Probably really bad way to do this....look into const mem //hipMemcpy(dev_DIM, pDim, sizeof(int), // hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernal), dim3(blocks),dim3(threads), 0, 0, dev_pFlatGrid, dev_DIM); //Copy back to host hipMemcpy(pFlatGrid, dev_pFlatGrid, noCells, hipMemcpyDeviceToHost); //STOP : processing done hipEventRecord(stop,0); hipEventSynchronize(stop); float elapsedTime = 0; hipEventElapsedTime(&elapsedTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); //fix up states - normalize for (int i = 0; i < DIM; ++i) { for (int j = 0; j < DIM; ++j) { pFlatGrid[i * DIM +j] = pFlatGrid[i * DIM +j] >> 1; } } //Free memory on Device hipFree(dev_pFlatGrid); hipFree(dev_DIM); return elapsedTime; }
fc744308f297b8c73602738bc195538c98bf80d8.cu
#include "cellularautomata_kernal.h" #include <vector_types.h> #include <cstdio> //extern "C" bool extern "C" float CUDATimeStep(int* pFlatGrid, int DIM) { int *dev_pFlatGrid; //Pointers to device allocated memory int *dev_DIM; cudaEvent_t start,stop; //Events for timings //START: Record duration of GPGPU processing cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); size_t noCells = DIM * DIM * sizeof(int); //Might need to flatten the 2d array ormaybe try "int2" type //Allocate suitable size memory on device cudaMalloc((void**) &dev_pFlatGrid, noCells); cudaMalloc((void**) &dev_DIM, sizeof(int)); //Make our 2D grid of blocks & threads (DIM/No of threads) //One pixel is one thread. dim3 blocks (DIM/10, DIM/10); dim3 threads(10,10); //Copy our memory from Host to Device cudaMemcpy(dev_pFlatGrid, pFlatGrid, noCells, cudaMemcpyHostToDevice); cudaMemcpy(dev_DIM, &DIM, sizeof(int), cudaMemcpyHostToDevice); //Probably really bad way to do this....look into const mem //cudaMemcpy(dev_DIM, pDim, sizeof(int), // cudaMemcpyHostToDevice); kernal<<<blocks,threads>>>(dev_pFlatGrid, dev_DIM); //Copy back to host cudaMemcpy(pFlatGrid, dev_pFlatGrid, noCells, cudaMemcpyDeviceToHost); //STOP : processing done cudaEventRecord(stop,0); cudaEventSynchronize(stop); float elapsedTime = 0; cudaEventElapsedTime(&elapsedTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); //fix up states - normalize for (int i = 0; i < DIM; ++i) { for (int j = 0; j < DIM; ++j) { pFlatGrid[i * DIM +j] = pFlatGrid[i * DIM +j] >> 1; } } //Free memory on Device cudaFree(dev_pFlatGrid); cudaFree(dev_DIM); return elapsedTime; }
940d119173095e1c8b549be1835da2dc0576fa02.hip
// !!! This is a file automatically generated by hipify!!! #include "THHTensorMath.h" #include "THHGeneral.h" #include "TH/THHalf.h" #include "THHTensorCopy.h" #include "THHApply.cuh" #include "THHNumerics.cuh" #include "THHTensorMathCompareT.cuh" #include "THHTensor.hpp" template <typename T> struct TensorAddConstantOp { TensorAddConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in + val; } __device__ __forceinline__ void operator()(T* v) { *v += val; } const T val; }; template <typename T> struct TensorSubConstantOp { TensorSubConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in - val; } __device__ __forceinline__ void operator()(T* v) { *v -= val; } const T val; }; template <typename T> struct TensorMulConstantOp { TensorMulConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in * val; } __device__ __forceinline__ void operator()(T* v) { *v *= val; } const T val; }; template <typename T> struct TensorDivConstantOp { TensorDivConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in / val; } __device__ __forceinline__ void operator()(T* v) { *v /= val; } const T val; }; template <> struct TensorDivConstantOp<float> { TensorDivConstantOp(float v) : val(1.f / v) {} __device__ __forceinline__ void operator()(float* out, float* in) { *out = *in * val; } __device__ __forceinline__ void operator()(float* v) { *v *= val; } const float val; }; template <> struct TensorDivConstantOp<double> { TensorDivConstantOp(double v) : val(1. / v) {} __device__ __forceinline__ void operator()(double* out, double* in) { *out = *in * val; } __device__ __forceinline__ void operator()(double* v) { *v *= val; } const double val; }; template<typename T> static __device__ __forceinline__ typename std::enable_if<std::is_signed<T>::value, bool>::type modulo_wrap(T a, T b) { return (a != 0) && (a < 0) != (b < 0); } template<typename T> static __device__ __forceinline__ typename std::enable_if<std::is_unsigned<T>::value, bool>::type modulo_wrap(T a, T b) { return false; } template <typename T> struct TensorRemainderOp { TensorRemainderOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in % val; if (modulo_wrap<T>(*out, val)) { *out += val; } } __device__ __forceinline__ void operator()(T* v) { *v = *v % val; if (modulo_wrap<T>(*v, val)) { *v += val; } } const T val; }; template <> struct TensorRemainderOp<float> { TensorRemainderOp(float v) : val(v) {} __device__ __forceinline__ void operator()(float* out, float* in) { *out = *in - val * floorf(*in / val); } __device__ __forceinline__ void operator()(float* v) { *v = *v - val * floorf(*v / val); } const float val; }; template <> struct TensorRemainderOp<double> { TensorRemainderOp(double v) : val(v) {} __device__ __forceinline__ void operator()(double* out, double* in) { *out = *in - val * floor(*in / val); } __device__ __forceinline__ void operator()(double* v) { *v = *v - val * floor(*v / val); } const double val; }; template <> struct TensorRemainderOp<at::Half> { TensorRemainderOp(at::Half v): val(v) {} __device__ __forceinline__ void operator()(at::Half* out, at::Half* in) { *out = *in - val * floorf(*in / val); } __device__ __forceinline__ void operator()(at::Half* v) { *v = *v - val * floorf(*v / val); } const at::Half val; }; template <typename T> struct TensorFmodOp { TensorFmodOp(T v) : val((float)v) {} __device__ __forceinline__ void operator()(T* 
out, T* in) { *out = (T) fmodf((float) *in, val); } __device__ __forceinline__ void operator()(T* v) { *v = (T) fmodf((float) *v, val); } const float val; }; template <> struct TensorFmodOp<double> { TensorFmodOp(double v) : val(v) {} __device__ __forceinline__ void operator()(double* out, double* in) { *out = fmod(*in, val); } __device__ __forceinline__ void operator()(double* v) { *v = fmod(*v, val); } const double val; }; template <typename T, int Upper> struct TensorTriOp { TensorTriOp(T *start_, int64_t stride0_, int64_t stride1_, int64_t k_) : start(start_), stride0(stride0_), stride1(stride1_), k(k_) {} __device__ __forceinline__ int mask(T *out) { ptrdiff_t n = out - start; int64_t row, col; if (stride0 > stride1) { row = (int64_t) (n / stride0); col = (int64_t) ((n % stride0) / stride1); } else { row = (int64_t) ((n % stride1) / stride0); col = (int64_t) (n / stride1); } return Upper ? (col - row >= k) : (col - row <= k); } __device__ __forceinline__ void operator()(T* out, T* in) { *out = mask(out) ? *in : ScalarConvert<int, T>::to(0); } __device__ __forceinline__ void operator()(T* v) { if (!mask(v)) *v = ScalarConvert<int, T>::to(0); } const T *start; const int64_t stride0, stride1, k; }; template <typename T> struct TensorLShiftConstantOp { TensorLShiftConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in << val; } __device__ __forceinline__ void operator()(T* v) { *v <<= val; } const T val; }; template <typename T> struct TensorRShiftConstantOp { TensorRShiftConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in >> val; } __device__ __forceinline__ void operator()(T* v) { *v >>= val; } const T val; }; template <typename T> struct TensorBitAndConstantOp { TensorBitAndConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in & val; } __device__ __forceinline__ void operator()(T* v) { *v &= val; } const T val; }; template <typename T> struct TensorBitOrConstantOp { TensorBitOrConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in | val; } __device__ __forceinline__ void operator()(T* v) { *v |= val; } const T val; }; template <typename T> struct TensorBitXorConstantOp { TensorBitXorConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in ^ val; } __device__ __forceinline__ void operator()(T* v) { *v ^= val; } const T val; }; #include "generic/THCTensorMathPairwise.cu" #include "THHGenerateAllTypes.h"
940d119173095e1c8b549be1835da2dc0576fa02.cu
#include "THCTensorMath.h" #include "THCGeneral.h" #include "TH/THHalf.h" #include "THCTensorCopy.h" #include "THCApply.cuh" #include "THCNumerics.cuh" #include "THCTensorMathCompareT.cuh" #include "THCTensor.hpp" template <typename T> struct TensorAddConstantOp { TensorAddConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in + val; } __device__ __forceinline__ void operator()(T* v) { *v += val; } const T val; }; template <typename T> struct TensorSubConstantOp { TensorSubConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in - val; } __device__ __forceinline__ void operator()(T* v) { *v -= val; } const T val; }; template <typename T> struct TensorMulConstantOp { TensorMulConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in * val; } __device__ __forceinline__ void operator()(T* v) { *v *= val; } const T val; }; template <typename T> struct TensorDivConstantOp { TensorDivConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in / val; } __device__ __forceinline__ void operator()(T* v) { *v /= val; } const T val; }; template <> struct TensorDivConstantOp<float> { TensorDivConstantOp(float v) : val(1.f / v) {} __device__ __forceinline__ void operator()(float* out, float* in) { *out = *in * val; } __device__ __forceinline__ void operator()(float* v) { *v *= val; } const float val; }; template <> struct TensorDivConstantOp<double> { TensorDivConstantOp(double v) : val(1. / v) {} __device__ __forceinline__ void operator()(double* out, double* in) { *out = *in * val; } __device__ __forceinline__ void operator()(double* v) { *v *= val; } const double val; }; template<typename T> static __device__ __forceinline__ typename std::enable_if<std::is_signed<T>::value, bool>::type modulo_wrap(T a, T b) { return (a != 0) && (a < 0) != (b < 0); } template<typename T> static __device__ __forceinline__ typename std::enable_if<std::is_unsigned<T>::value, bool>::type modulo_wrap(T a, T b) { return false; } template <typename T> struct TensorRemainderOp { TensorRemainderOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in % val; if (modulo_wrap<T>(*out, val)) { *out += val; } } __device__ __forceinline__ void operator()(T* v) { *v = *v % val; if (modulo_wrap<T>(*v, val)) { *v += val; } } const T val; }; template <> struct TensorRemainderOp<float> { TensorRemainderOp(float v) : val(v) {} __device__ __forceinline__ void operator()(float* out, float* in) { *out = *in - val * floorf(*in / val); } __device__ __forceinline__ void operator()(float* v) { *v = *v - val * floorf(*v / val); } const float val; }; template <> struct TensorRemainderOp<double> { TensorRemainderOp(double v) : val(v) {} __device__ __forceinline__ void operator()(double* out, double* in) { *out = *in - val * floor(*in / val); } __device__ __forceinline__ void operator()(double* v) { *v = *v - val * floor(*v / val); } const double val; }; template <> struct TensorRemainderOp<at::Half> { TensorRemainderOp(at::Half v): val(v) {} __device__ __forceinline__ void operator()(at::Half* out, at::Half* in) { *out = *in - val * floorf(*in / val); } __device__ __forceinline__ void operator()(at::Half* v) { *v = *v - val * floorf(*v / val); } const at::Half val; }; template <typename T> struct TensorFmodOp { TensorFmodOp(T v) : val((float)v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = (T) fmodf((float) *in, val); } 
__device__ __forceinline__ void operator()(T* v) { *v = (T) fmodf((float) *v, val); } const float val; }; template <> struct TensorFmodOp<double> { TensorFmodOp(double v) : val(v) {} __device__ __forceinline__ void operator()(double* out, double* in) { *out = fmod(*in, val); } __device__ __forceinline__ void operator()(double* v) { *v = fmod(*v, val); } const double val; }; template <typename T, int Upper> struct TensorTriOp { TensorTriOp(T *start_, int64_t stride0_, int64_t stride1_, int64_t k_) : start(start_), stride0(stride0_), stride1(stride1_), k(k_) {} __device__ __forceinline__ int mask(T *out) { ptrdiff_t n = out - start; int64_t row, col; if (stride0 > stride1) { row = (int64_t) (n / stride0); col = (int64_t) ((n % stride0) / stride1); } else { row = (int64_t) ((n % stride1) / stride0); col = (int64_t) (n / stride1); } return Upper ? (col - row >= k) : (col - row <= k); } __device__ __forceinline__ void operator()(T* out, T* in) { *out = mask(out) ? *in : ScalarConvert<int, T>::to(0); } __device__ __forceinline__ void operator()(T* v) { if (!mask(v)) *v = ScalarConvert<int, T>::to(0); } const T *start; const int64_t stride0, stride1, k; }; template <typename T> struct TensorLShiftConstantOp { TensorLShiftConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in << val; } __device__ __forceinline__ void operator()(T* v) { *v <<= val; } const T val; }; template <typename T> struct TensorRShiftConstantOp { TensorRShiftConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in >> val; } __device__ __forceinline__ void operator()(T* v) { *v >>= val; } const T val; }; template <typename T> struct TensorBitAndConstantOp { TensorBitAndConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in & val; } __device__ __forceinline__ void operator()(T* v) { *v &= val; } const T val; }; template <typename T> struct TensorBitOrConstantOp { TensorBitOrConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in | val; } __device__ __forceinline__ void operator()(T* v) { *v |= val; } const T val; }; template <typename T> struct TensorBitXorConstantOp { TensorBitXorConstantOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* out, T* in) { *out = *in ^ val; } __device__ __forceinline__ void operator()(T* v) { *v ^= val; } const T val; }; #include "generic/THCTensorMathPairwise.cu" #include "THCGenerateAllTypes.h"
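// Apart from the THC -> THH header renames, the functor structs in the pair above are
// untouched by hipify. Each functor exposes two overloads - operator()(T* out, T* in) for
// out-of-place and operator()(T*) for in-place - and is consumed by THC's pointwise-apply
// machinery (THC_pointwiseApply1/2 from THCApply.cuh, driven by the generic/ include at the
// bottom). A minimal stand-in driver, only to show how such a functor is invoked; apply1 is
// hypothetical and far simpler than the real strided dispatcher:

template <typename T, typename Op>
__global__ void apply1(T *data, int n, Op op) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) op(data + i);                  // calls the in-place operator()(T*)
}

// usage sketch: apply1<<<(n + 255) / 256, 256>>>(d_x, n, TensorAddConstantOp<float>(3.0f));
// Also worth noting: the float/double TensorDivConstantOp specializations precompute the
// reciprocal so the kernel multiplies instead of dividing.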
8000d71705826fea7c04034840660a06495d105b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" //void checkCUDAErrorFn(const char *msg, const char *file, int line) { // hipError_t err = hipGetLastError(); // if (hipSuccess == err) { // return; // } // // fprintf(stderr, "CUDA error"); // if (file) { // fprintf(stderr, " (%s:%d)", file, line); // } // fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); // exit(EXIT_FAILURE); //} namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) { return; } bools[index] = idata[index] == 0 ? 0 : 1; } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) { return; } if (bools[index] == 1) { odata[indices[index]] = idata[index]; } } } }
8000d71705826fea7c04034840660a06495d105b.cu
#include "common.h" //void checkCUDAErrorFn(const char *msg, const char *file, int line) { // cudaError_t err = cudaGetLastError(); // if (cudaSuccess == err) { // return; // } // // fprintf(stderr, "CUDA error"); // if (file) { // fprintf(stderr, " (%s:%d)", file, line); // } // fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); // exit(EXIT_FAILURE); //} namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) { return; } bools[index] = idata[index] == 0 ? 0 : 1; } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) { return; } if (bools[index] == 1) { odata[indices[index]] = idata[index]; } } } }
adb8c39b5fae7402fe1aded51fa5891284d6e931.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "dropout_layer.h" #include "hip/hip_runtime.h" #include "utils.h" } __global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale) { int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (id < size) input[id] = (rand[id] < prob) ? 0 : input[id] * scale; } void forward_dropout_layer_gpu(dropout_layer layer, network net) { if (!net.train) return; int size = layer.inputs * layer.batch; cuda_random(layer.rand_gpu, size); /* int i; for(i = 0; i < size; ++i){ layer.rand[i] = rand_uniform(); } cuda_push_array(layer.rand_gpu, layer.rand, size); */ yoloswag420blazeit360noscope << < cuda_gridsize(size), BLOCK >> > (net.input_gpu, size, layer.rand_gpu, layer.probability, layer.scale); check_error(hipPeekAtLastError()); } void backward_dropout_layer_gpu(dropout_layer layer, network net) { if (!net.delta_gpu) return; int size = layer.inputs * layer.batch; yoloswag420blazeit360noscope << < cuda_gridsize(size), BLOCK >> > (net.delta_gpu, size, layer.rand_gpu, layer.probability, layer.scale); check_error(hipPeekAtLastError()); }
adb8c39b5fae7402fe1aded51fa5891284d6e931.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "dropout_layer.h" #include "cuda.h" #include "utils.h" } __global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale) { int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (id < size) input[id] = (rand[id] < prob) ? 0 : input[id] * scale; } void forward_dropout_layer_gpu(dropout_layer layer, network net) { if (!net.train) return; int size = layer.inputs * layer.batch; cuda_random(layer.rand_gpu, size); /* int i; for(i = 0; i < size; ++i){ layer.rand[i] = rand_uniform(); } cuda_push_array(layer.rand_gpu, layer.rand, size); */ yoloswag420blazeit360noscope << < cuda_gridsize(size), BLOCK >> > (net.input_gpu, size, layer.rand_gpu, layer.probability, layer.scale); check_error(cudaPeekAtLastError()); } void backward_dropout_layer_gpu(dropout_layer layer, network net) { if (!net.delta_gpu) return; int size = layer.inputs * layer.batch; yoloswag420blazeit360noscope << < cuda_gridsize(size), BLOCK >> > (net.delta_gpu, size, layer.rand_gpu, layer.probability, layer.scale); check_error(cudaPeekAtLastError()); }
cc20e2621b2a23075496a1d3825f7b7c7dfe70e7.hip
// !!! This is a file automatically generated by hipify!!! #define BLOCK_SIZE 64 #define _DEBUG #include "cutil.h" #include <hip/hip_runtime.h> #include <stdio.h> #include "cuda_memory.h" //#ifdef WIN32 //#include "win32time.h" //#else #include <sys/time.h> //#endif #include "MirroredArray.h" #include "hash_table_pycuda.cu" //#ifdef LIBRARY //extern "C" //#ifdef WIN32 //__declspec(dllexport) //#endif //#endif //void initCuda(int argc, char **argv) { // CUT_DEVICE_INIT(argc, argv); // // hipDeviceProp_t prop; // CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&prop, 0)); // printf("Device name: %s\n", prop.name); // printf("Max threads per block: %d\n", prop.maxThreadsPerBlock); // printf("Max threads dim: %d %d %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); // printf("Max grid size: %d %d %d \n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); // printf("Shared memory per block: %d Kb\n", (int)(prop.sharedMemPerBlock/1024)); // printf("Total global memory: %d Kb\n", (int)(prop.totalGlobalMem/1024)); // printf("Warp size: %d\n", prop.warpSize); // printf("Memory pitch: %d\n", (int)prop.memPitch); // printf("Registers per block: %d\n", prop.regsPerBlock); // printf("Clock rate: %d\n", prop.clockRate); // printf("Texture alignment: %d\n", (int)prop.textureAlignment); // fflush(stdout); //} // python templating //pd = {{ pd }} //vd = {{ vd }} struct MatrixEntry { int index; float weight; }; // template<int {{ pd }}> __global__ static void createMatrix(const int w, const int h, const float *positions, const float *values, const float *scaleFactor, MatrixEntry *matrix) { // scanline order //const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; // 8x8 blocks const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; const int threadId = threadIdx.y*blockDim.x + threadIdx.x; const int idx = y*w + x; const bool outOfBounds = (x >= w) || (y >= h); float myElevated[{{ pd }}+1]; const float *myPosition = positions + idx*{{ pd }}; int myGreedy[{{ pd }}+1]; int myRank[{{ pd }}+1]; float myBarycentric[{{ pd }}+2]; __shared__ short keys[{{ pd }}*BLOCK_SIZE]; short *myKey = keys + threadId * {{ pd }}; if (!outOfBounds) { myElevated[{{ pd }}] = -{{ pd }}*(myPosition[{{ pd }}-1])*scaleFactor[{{ pd }}-1]; for (int i = {{ pd }}-1; i > 0; i--) { myElevated[i] = (myElevated[i+1] - i*(myPosition[i-1])*scaleFactor[i-1] + (i+2)*(myPosition[i])*scaleFactor[i]); } myElevated[0] = myElevated[1] + 2*(myPosition[0])*scaleFactor[0]; // find the closest zero-colored lattice point // greedily search for the closest zero-colored lattice point signed short sum = 0; for (int i = 0; i <= {{ pd }}; i++) { float v = myElevated[i]*(1.0f/({{ pd }}+1)); float up = ceilf(v) * ({{ pd }}+1); float down = floorf(v) * ({{ pd }}+1); if (up - myElevated[i] < myElevated[i] - down) { myGreedy[i] = (signed short)up; } else { myGreedy[i] = (signed short)down; } sum += myGreedy[i]; } sum /= {{ pd }}+1; // sort differential to find the permutation between this simplex and the canonical one for (int i = 0; i <= {{ pd }}; i++) { myRank[i] = 0; for (int j = 0; j <= {{ pd }}; j++) { if (myElevated[i] - myGreedy[i] < myElevated[j] - myGreedy[j] || (myElevated[i] - myGreedy[i] == myElevated[j] - myGreedy[j] && i > j)) { myRank[i]++; } } } if (sum > 0) { // sum too large, need to bring down the ones with the smallest differential for (int i = 0; i <= {{ pd }}; i++) { if (myRank[i] >= {{ pd }} + 1 - sum) { myGreedy[i] -= {{ pd }}+1; myRank[i] += sum - ({{ pd 
}}+1); } else { myRank[i] += sum; } } } else if (sum < 0) { // sum too small, need to bring up the ones with largest differential for (int i = 0; i <= {{ pd }}; i++) { if (myRank[i] < -sum) { myGreedy[i] += {{ pd }}+1; myRank[i] += ({{ pd }}+1) + sum; } else { myRank[i] += sum; } } } #ifdef LINEAR_D_MEMORY for (int i = 0; i <= {{ pd }}; i++) { table_zeros[idx*({{ pd }}+1)+i] = myGreedy[i]; table_rank[idx*({{ pd }}+1)+i] = myRank[i]; } #endif // turn delta into barycentric coords for (int i = 0; i <= {{ pd }}+1; i++) { myBarycentric[i] = 0; } for (int i = 0; i <= {{ pd }}; i++) { float delta = (myElevated[i] - myGreedy[i]) * (1.0f/({{ pd }}+1)); myBarycentric[{{ pd }}-myRank[i]] += delta; myBarycentric[{{ pd }}+1-myRank[i]] -= delta; } myBarycentric[0] += 1.0f + myBarycentric[{{ pd }}+1]; } #ifdef USE_ADDITIVE_HASH unsigned int cumulative_hash = hash({{ pd }}, myGreedy); #endif for (int color = 0; color <= {{ pd }}; color++) { // Compute the location of the lattice point explicitly (all but // the last coordinate - it's redundant because they sum to zero) if (!outOfBounds) { for (int i = 0; i < {{ pd }}; i++) { myKey[i] = myGreedy[i] + color; if (myRank[i] > {{ pd }}-color) myKey[i] -= ({{ pd }}+1); } } #ifdef USE_ADDITIVE_HASH for (int i = 0; i < {{ pd }}; i++) { if (myRank[i] == {{ pd }}-color) cumulative_hash += hOffset[i]; } #endif if (!outOfBounds) { MatrixEntry r; #ifdef USE_ADDITIVE_HASH r.index = hashTableInsert({{ pd }}, cumulative_hash, myKey, idx*({{ pd }}+1)+color); #else r.index = hashTableInsert({{ pd }}, myKey, idx*({{ pd }}+1)+color); #endif r.weight = myBarycentric[color]; matrix[idx*({{ pd }}+1) + color] = r; } } } // template<int kd> __global__ static void cleanHashTable(const int kd, int n, MatrixEntry *matrix) { const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x; if (idx >= n) return; // find my hash table entry int *e = table_entries + idx; // Check if I created my own key in the previous phase if (*e >= 0) { // Rehash my key and reset the pointer in order to merge with // any other pixel that created a different entry under the // same key. If the computation was serial this would never // happen, but sometimes race conditions can make the same key // be inserted twice. hashTableRetrieve always returns the // earlier, so it's no problem as long as we rehash now. 
#ifdef LINEAR_D_MEMORY // Get my key short myKey[kd]; generateKey(kd, *e, myKey); *e = hashTableRetrieve(kd, myKey); #else *e = hashTableRetrieve(kd, table_keys + *e*kd); #endif } } // template<int {{ pd }}, int {{ vd }}> //__global__ static void splat(const int w, const int h, float *values, MatrixEntry *matrix) { // //const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; // // // 8x8 blocks // const int x = threadIdx.x + blockIdx.x * blockDim.x; // const int y = threadIdx.y + (blockIdx.y/({{ pd }}+1)) * blockDim.y; // //const int threadId = threadIdx.y*blockDim.x + threadIdx.x; // const int color = blockIdx.y % ({{ pd }}+1); // const int idx = y*w + x; // const bool outOfBounds = (x >= w) || (y >= h); // // if (outOfBounds) return; // // float *myValue = values + idx*{{ vd }}; // // MatrixEntry r = matrix[idx*({{ pd }}+1)+color]; // matrix[idx*({{ pd }}+1)+color].index = r.index = table_entries[r.index]; // float *val = table_values + r.index*({{ vd }}+1); // for (int j = 0; j < {{ vd }}; j++) { // atomicAdd(val+j, myValue[j]*r.weight); // } // atomicAdd(val+{{ vd }}, r.weight); //} // template<int {{ pd }}, int {{ vd }}> __global__ static void splatCache(const int w, const int h, float *values, MatrixEntry *matrix) { const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + (blockIdx.y/({{ pd }}+1)) * blockDim.y; const int threadId = threadIdx.y*blockDim.x + threadIdx.x; const int color = blockIdx.y % ({{ pd }}+1); const int idx = y*w + x; const bool outOfBounds = (x >= w) || (y >= h); __shared__ int sharedOffsets[BLOCK_SIZE]; __shared__ float sharedValues[BLOCK_SIZE*({{ vd }}+1)]; int myOffset = -1; float *myValue = sharedValues + threadId*({{ vd }}+1); if (!outOfBounds) { float *value = values + idx*{{ vd }}; MatrixEntry r = matrix[idx*({{ pd }}+1)+color]; // convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array matrix[idx*({{ pd }}+1)+color].index = r.index = table_entries[r.index]; // record the offset into the keys/values array in shared space myOffset = sharedOffsets[threadId] = r.index*({{ vd }}+1); for (int j = 0; j < {{ vd }}; j++) { myValue[j] = value[j]*r.weight; } myValue[{{ vd }}] = r.weight; } else { sharedOffsets[threadId] = -1; } __syncthreads(); // am I the first thread in this block to care about this key? 
if (outOfBounds) return; for (int i = 0; i < BLOCK_SIZE; i++) { if (i < threadId) { if (myOffset == sharedOffsets[i]) { // somebody else with higher priority cares about this key return; } } else if (i > threadId) { if (myOffset == sharedOffsets[i]) { // someone else with lower priority cares about this key, accumulate it into mine for (int j = 0; j <= {{ vd }}; j++) { sharedValues[threadId*({{ vd }}+1) + j] += sharedValues[i*({{ vd }}+1) + j]; } } } } // only the threads with something to write to main memory are still going float *val = table_values + myOffset; for (int j = 0; j <= {{ vd }}; j++) { atomicAdd(val+j, myValue[j]); } } // template<int {{ pd }}, int {{ vd }}> __global__ static void blur(int n, float *newValues, int color, MatrixEntry *matrix) { const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x; if (idx >= n) return; // Check if I'm valid if (matrix[idx].index != idx) return; // find my key and the keys of my neighbours short myKey[{{ pd }}+1]; short np[{{ pd }}+1]; short nm[{{ pd }}+1]; #ifdef LINEAR_D_MEMORY generateKey({{ pd }}, idx, myKey); for (int i = 0; i < {{ pd }}; i++) { np[i] = myKey[i]+1; nm[i] = myKey[i]-1; } #else for (int i = 0; i < {{ pd }}; i++) { myKey[i] = table_keys[idx*{{ pd }}+i]; np[i] = myKey[i]+1; nm[i] = myKey[i]-1; } #endif np[color] -= {{ pd }}+1; nm[color] += {{ pd }}+1; #ifdef USE_ADDITIVE_HASH unsigned int hCurrent = hash({{ pd }}, myKey); int offNp = hashTableRetrieveWithHash({{ pd }}, hCurrent+hOffset[color],np); int offNm = hashTableRetrieveWithHash({{ pd }}, hCurrent-hOffset[color],nm); #else int offNp = hashTableRetrieve({{ pd }}, np); int offNm = hashTableRetrieve({{ pd }}, nm); #endif float *valMe = table_values + ({{ vd }}+1)*idx; float *valNp = table_values + ({{ vd }}+1)*offNp; float *valNm = table_values + ({{ vd }}+1)*offNm; float *valOut = newValues + ({{ vd }}+1)*idx; if (offNp >= 0 && offNm >= 0) { for (int i = 0; i <= {{ vd }}; i++) { valOut[i] = (valNp[i] + (valMe[i]*2) + valNm[i])/4; } } else if (offNp >= 0) { for (int i = 0; i <= {{ vd }}; i++) { valOut[i] = (valNp[i] + (valMe[i]*2))/4; } } else if (offNm >= 0) { for (int i = 0; i <= {{ vd }}; i++) { valOut[i] = (valNm[i] + (valMe[i]*2))/4; } } else { for (int i = 0; i <= {{ vd }}; i++) { valOut[i] = valMe[i]*2; } } } // template<int {{ pd }}, int {{ vd }}> __global__ static void slice(const int w, const int h, float *values, MatrixEntry *matrix) { //const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; const int threadId = threadIdx.y*blockDim.x + threadIdx.x; const int idx = y*w + x; const bool outOfBounds = (x >= w) || (y >= h); if (outOfBounds) return; __shared__ float localValue[BLOCK_SIZE*{{ vd }}]; float *myValue = localValue + threadId*{{ vd }}; float myWeight = 0; for (int i = 0; i < {{ vd }}; i++) { myValue[i] = 0; } for (int i = 0; i <= {{ pd }}; i++) { MatrixEntry r = matrix[idx*({{ pd }}+1) + i]; float *val = table_values + r.index*({{ vd }}+1); for (int j = 0; j < {{ vd }}; j++) { myValue[j] += r.weight*val[j]; } myWeight += r.weight*val[{{ vd }}]; } myWeight = 1.0f/myWeight; for (int j = 0; j < {{ vd }}; j++) values[idx*{{ vd }} + j] = myValue[j]*myWeight; } // template<int {{ vd }}, int {{ pd }}> void filter_(float *im, float *ref, int w, int h, bool accurate) { int n = w*h; float blurVariance = accurate ? 
0.5 : 0; MirroredArray<float> scaleFactor({{ pd }}); //MirroredArray<float> offset({{ pd }}); for (int i = 0; i < {{ pd }}; i++) { scaleFactor.host[i] = ({{ pd }}+1)*sqrtf((1.0/6 + blurVariance)/((i+1)*(i+2))); //offset.host[i] = ((double)rand()/RAND_MAX)*({{ pd }}+1)*2; } scaleFactor.hostToDevice(); //offset.hostToDevice(); MirroredArray<float> values(im, n*{{ vd }}); // data, size MirroredArray<float> positions(ref, n*{{ pd }}); MirroredArray<MatrixEntry> matrix(n*({{ pd }}+1)); createHashTable({{ pd }}, {{ vd }}+1, n*({{ pd }}+1)); // createHashTable<{{ pd }}, {{ vd }}+1>(n*({{ pd }}+1)); // Populate constant memory for hash helpers unsigned long long int __host_two32 = ((unsigned long long int)1)<<32; unsigned int __host_div_c = 2*(n*({{ pd }}+1)); unsigned int __host_div_l = ceilf(logf((float)__host_div_c) / logf(2.0f)); unsigned int __host_div_m = (__host_two32<<__host_div_l)/__host_div_c - __host_two32 + 1; CUDA_SAFE_CALL(hipMemcpyToSymbol((char*)&__div_c, &__host_div_c, sizeof(unsigned int))); CUDA_SAFE_CALL(hipMemcpyToSymbol((char*)&__div_l, &__host_div_l, sizeof(unsigned int))); CUDA_SAFE_CALL(hipMemcpyToSymbol((char*)&__div_m, &__host_div_m, sizeof(unsigned int))); // Populate constant memory with hash of offset vectors unsigned int hOffset_host[{{ pd }}+1]; signed short offset[{{ pd }}+1]; for (int i = 0; i < {{ pd }}; offset[i] = 1, i++); for (int i = 0; i <= {{ pd }}; i++) { offset[i] -= {{ pd }}+1; hOffset_host[i] = hash({{ pd }}, offset); offset[i] += {{ pd }}+1; } CUDA_SAFE_CALL(hipMemcpyToSymbol((char*)&hOffset, &hOffset_host, sizeof(unsigned int)*({{ pd }}+1))); dim3 blocks((w-1)/8+1, (h-1)/8+1, 1); dim3 blockSize(8, 8, 1); timeval t[7]; gettimeofday(t+0, NULL); // hipLaunchKernelGGL(( createMatrix), dim3(blocks), dim3(blockSize), 0, 0, {{ pd }}, w, h, positions.device, hipLaunchKernelGGL(( createMatrix), dim3(blocks), dim3(blockSize), 0, 0, w, h, positions.device, values.device, scaleFactor.device, matrix.device); CUT_CHECK_ERROR("Matrix creation failed\n"); gettimeofday(t+1, NULL); //HashTable hostTable; //int hashTableEntries; //CUDA_SAFE_CALL(hipMemcpy(&hostTable, table, sizeof(HashTable), hipMemcpyDeviceToHost)); //CUDA_SAFE_CALL(hipMemcpy(&hashTableEntries, hostTable_filled, sizeof(int), hipMemcpyDeviceToHost)); //printf("Hash table has %d entries\n", hashTableEntries); // fix duplicate hash table entries int cleanBlockSize = 32; dim3 cleanBlocks((n-1)/cleanBlockSize+1, 2*({{ pd }}+1), 1); hipLaunchKernelGGL(( cleanHashTable), dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, {{ pd }}, 2*n*({{ pd }}+1), matrix .device); CUT_CHECK_ERROR("clean failed\n"); gettimeofday(t+2, NULL); // splat splits by color, so extend the y coordinate to our blocks to represent that blocks.y *= {{ pd }}+1; hipLaunchKernelGGL(( splatCache), dim3(blocks), dim3(blockSize), 0, 0, w, h, values.device, matrix.device); // splatCache<{{ pd }}, {{ vd }}><<<blocks, blockSize>>>(w, h, values.device, matrix.device); //splat<{{ pd }}, {{ vd }}><<<blocks, blockSize>>>(w, h, values.device, matrix.device); CUT_CHECK_ERROR("splat failed\n"); gettimeofday(t+3, NULL); if (accurate) { float *newValues; allocateCudaMemory((void**)&(newValues), n*({{ pd }}+1)*({{ vd }}+1)*sizeof(float)); CUDA_SAFE_CALL(hipMemset((void *)newValues, 0, n*({{ pd }}+1)*({{ vd }}+1)*sizeof(float))); for (int color = 0; color <= {{ pd }}; color++) { hipLaunchKernelGGL(( blur), dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, n*({{ pd }}+1), newValues, color, matrix.device); // blur<{{ pd }}, {{ vd }}><<<cleanBlocks, 
cleanBlockSize>>>(n*({{ pd }}+1), newValues, matrix.device, color); CUT_CHECK_ERROR("blur failed\n"); newValues = swapHashTableValues(newValues); } } gettimeofday(t+4, NULL); blocks.y /= ({{ pd }}+1); hipLaunchKernelGGL(( slice), dim3(blocks), dim3(blockSize), 0, 0, w, h, values.device, matrix.device); // slice<{{ pd }}, {{ vd }}><<<blocks, blockSize>>>(w, h, values.device, matrix.device); CUT_CHECK_ERROR("slice failed\n"); gettimeofday(t+5, NULL); double total = (t[5].tv_sec - t[0].tv_sec)*1000.0 + (t[5].tv_usec - t[0].tv_usec)/1000.0; printf("Total time: %3.3f ms\n", total); char *names[5] = {"Create", "Clean ", "Splat ", "Blur ", "Slice "}; for (int i = 1; i < 6; i++) { printf("%s: %3.3f ms\n", names[i-1], (t[i].tv_sec - t[i-1].tv_sec)*1000.0 + (t[i].tv_usec - t[i-1].tv_usec)/1000.0); } printf("Total GPU memory usage: %u bytes\n", (unsigned int)GPU_MEMORY_ALLOCATION); values.deviceToHost(); destroyHashTable(); } //#ifdef LIBRARY //extern "C" //#ifdef WIN32 //__declspec(dllexport) //#endif //#endif //void filter(float *im, float *ref, int {{ pd }}, int {{ vd }}, int w, int h, bool accurate) { // switch ({{ vd }}*1000 + {{ pd }}) { // case 1001: filter_<1, 1>(im, ref, w, h, accurate); break; // case 2001: filter_<2, 1>(im, ref, w, h, accurate); break; // case 3001: filter_<3, 1>(im, ref, w, h, accurate); break; // case 1002: filter_<1, 2>(im, ref, w, h, accurate); break; // case 2002: filter_<2, 2>(im, ref, w, h, accurate); break; // case 3002: filter_<3, 2>(im, ref, w, h, accurate); break; // case 1003: filter_<1, 3>(im, ref, w, h, accurate); break; // case 2003: filter_<2, 3>(im, ref, w, h, accurate); break; // case 3003: filter_<3, 3>(im, ref, w, h, accurate); break; // case 1004: filter_<1, 4>(im, ref, w, h, accurate); break; // case 2004: filter_<2, 4>(im, ref, w, h, accurate); break; // case 3004: filter_<3, 4>(im, ref, w, h, accurate); break; // case 1005: filter_<1, 5>(im, ref, w, h, accurate); break; // case 2005: filter_<2, 5>(im, ref, w, h, accurate); break; // case 3005: filter_<3, 5>(im, ref, w, h, accurate); break; // case 1006: filter_<1, 6>(im, ref, w, h, accurate); break; // case 2006: filter_<2, 6>(im, ref, w, h, accurate); break; // case 3006: filter_<3, 6>(im, ref, w, h, accurate); break; // case 1007: filter_<1, 7>(im, ref, w, h, accurate); break; // case 2007: filter_<2, 7>(im, ref, w, h, accurate); break; // case 3007: filter_<3, 7>(im, ref, w, h, accurate); break; // case 1008: filter_<1, 8>(im, ref, w, h, accurate); break; // case 2008: filter_<2, 8>(im, ref, w, h, accurate); break; // case 3008: filter_<3, 8>(im, ref, w, h, accurate); break; // case 1009: filter_<1, 9>(im, ref, w, h, accurate); break; // case 2009: filter_<2, 9>(im, ref, w, h, accurate); break; // case 3009: filter_<3, 9>(im, ref, w, h, accurate); break; // case 1010: filter_<1, 10>(im, ref, w, h, accurate); break; // case 2010: filter_<2, 10>(im, ref, w, h, accurate); break; // case 3010: filter_<3, 10>(im, ref, w, h, accurate); break; // case 1011: filter_<1, 11>(im, ref, w, h, accurate); break; // case 2011: filter_<2, 11>(im, ref, w, h, accurate); break; // case 3011: filter_<3, 11>(im, ref, w, h, accurate); break; // case 1012: filter_<1, 12>(im, ref, w, h, accurate); break; // case 2012: filter_<2, 12>(im, ref, w, h, accurate); break; // case 3012: filter_<3, 12>(im, ref, w, h, accurate); break; // case 1013: filter_<1, 13>(im, ref, w, h, accurate); break; // case 2013: filter_<2, 13>(im, ref, w, h, accurate); break; // case 3013: filter_<3, 13>(im, ref, w, h, accurate); break; // case 
1014: filter_<1, 14>(im, ref, w, h, accurate); break; // case 2014: filter_<2, 14>(im, ref, w, h, accurate); break; // case 3014: filter_<3, 14>(im, ref, w, h, accurate); break; // case 1015: filter_<1, 15>(im, ref, w, h, accurate); break; // case 2015: filter_<2, 15>(im, ref, w, h, accurate); break; // case 3015: filter_<3, 15>(im, ref, w, h, accurate); break; // case 1016: filter_<1, 16>(im, ref, w, h, accurate); break; // case 2016: filter_<2, 16>(im, ref, w, h, accurate); break; // case 3016: filter_<3, 16>(im, ref, w, h, accurate); break; // default: // printf("Unsupported channel counts. Reference image must have 1 to 16 channels, input image must have 1 to 3 channels\n"); // } //} // // // // //// Below here is a program for testing it on the command line //#ifndef LIBRARY // //struct header { // int frames, width, height, channels, type; //}; // //void loadTMP(const char *filename, float **data, header *h) { // FILE *f = fopen(filename, "rb"); // fread(h, sizeof(header), 1, f); // size_t size = h->frames*h->width*h->channels*h->height; // *data = new float[size]; // fread(*data, sizeof(float), size, f); // fclose(f); //} // //void saveTMP(const char *filename, float *data, header h) { // FILE *f = fopen(filename, "wb"); // fwrite(&h, sizeof(header), 1, f); // size_t size = h.frames*h.width*h.channels*h.height; // fwrite(data, sizeof(float), size, f); // fclose(f); //} // //int main(int argc, char **argv) { // initCuda(1, argv); // // if (argc < 4) { // printf("Usage: permutohedral input.tmp ref.tmp output.tmp {accurate}\n"); // return 1; // } // // bool accurate = argc == 5; // // srand(time(NULL)); // // float *im, *ref; // header imHeader, refHeader; // loadTMP(argv[1], &im, &imHeader); // loadTMP(argv[2], &ref, &refHeader); // // filter(im, ref, refHeader.channels, imHeader.channels, imHeader.width, imHeader.height, accurate); // // saveTMP(argv[3], im, imHeader); // // return 0; //} // //#endif
cc20e2621b2a23075496a1d3825f7b7c7dfe70e7.cu
#define BLOCK_SIZE 64 #define _DEBUG #include "cutil.h" #include <cuda_runtime.h> #include <stdio.h> #include "cuda_memory.h" //#ifdef WIN32 //#include "win32time.h" //#else #include <sys/time.h> //#endif #include "MirroredArray.h" #include "hash_table_pycuda.cu" //#ifdef LIBRARY //extern "C" //#ifdef WIN32 //__declspec(dllexport) //#endif //#endif //void initCuda(int argc, char **argv) { // CUT_DEVICE_INIT(argc, argv); // // cudaDeviceProp prop; // CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&prop, 0)); // printf("Device name: %s\n", prop.name); // printf("Max threads per block: %d\n", prop.maxThreadsPerBlock); // printf("Max threads dim: %d %d %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); // printf("Max grid size: %d %d %d \n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); // printf("Shared memory per block: %d Kb\n", (int)(prop.sharedMemPerBlock/1024)); // printf("Total global memory: %d Kb\n", (int)(prop.totalGlobalMem/1024)); // printf("Warp size: %d\n", prop.warpSize); // printf("Memory pitch: %d\n", (int)prop.memPitch); // printf("Registers per block: %d\n", prop.regsPerBlock); // printf("Clock rate: %d\n", prop.clockRate); // printf("Texture alignment: %d\n", (int)prop.textureAlignment); // fflush(stdout); //} // python templating //pd = {{ pd }} //vd = {{ vd }} struct MatrixEntry { int index; float weight; }; // template<int {{ pd }}> __global__ static void createMatrix(const int w, const int h, const float *positions, const float *values, const float *scaleFactor, MatrixEntry *matrix) { // scanline order //const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; // 8x8 blocks const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; const int threadId = threadIdx.y*blockDim.x + threadIdx.x; const int idx = y*w + x; const bool outOfBounds = (x >= w) || (y >= h); float myElevated[{{ pd }}+1]; const float *myPosition = positions + idx*{{ pd }}; int myGreedy[{{ pd }}+1]; int myRank[{{ pd }}+1]; float myBarycentric[{{ pd }}+2]; __shared__ short keys[{{ pd }}*BLOCK_SIZE]; short *myKey = keys + threadId * {{ pd }}; if (!outOfBounds) { myElevated[{{ pd }}] = -{{ pd }}*(myPosition[{{ pd }}-1])*scaleFactor[{{ pd }}-1]; for (int i = {{ pd }}-1; i > 0; i--) { myElevated[i] = (myElevated[i+1] - i*(myPosition[i-1])*scaleFactor[i-1] + (i+2)*(myPosition[i])*scaleFactor[i]); } myElevated[0] = myElevated[1] + 2*(myPosition[0])*scaleFactor[0]; // find the closest zero-colored lattice point // greedily search for the closest zero-colored lattice point signed short sum = 0; for (int i = 0; i <= {{ pd }}; i++) { float v = myElevated[i]*(1.0f/({{ pd }}+1)); float up = ceilf(v) * ({{ pd }}+1); float down = floorf(v) * ({{ pd }}+1); if (up - myElevated[i] < myElevated[i] - down) { myGreedy[i] = (signed short)up; } else { myGreedy[i] = (signed short)down; } sum += myGreedy[i]; } sum /= {{ pd }}+1; // sort differential to find the permutation between this simplex and the canonical one for (int i = 0; i <= {{ pd }}; i++) { myRank[i] = 0; for (int j = 0; j <= {{ pd }}; j++) { if (myElevated[i] - myGreedy[i] < myElevated[j] - myGreedy[j] || (myElevated[i] - myGreedy[i] == myElevated[j] - myGreedy[j] && i > j)) { myRank[i]++; } } } if (sum > 0) { // sum too large, need to bring down the ones with the smallest differential for (int i = 0; i <= {{ pd }}; i++) { if (myRank[i] >= {{ pd }} + 1 - sum) { myGreedy[i] -= {{ pd }}+1; myRank[i] += sum - ({{ pd }}+1); } else { myRank[i] += sum; } } } else if (sum < 0) { // 
sum too small, need to bring up the ones with largest differential for (int i = 0; i <= {{ pd }}; i++) { if (myRank[i] < -sum) { myGreedy[i] += {{ pd }}+1; myRank[i] += ({{ pd }}+1) + sum; } else { myRank[i] += sum; } } } #ifdef LINEAR_D_MEMORY for (int i = 0; i <= {{ pd }}; i++) { table_zeros[idx*({{ pd }}+1)+i] = myGreedy[i]; table_rank[idx*({{ pd }}+1)+i] = myRank[i]; } #endif // turn delta into barycentric coords for (int i = 0; i <= {{ pd }}+1; i++) { myBarycentric[i] = 0; } for (int i = 0; i <= {{ pd }}; i++) { float delta = (myElevated[i] - myGreedy[i]) * (1.0f/({{ pd }}+1)); myBarycentric[{{ pd }}-myRank[i]] += delta; myBarycentric[{{ pd }}+1-myRank[i]] -= delta; } myBarycentric[0] += 1.0f + myBarycentric[{{ pd }}+1]; } #ifdef USE_ADDITIVE_HASH unsigned int cumulative_hash = hash({{ pd }}, myGreedy); #endif for (int color = 0; color <= {{ pd }}; color++) { // Compute the location of the lattice point explicitly (all but // the last coordinate - it's redundant because they sum to zero) if (!outOfBounds) { for (int i = 0; i < {{ pd }}; i++) { myKey[i] = myGreedy[i] + color; if (myRank[i] > {{ pd }}-color) myKey[i] -= ({{ pd }}+1); } } #ifdef USE_ADDITIVE_HASH for (int i = 0; i < {{ pd }}; i++) { if (myRank[i] == {{ pd }}-color) cumulative_hash += hOffset[i]; } #endif if (!outOfBounds) { MatrixEntry r; #ifdef USE_ADDITIVE_HASH r.index = hashTableInsert({{ pd }}, cumulative_hash, myKey, idx*({{ pd }}+1)+color); #else r.index = hashTableInsert({{ pd }}, myKey, idx*({{ pd }}+1)+color); #endif r.weight = myBarycentric[color]; matrix[idx*({{ pd }}+1) + color] = r; } } } // template<int kd> __global__ static void cleanHashTable(const int kd, int n, MatrixEntry *matrix) { const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x; if (idx >= n) return; // find my hash table entry int *e = table_entries + idx; // Check if I created my own key in the previous phase if (*e >= 0) { // Rehash my key and reset the pointer in order to merge with // any other pixel that created a different entry under the // same key. If the computation was serial this would never // happen, but sometimes race conditions can make the same key // be inserted twice. hashTableRetrieve always returns the // earlier, so it's no problem as long as we rehash now. 
#ifdef LINEAR_D_MEMORY // Get my key short myKey[kd]; generateKey(kd, *e, myKey); *e = hashTableRetrieve(kd, myKey); #else *e = hashTableRetrieve(kd, table_keys + *e*kd); #endif } } // template<int {{ pd }}, int {{ vd }}> //__global__ static void splat(const int w, const int h, float *values, MatrixEntry *matrix) { // //const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; // // // 8x8 blocks // const int x = threadIdx.x + blockIdx.x * blockDim.x; // const int y = threadIdx.y + (blockIdx.y/({{ pd }}+1)) * blockDim.y; // //const int threadId = threadIdx.y*blockDim.x + threadIdx.x; // const int color = blockIdx.y % ({{ pd }}+1); // const int idx = y*w + x; // const bool outOfBounds = (x >= w) || (y >= h); // // if (outOfBounds) return; // // float *myValue = values + idx*{{ vd }}; // // MatrixEntry r = matrix[idx*({{ pd }}+1)+color]; // matrix[idx*({{ pd }}+1)+color].index = r.index = table_entries[r.index]; // float *val = table_values + r.index*({{ vd }}+1); // for (int j = 0; j < {{ vd }}; j++) { // atomicAdd(val+j, myValue[j]*r.weight); // } // atomicAdd(val+{{ vd }}, r.weight); //} // template<int {{ pd }}, int {{ vd }}> __global__ static void splatCache(const int w, const int h, float *values, MatrixEntry *matrix) { const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + (blockIdx.y/({{ pd }}+1)) * blockDim.y; const int threadId = threadIdx.y*blockDim.x + threadIdx.x; const int color = blockIdx.y % ({{ pd }}+1); const int idx = y*w + x; const bool outOfBounds = (x >= w) || (y >= h); __shared__ int sharedOffsets[BLOCK_SIZE]; __shared__ float sharedValues[BLOCK_SIZE*({{ vd }}+1)]; int myOffset = -1; float *myValue = sharedValues + threadId*({{ vd }}+1); if (!outOfBounds) { float *value = values + idx*{{ vd }}; MatrixEntry r = matrix[idx*({{ pd }}+1)+color]; // convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array matrix[idx*({{ pd }}+1)+color].index = r.index = table_entries[r.index]; // record the offset into the keys/values array in shared space myOffset = sharedOffsets[threadId] = r.index*({{ vd }}+1); for (int j = 0; j < {{ vd }}; j++) { myValue[j] = value[j]*r.weight; } myValue[{{ vd }}] = r.weight; } else { sharedOffsets[threadId] = -1; } __syncthreads(); // am I the first thread in this block to care about this key? 
if (outOfBounds) return; for (int i = 0; i < BLOCK_SIZE; i++) { if (i < threadId) { if (myOffset == sharedOffsets[i]) { // somebody else with higher priority cares about this key return; } } else if (i > threadId) { if (myOffset == sharedOffsets[i]) { // someone else with lower priority cares about this key, accumulate it into mine for (int j = 0; j <= {{ vd }}; j++) { sharedValues[threadId*({{ vd }}+1) + j] += sharedValues[i*({{ vd }}+1) + j]; } } } } // only the threads with something to write to main memory are still going float *val = table_values + myOffset; for (int j = 0; j <= {{ vd }}; j++) { atomicAdd(val+j, myValue[j]); } } // template<int {{ pd }}, int {{ vd }}> __global__ static void blur(int n, float *newValues, int color, MatrixEntry *matrix) { const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x; if (idx >= n) return; // Check if I'm valid if (matrix[idx].index != idx) return; // find my key and the keys of my neighbours short myKey[{{ pd }}+1]; short np[{{ pd }}+1]; short nm[{{ pd }}+1]; #ifdef LINEAR_D_MEMORY generateKey({{ pd }}, idx, myKey); for (int i = 0; i < {{ pd }}; i++) { np[i] = myKey[i]+1; nm[i] = myKey[i]-1; } #else for (int i = 0; i < {{ pd }}; i++) { myKey[i] = table_keys[idx*{{ pd }}+i]; np[i] = myKey[i]+1; nm[i] = myKey[i]-1; } #endif np[color] -= {{ pd }}+1; nm[color] += {{ pd }}+1; #ifdef USE_ADDITIVE_HASH unsigned int hCurrent = hash({{ pd }}, myKey); int offNp = hashTableRetrieveWithHash({{ pd }}, hCurrent+hOffset[color],np); int offNm = hashTableRetrieveWithHash({{ pd }}, hCurrent-hOffset[color],nm); #else int offNp = hashTableRetrieve({{ pd }}, np); int offNm = hashTableRetrieve({{ pd }}, nm); #endif float *valMe = table_values + ({{ vd }}+1)*idx; float *valNp = table_values + ({{ vd }}+1)*offNp; float *valNm = table_values + ({{ vd }}+1)*offNm; float *valOut = newValues + ({{ vd }}+1)*idx; if (offNp >= 0 && offNm >= 0) { for (int i = 0; i <= {{ vd }}; i++) { valOut[i] = (valNp[i] + (valMe[i]*2) + valNm[i])/4; } } else if (offNp >= 0) { for (int i = 0; i <= {{ vd }}; i++) { valOut[i] = (valNp[i] + (valMe[i]*2))/4; } } else if (offNm >= 0) { for (int i = 0; i <= {{ vd }}; i++) { valOut[i] = (valNm[i] + (valMe[i]*2))/4; } } else { for (int i = 0; i <= {{ vd }}; i++) { valOut[i] = valMe[i]*2; } } } // template<int {{ pd }}, int {{ vd }}> __global__ static void slice(const int w, const int h, float *values, MatrixEntry *matrix) { //const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; const int threadId = threadIdx.y*blockDim.x + threadIdx.x; const int idx = y*w + x; const bool outOfBounds = (x >= w) || (y >= h); if (outOfBounds) return; __shared__ float localValue[BLOCK_SIZE*{{ vd }}]; float *myValue = localValue + threadId*{{ vd }}; float myWeight = 0; for (int i = 0; i < {{ vd }}; i++) { myValue[i] = 0; } for (int i = 0; i <= {{ pd }}; i++) { MatrixEntry r = matrix[idx*({{ pd }}+1) + i]; float *val = table_values + r.index*({{ vd }}+1); for (int j = 0; j < {{ vd }}; j++) { myValue[j] += r.weight*val[j]; } myWeight += r.weight*val[{{ vd }}]; } myWeight = 1.0f/myWeight; for (int j = 0; j < {{ vd }}; j++) values[idx*{{ vd }} + j] = myValue[j]*myWeight; } // template<int {{ vd }}, int {{ pd }}> void filter_(float *im, float *ref, int w, int h, bool accurate) { int n = w*h; float blurVariance = accurate ? 
0.5 : 0; MirroredArray<float> scaleFactor({{ pd }}); //MirroredArray<float> offset({{ pd }}); for (int i = 0; i < {{ pd }}; i++) { scaleFactor.host[i] = ({{ pd }}+1)*sqrtf((1.0/6 + blurVariance)/((i+1)*(i+2))); //offset.host[i] = ((double)rand()/RAND_MAX)*({{ pd }}+1)*2; } scaleFactor.hostToDevice(); //offset.hostToDevice(); MirroredArray<float> values(im, n*{{ vd }}); // data, size MirroredArray<float> positions(ref, n*{{ pd }}); MirroredArray<MatrixEntry> matrix(n*({{ pd }}+1)); createHashTable({{ pd }}, {{ vd }}+1, n*({{ pd }}+1)); // createHashTable<{{ pd }}, {{ vd }}+1>(n*({{ pd }}+1)); // Populate constant memory for hash helpers unsigned long long int __host_two32 = ((unsigned long long int)1)<<32; unsigned int __host_div_c = 2*(n*({{ pd }}+1)); unsigned int __host_div_l = ceilf(logf((float)__host_div_c) / logf(2.0f)); unsigned int __host_div_m = (__host_two32<<__host_div_l)/__host_div_c - __host_two32 + 1; CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&__div_c, &__host_div_c, sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&__div_l, &__host_div_l, sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&__div_m, &__host_div_m, sizeof(unsigned int))); // Populate constant memory with hash of offset vectors unsigned int hOffset_host[{{ pd }}+1]; signed short offset[{{ pd }}+1]; for (int i = 0; i < {{ pd }}; offset[i] = 1, i++); for (int i = 0; i <= {{ pd }}; i++) { offset[i] -= {{ pd }}+1; hOffset_host[i] = hash({{ pd }}, offset); offset[i] += {{ pd }}+1; } CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&hOffset, &hOffset_host, sizeof(unsigned int)*({{ pd }}+1))); dim3 blocks((w-1)/8+1, (h-1)/8+1, 1); dim3 blockSize(8, 8, 1); timeval t[7]; gettimeofday(t+0, NULL); // createMatrix<<<blocks, blockSize>>>({{ pd }}, w, h, positions.device, createMatrix<<<blocks, blockSize>>>(w, h, positions.device, values.device, scaleFactor.device, matrix.device); CUT_CHECK_ERROR("Matrix creation failed\n"); gettimeofday(t+1, NULL); //HashTable hostTable; //int hashTableEntries; //CUDA_SAFE_CALL(cudaMemcpy(&hostTable, table, sizeof(HashTable), cudaMemcpyDeviceToHost)); //CUDA_SAFE_CALL(cudaMemcpy(&hashTableEntries, hostTable_filled, sizeof(int), cudaMemcpyDeviceToHost)); //printf("Hash table has %d entries\n", hashTableEntries); // fix duplicate hash table entries int cleanBlockSize = 32; dim3 cleanBlocks((n-1)/cleanBlockSize+1, 2*({{ pd }}+1), 1); cleanHashTable<<<cleanBlocks, cleanBlockSize>>>({{ pd }}, 2*n*({{ pd }}+1), matrix .device); CUT_CHECK_ERROR("clean failed\n"); gettimeofday(t+2, NULL); // splat splits by color, so extend the y coordinate to our blocks to represent that blocks.y *= {{ pd }}+1; splatCache<<<blocks, blockSize>>>(w, h, values.device, matrix.device); // splatCache<{{ pd }}, {{ vd }}><<<blocks, blockSize>>>(w, h, values.device, matrix.device); //splat<{{ pd }}, {{ vd }}><<<blocks, blockSize>>>(w, h, values.device, matrix.device); CUT_CHECK_ERROR("splat failed\n"); gettimeofday(t+3, NULL); if (accurate) { float *newValues; allocateCudaMemory((void**)&(newValues), n*({{ pd }}+1)*({{ vd }}+1)*sizeof(float)); CUDA_SAFE_CALL(cudaMemset((void *)newValues, 0, n*({{ pd }}+1)*({{ vd }}+1)*sizeof(float))); for (int color = 0; color <= {{ pd }}; color++) { blur<<<cleanBlocks, cleanBlockSize>>>(n*({{ pd }}+1), newValues, color, matrix.device); // blur<{{ pd }}, {{ vd }}><<<cleanBlocks, cleanBlockSize>>>(n*({{ pd }}+1), newValues, matrix.device, color); CUT_CHECK_ERROR("blur failed\n"); newValues = swapHashTableValues(newValues); } } gettimeofday(t+4, NULL); blocks.y /= ({{ pd 
    dim3 blocks((w-1)/8+1, (h-1)/8+1, 1);
    dim3 blockSize(8, 8, 1);
    timeval t[7];
    gettimeofday(t+0, NULL);

    // createMatrix<<<blocks, blockSize>>>({{ pd }}, w, h, positions.device,
    createMatrix<<<blocks, blockSize>>>(w, h, positions.device,
                                        values.device,
                                        scaleFactor.device,
                                        matrix.device);
    CUT_CHECK_ERROR("Matrix creation failed\n");
    gettimeofday(t+1, NULL);

    //HashTable hostTable;
    //int hashTableEntries;
    //CUDA_SAFE_CALL(cudaMemcpy(&hostTable, table, sizeof(HashTable), cudaMemcpyDeviceToHost));
    //CUDA_SAFE_CALL(cudaMemcpy(&hashTableEntries, hostTable_filled, sizeof(int), cudaMemcpyDeviceToHost));
    //printf("Hash table has %d entries\n", hashTableEntries);

    // fix duplicate hash table entries
    int cleanBlockSize = 32;
    dim3 cleanBlocks((n-1)/cleanBlockSize+1, 2*({{ pd }}+1), 1);
    cleanHashTable<<<cleanBlocks, cleanBlockSize>>>({{ pd }}, 2*n*({{ pd }}+1), matrix.device);
    CUT_CHECK_ERROR("clean failed\n");
    gettimeofday(t+2, NULL);

    // splat splits by color, so extend the y coordinate to our blocks to represent that
    blocks.y *= {{ pd }}+1;
    splatCache<<<blocks, blockSize>>>(w, h, values.device, matrix.device);
    // splatCache<{{ pd }}, {{ vd }}><<<blocks, blockSize>>>(w, h, values.device, matrix.device);
    //splat<{{ pd }}, {{ vd }}><<<blocks, blockSize>>>(w, h, values.device, matrix.device);
    CUT_CHECK_ERROR("splat failed\n");
    gettimeofday(t+3, NULL);

    if (accurate) {
        float *newValues;
        allocateCudaMemory((void**)&(newValues), n*({{ pd }}+1)*({{ vd }}+1)*sizeof(float));
        CUDA_SAFE_CALL(cudaMemset((void *)newValues, 0, n*({{ pd }}+1)*({{ vd }}+1)*sizeof(float)));
        for (int color = 0; color <= {{ pd }}; color++) {
            blur<<<cleanBlocks, cleanBlockSize>>>(n*({{ pd }}+1), newValues, color, matrix.device);
            // blur<{{ pd }}, {{ vd }}><<<cleanBlocks, cleanBlockSize>>>(n*({{ pd }}+1), newValues, matrix.device, color);
            CUT_CHECK_ERROR("blur failed\n");
            newValues = swapHashTableValues(newValues);
        }
    }
    gettimeofday(t+4, NULL);

    blocks.y /= ({{ pd }}+1);
    slice<<<blocks, blockSize>>>(w, h, values.device, matrix.device);
    // slice<{{ pd }}, {{ vd }}><<<blocks, blockSize>>>(w, h, values.device, matrix.device);
    CUT_CHECK_ERROR("slice failed\n");
    gettimeofday(t+5, NULL);

    double total = (t[5].tv_sec - t[0].tv_sec)*1000.0 + (t[5].tv_usec - t[0].tv_usec)/1000.0;
    printf("Total time: %3.3f ms\n", total);
    const char *names[5] = {"Create", "Clean ", "Splat ", "Blur ", "Slice "};
    for (int i = 1; i < 6; i++) {
        printf("%s: %3.3f ms\n", names[i-1],
               (t[i].tv_sec - t[i-1].tv_sec)*1000.0 + (t[i].tv_usec - t[i-1].tv_usec)/1000.0);
    }
    printf("Total GPU memory usage: %u bytes\n", (unsigned int)GPU_MEMORY_ALLOCATION);

    values.deviceToHost();

    destroyHashTable();
}
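// The __div_c/__div_l/__div_m constants uploaded in the function above are the
// Granlund-Montgomery "division by invariant integer" magic numbers, which let
// device code reduce a hash value modulo the table capacity without an integer
// divide. The device-side consumer lives in the hash-table code and is not
// shown in this file; the host-side sketch below (hypothetical helper names,
// illustration only) shows how constants built with that exact formula are
// typically used to compute n % c. On the device, exampleMulHi32 would be
// __umulhi(a, b).
static inline unsigned int exampleMulHi32(unsigned int a, unsigned int b) {
    // High 32 bits of the 64-bit product a*b.
    return (unsigned int)(((unsigned long long)a * b) >> 32);
}
static inline unsigned int exampleFastMod(unsigned int n, unsigned int c,
                                          unsigned int m, unsigned int l) {
    // m and l are built exactly like __host_div_m / __host_div_l above:
    //   l = ceil(log2(c)),   m = floor(2^(32+l) / c) - 2^32 + 1
    unsigned int t = exampleMulHi32(m, n);             // high word of m*n
    unsigned int q = (t + ((n - t) >> 1)) >> (l - 1);  // == n / c
    return n - q * c;                                  // == n % c
}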
//#ifdef LIBRARY
//extern "C"
//#ifdef WIN32
//__declspec(dllexport)
//#endif
//#endif
//void filter(float *im, float *ref, int {{ pd }}, int {{ vd }}, int w, int h, bool accurate) {
//    switch ({{ vd }}*1000 + {{ pd }}) {
//    case 1001: filter_<1, 1>(im, ref, w, h, accurate); break;
//    case 2001: filter_<2, 1>(im, ref, w, h, accurate); break;
//    case 3001: filter_<3, 1>(im, ref, w, h, accurate); break;
//    case 1002: filter_<1, 2>(im, ref, w, h, accurate); break;
//    case 2002: filter_<2, 2>(im, ref, w, h, accurate); break;
//    case 3002: filter_<3, 2>(im, ref, w, h, accurate); break;
//    case 1003: filter_<1, 3>(im, ref, w, h, accurate); break;
//    case 2003: filter_<2, 3>(im, ref, w, h, accurate); break;
//    case 3003: filter_<3, 3>(im, ref, w, h, accurate); break;
//    case 1004: filter_<1, 4>(im, ref, w, h, accurate); break;
//    case 2004: filter_<2, 4>(im, ref, w, h, accurate); break;
//    case 3004: filter_<3, 4>(im, ref, w, h, accurate); break;
//    case 1005: filter_<1, 5>(im, ref, w, h, accurate); break;
//    case 2005: filter_<2, 5>(im, ref, w, h, accurate); break;
//    case 3005: filter_<3, 5>(im, ref, w, h, accurate); break;
//    case 1006: filter_<1, 6>(im, ref, w, h, accurate); break;
//    case 2006: filter_<2, 6>(im, ref, w, h, accurate); break;
//    case 3006: filter_<3, 6>(im, ref, w, h, accurate); break;
//    case 1007: filter_<1, 7>(im, ref, w, h, accurate); break;
//    case 2007: filter_<2, 7>(im, ref, w, h, accurate); break;
//    case 3007: filter_<3, 7>(im, ref, w, h, accurate); break;
//    case 1008: filter_<1, 8>(im, ref, w, h, accurate); break;
//    case 2008: filter_<2, 8>(im, ref, w, h, accurate); break;
//    case 3008: filter_<3, 8>(im, ref, w, h, accurate); break;
//    case 1009: filter_<1, 9>(im, ref, w, h, accurate); break;
//    case 2009: filter_<2, 9>(im, ref, w, h, accurate); break;
//    case 3009: filter_<3, 9>(im, ref, w, h, accurate); break;
//    case 1010: filter_<1, 10>(im, ref, w, h, accurate); break;
//    case 2010: filter_<2, 10>(im, ref, w, h, accurate); break;
//    case 3010: filter_<3, 10>(im, ref, w, h, accurate); break;
//    case 1011: filter_<1, 11>(im, ref, w, h, accurate); break;
//    case 2011: filter_<2, 11>(im, ref, w, h, accurate); break;
//    case 3011: filter_<3, 11>(im, ref, w, h, accurate); break;
//    case 1012: filter_<1, 12>(im, ref, w, h, accurate); break;
//    case 2012: filter_<2, 12>(im, ref, w, h, accurate); break;
//    case 3012: filter_<3, 12>(im, ref, w, h, accurate); break;
//    case 1013: filter_<1, 13>(im, ref, w, h, accurate); break;
//    case 2013: filter_<2, 13>(im, ref, w, h, accurate); break;
//    case 3013: filter_<3, 13>(im, ref, w, h, accurate); break;
//    case 1014: filter_<1, 14>(im, ref, w, h, accurate); break;
//    case 2014: filter_<2, 14>(im, ref, w, h, accurate); break;
//    case 3014: filter_<3, 14>(im, ref, w, h, accurate); break;
//    case 1015: filter_<1, 15>(im, ref, w, h, accurate); break;
//    case 2015: filter_<2, 15>(im, ref, w, h, accurate); break;
//    case 3015: filter_<3, 15>(im, ref, w, h, accurate); break;
//    case 1016: filter_<1, 16>(im, ref, w, h, accurate); break;
//    case 2016: filter_<2, 16>(im, ref, w, h, accurate); break;
//    case 3016: filter_<3, 16>(im, ref, w, h, accurate); break;
//    default:
//        printf("Unsupported channel counts. Reference image must have 1 to 16 channels, input image must have 1 to 3 channels\n");
//    }
//}
//
//
//
//
//// Below here is a program for testing it on the command line
//#ifndef LIBRARY
//
//struct header {
//    int frames, width, height, channels, type;
//};
//
//void loadTMP(const char *filename, float **data, header *h) {
//    FILE *f = fopen(filename, "rb");
//    fread(h, sizeof(header), 1, f);
//    size_t size = h->frames*h->width*h->channels*h->height;
//    *data = new float[size];
//    fread(*data, sizeof(float), size, f);
//    fclose(f);
//}
//
//void saveTMP(const char *filename, float *data, header h) {
//    FILE *f = fopen(filename, "wb");
//    fwrite(&h, sizeof(header), 1, f);
//    size_t size = h.frames*h.width*h.channels*h.height;
//    fwrite(data, sizeof(float), size, f);
//    fclose(f);
//}
//
//int main(int argc, char **argv) {
//    initCuda(1, argv);
//
//    if (argc < 4) {
//        printf("Usage: permutohedral input.tmp ref.tmp output.tmp {accurate}\n");
//        return 1;
//    }
//
//    bool accurate = argc == 5;
//
//    srand(time(NULL));
//
//    float *im, *ref;
//    header imHeader, refHeader;
//    loadTMP(argv[1], &im, &imHeader);
//    loadTMP(argv[2], &ref, &refHeader);
//
//    filter(im, ref, refHeader.channels, imHeader.channels, imHeader.width, imHeader.height, accurate);
//
//    saveTMP(argv[3], im, imHeader);
//
//    return 0;
//}
//
//#endif
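// Usage sketch (illustration only, not part of the original build): assuming
// this template has been rendered with {{ pd }} = 5 and {{ vd }} = 3, that the
// rendered function keeps the name filter_ and the (im, ref, w, h, accurate)
// parameter list shown in the commented-out dispatcher above, and that im/ref
// are stored interleaved per pixel (vd and pd floats per pixel respectively),
// a color bilateral filter could build its reference positions from scaled
// (x, y, r, g, b) like this. As in the commented-out test program, filter_
// writes the filtered values back into im.
//
//void bilateralExample(float *rgb /* w*h*3, row major */, int w, int h,
//                      float sigmaSpatial, float sigmaColor, bool accurate) {
//    float *ref = new float[w*h*5];
//    for (int y = 0; y < h; y++) {
//        for (int x = 0; x < w; x++) {
//            int idx = y*w + x;
//            ref[idx*5 + 0] = x / sigmaSpatial;
//            ref[idx*5 + 1] = y / sigmaSpatial;
//            ref[idx*5 + 2] = rgb[idx*3 + 0] / sigmaColor;
//            ref[idx*5 + 3] = rgb[idx*3 + 1] / sigmaColor;
//            ref[idx*5 + 4] = rgb[idx*3 + 2] / sigmaColor;
//        }
//    }
//    filter_(rgb, ref, w, h, accurate); // filtered image is written back into rgb
//    delete[] ref;
//}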